/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which would end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
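
/*
 * Illustrative sketch (not part of the original file): the pfn<->mfn
 * conversion described above, expressed with the helpers this file
 * defines further down.  Creating a pte from a pfn stores the mfn;
 * reading the value back recovers the pfn:
 *
 *	pte_t pte = xen_make_pte(pteval);	// pfn -> mfn on create
 *	pteval_t v = xen_pte_val(pte);		// mfn -> pfn on read
 *
 * "pteval" here is an assumed pteval_t carrying a pfn plus protection
 * flags; the actual conversions are done by pte_pfn_to_mfn() and
 * pte_mfn_to_pfn() below.
 */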
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to map the page table pages we allocate to map the
 * rest.  Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it is the last cr3
 * that was set.  This may not be the current effective cr3, because
 * its update may be lazily deferred.  However, a vcpu looking at its
 * own cr3 can use this value knowing that everything will be
 * self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
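
/*
 * Minimal sketch (not part of the original file): how a cross-vcpu
 * reader would follow the rule above.  The helper name is
 * hypothetical; only per_cpu() and the xen_current_cr3 variable are
 * taken from this file.
 */
static inline unsigned long xen_read_other_vcpu_cr3(int cpu)
{
	/*
	 * xen_current_cr3 may lag the last requested cr3, but it is
	 * never set before the hypercall completes, so it is safe
	 * for other vcpus to read.
	 */
	return per_cpu(xen_current_cr3, cpu);
}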


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
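
/*
 * Worked example (not part of the original file): the rounding adds
 * PGDIR_SIZE - 1 and masks down.  If STACK_TOP_MAX is already
 * pgd-aligned, USER_LIMIT equals STACK_TOP_MAX; otherwise it becomes
 * the next pgd boundary (with 4-level paging PGDIR_SHIFT is 39, so
 * that is the next 512GB boundary).
 */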


#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

/* Build the parallel p2m_top_mfn structures */
static void __init xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
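
/*
 * Usage sketch (not part of the original file): a hypothetical caller
 * doing a p2m lookup must treat INVALID_P2M_ENTRY (~0UL, the value
 * p2m_missing is filled with) as "no machine page here".
 */
static inline bool xen_pfn_mapped_sketch(unsigned long pfn)
{
	unsigned long mfn = get_phys_to_machine(pfn);

	return mfn != INVALID_P2M_ENTRY;
}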

/* Install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}

static void alloc_p2m(unsigned long pfn)
{
	unsigned long *p;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	if (!install_p2mtop_page(pfn, p))
		free_page((unsigned long)p);
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}

void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		alloc_p2m(pfn);

		if (!__set_phys_to_machine(pfn, mfn))
			BUG();
	}
}

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole, and so this is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
 | 844 |  | 
| Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 845 | /* If we're using split pte locks, then take the page's lock and | 
 | 846 |    return a pointer to it.  Otherwise return NULL. */ | 
| Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 847 | static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) | 
| Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 848 | { | 
 | 849 | 	spinlock_t *ptl = NULL; | 
 | 850 |  | 
| Jeremy Fitzhardinge | f7d0b92 | 2008-09-09 15:43:22 -0700 | [diff] [blame] | 851 | #if USE_SPLIT_PTLOCKS | 
| Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 852 | 	ptl = __pte_lockptr(page); | 
| Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 853 | 	spin_lock_nest_lock(ptl, &mm->page_table_lock); | 
| Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 854 | #endif | 
 | 855 |  | 
 | 856 | 	return ptl; | 
 | 857 | } | 
 | 858 |  | 
| Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 859 | static void xen_pte_unlock(void *v) | 
| Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 860 | { | 
 | 861 | 	spinlock_t *ptl = v; | 
 | 862 | 	spin_unlock(ptl); | 
 | 863 | } | 
 | 864 |  | 
 | 865 | static void xen_do_pin(unsigned level, unsigned long pfn) | 
 | 866 | { | 
 | 867 | 	struct mmuext_op *op; | 
 | 868 | 	struct multicall_space mcs; | 
 | 869 |  | 
 | 870 | 	mcs = __xen_mc_entry(sizeof(*op)); | 
 | 871 | 	op = mcs.args; | 
 | 872 | 	op->cmd = level; | 
 | 873 | 	op->arg1.mfn = pfn_to_mfn(pfn); | 
 | 874 | 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); | 
 | 875 | } | 
 | 876 |  | 
static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

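/*
 * Editor's note: xen_pin_page() is a callback for __xen_pgd_walk(),
 * which applies it to every page of a pagetable, e.g. (see
 * __xen_pgd_pin() below):
 *
 *	needs_flush = __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 *
 * The accumulated return value tells the caller whether kmaps must be
 * flushed before the pin can safely complete.
 */
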
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	vm_unmap_aliases();

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable.
	   If we can't allocate a cpumask, fall back to sending an IPI
	   to each interesting cpu individually. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, &mm->cpu_vm_mask);

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

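/*
 * Editor's note (a sketch, assuming the lazy MMU batching described
 * above): because both flushes are issued with PARAVIRT_LAZY_MMU,
 * xen_mc_issue() doesn't flush while lazy MMU mode is active, so a run
 * of single-page flushes can be folded into one hypercall:
 *
 *	arch_enter_lazy_mmu_mode();
 *	xen_flush_tlb_single(addr1);
 *	xen_flush_tlb_single(addr2);
 *	arch_leave_lazy_mmu_mode();	// one batched multicall
 */
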
static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUs. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		/* Allocate a separate user pgd (on 64-bit, Xen gives
		   the guest distinct user and kernel pagetables), and
		   map the vsyscall page into it. */
		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	if (0 && PageHighMem(page))	/* disabled debug output */
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

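/*
 * Editor's worked example of the masking trick above: with
 * old = pte_val_ma(*ptep), the value (old & _PAGE_RW) | ~_PAGE_RW is
 * all-ones when the old pte was writable (leaving the new pte
 * untouched), and ~_PAGE_RW when it was read-only (stripping _PAGE_RW
 * from the new pte).  So a present read-only pte can never be
 * silently upgraded to RW.
 */
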
/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}
#endif

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

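/*
 * Editor's note: unlike xen_do_pin() above, pin_pagetable_pfn()
 * issues its mmuext op synchronously with HYPERVISOR_mmuext_op()
 * rather than queueing it in a multicall batch; it is used on the
 * early-boot paths below, where (presumably) the effect is needed
 * immediately and batching buys nothing.
 */
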
/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pte pages are pinned, since
   there's only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		vm_unmap_aliases();
		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never be called until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

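/*
 * Editor's sketch: the two helpers above compose, so a pagetable
 * entry holding a machine address can be turned back into a usable
 * pointer:
 *
 *	pte_t *pte_page = m2v(pmd[pmdidx].pmd);
 *
 * as done in xen_map_identity_early() below.
 */
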
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

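/*
 * Editor's example of typical use: stripping write permission from a
 * pagetable page before handing it to Xen,
 *
 *	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 *
 * as done in xen_setup_kernel_pagetable() below.
 */
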
static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

	default:
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

__init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

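/*
 * Editor's note: the ops table below initially installs the *_init
 * variants, which assume everything is pinned and use direct
 * hypercalls; xen_post_allocator_init() above swaps in the
 * struct-page-aware versions (xen_alloc_pte, xen_release_pte, etc.)
 * once the allocator is up.
 */
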
const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pmd_init,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy,
	},

	.set_fixmap = xen_set_fixmap,
};

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	/* Note: the *_batched files below assume mmu_stats provides
	   matching *_batched counters alongside the *_pinned ones. */
	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
//	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
//			   &mmu_stats.pte_update_pinned);
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */