/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) using cpa_lock, so that we don't allow one CPU with stale
 * large TLB entries to change the page attributes in parallel with
 * another CPU that is splitting the same large page entry while
 * changing the attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	unsigned long flags;

	/* Protect against CPA */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	/* direct_pages_count[] is in pages; the shifts convert to kB */
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(roundup((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

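/*
 * Usage sketch for clflush_cache_range() above (illustrative, not part
 * of the original file): push CPU-written data out of the caches, e.g.
 * before a non-snooping agent reads the buffer:
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * The mb() barriers inside order the flushes against surrounding
 * memory accesses, so callers need no extra fencing.
 */
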
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU: clflush is a MESI-coherent
	 * instruction that will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long *addr;

	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/* 4M threshold: with 1024 or more 4k pages, flush the whole cache */
	if (numpages >= 1024) {
		if (boot_cpu_data.x86_model >= 4)
			wbinvd();
		return;
	}
	/*
	 * We only need to flush on one CPU: clflush is a MESI-coherent
	 * instruction that will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr++) {
		pte_t *pte = lookup_address(*addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) *addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
				   unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 * On 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

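/*
 * Example (illustrative): a request to set _PAGE_NX on an address
 * inside [_text, _etext) gets _PAGE_NX masked out above, so kernel
 * text stays executable; likewise _PAGE_RW is stripped for pfns that
 * alias .rodata, keeping read-only data read-only.
 */
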
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

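/*
 * Usage sketch (illustrative): callers typically probe the mapping
 * level before deciding whether a large page must be split:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 *	if (pte && level != PG_LEVEL_4K)
 *		...address is covered by a 2M/1G mapping...
 */
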
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check whether we can
	 * change the large page in one go. We request a split when
	 * the address is not aligned or the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

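/*
 * Worked example (illustrative): for a 2M page (psize = 0x200000) and
 * a request at address = 2M-base + 0x1000 with cpa->numpages = 4:
 * nextpage_addr is the next 2M boundary, so numpages = 511 remaining
 * 4k pages and cpa->numpages stays 4. Since the address is not at
 * (nextpage_addr - psize), the one-go path above is skipped and
 * do_split remains 1, unless new_prot equals old_prot.
 */
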
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	if (!debug_pagealloc)
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc)
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	if (address >= (unsigned long)__va(0) &&
		address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);

#ifdef CONFIG_X86_64
	if (address >= (unsigned long)__va(1UL<<32) &&
		address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);
#endif

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base)
		__free_page(base);
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}

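/*
 * After a successful split, the former large entry points at a newly
 * allocated table whose PTRS_PER_PTE entries cover the same physical
 * range with the old protections, so the split itself changes no
 * effective mapping; the caller then retries and modifies the
 * individual small entries.
 */
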
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	/*
	 * Ignore all non-primary paths.
	 */
	if (!primary)
		return 0;

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1', indicating that we processed a cpa
	 * request for one virtual address page and its pfn. TBD: numpages
	 * can be set based on the initial value and the level returned by
	 * lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;
repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (!pte_val(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flags have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		/*
		 * Do a global tlb flush after splitting the large page
		 * and before we do the actual change page attribute in the PTE.
		 *
		 * Without this, we violate the TLB application note, which says
		 * "The TLBs may contain both ordinary and large-page
		 *  translations for a 4-KByte range of linear addresses. This
		 *  may occur if software modifies the paging structures so that
		 *  the page size used for the address range changes. If the two
		 *  translations differ with respect to page frame or attributes
		 *  (e.g., permissions), processor behavior is undefined and may
		 *  be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu with stale tlb entries to change
		 * the page attributes in parallel for an address that falls
		 * into the just split large page entry.
		 */
		flush_tlb_all();
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;
	unsigned long temp_cpa_vaddr, vaddr;

	if (cpa->pfn >= max_pfn_mapped)
		return 0;

#ifdef CONFIG_X86_64
	if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
		return 0;
#endif
	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~CPA_ARRAY;

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
	alias_cpa.vaddr = &temp_cpa_vaddr;
	alias_cpa.flags &= ~CPA_ARRAY;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}

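/*
 * Rationale (sketch): one physical page may be reachable through the
 * direct mapping, the 64bit high kernel mapping and further aliases.
 * Cache attributes must agree across all of them, hence the extra
 * __change_page_attr_set_clr() calls above on the alias addresses.
 */
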
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		/* for array changes, we can't use large page */
		if (cpa->flags & CPA_ARRAY)
			cpa->numpages = 1;

		if (!debug_pagealloc)
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc)
			spin_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & CPA_ARRAY)
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int array)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check whether we are requested to change an unsupported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (!array) {
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
	} else {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	/*
	 * If we're called with lazy mmu updates enabled, the
	 * in-memory pte state may be stale.  Flush pending updates to
	 * bring them up to date.
	 */
	arch_flush_lazy_mmu_mode();

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (array)
		cpa.flags |= CPA_ARRAY;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!(cpa.flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in
	 * the error case, we fall back to cpa_flush_all() (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush) {
		if (cpa.flags & CPA_ARRAY)
			cpa_flush_array(addr, numpages, cache);
		else
			cpa_flush_range(*addr, numpages, cache);
	} else
		cpa_flush_all(cache);

	/*
	 * If we've been called with lazy mmu updates enabled, then
	 * make sure that everything gets flushed out before we
	 * return.
	 */
	arch_flush_lazy_mmu_mode();

out:
	return ret;
}

| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 874 | static inline int change_page_attr_set(unsigned long *addr, int numpages, | 
 | 875 | 				       pgprot_t mask, int array) | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 876 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 877 | 	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, | 
 | 878 | 		array); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 879 | } | 
 | 880 |  | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 881 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, | 
 | 882 | 					 pgprot_t mask, int array) | 
| Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 883 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 884 | 	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, | 
 | 885 | 		array); | 
| Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 886 | } | 
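/*
 * The two wrappers above split one attribute mask onto the set/clear
 * pair of change_page_attr_set_clr(). For instance (taken from the
 * callers below):
 *
 *	change_page_attr_set(&addr, n, __pgprot(_PAGE_NX), 0)
 *		-> mask_set = _PAGE_NX, mask_clr = 0	(make non-executable)
 *	change_page_attr_clear(&addr, n, __pgprot(_PAGE_NX), 0)
 *		-> mask_set = 0, mask_clr = _PAGE_NX	(make executable)
 */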
 | 887 |  | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 888 | int _set_memory_uc(unsigned long addr, int numpages) | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 889 | { | 
| Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 890 | 	/* | 
 | 891 | 	 * For now UC MINUS; see comments in ioremap_nocache(). | 
 | 892 | 	 */ | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 893 | 	return change_page_attr_set(&addr, numpages, | 
 | 894 | 				    __pgprot(_PAGE_CACHE_UC_MINUS), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 895 | } | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 896 |  | 
 | 897 | int set_memory_uc(unsigned long addr, int numpages) | 
 | 898 | { | 
| Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 899 | 	/* | 
 | 900 | 	 * For now UC MINUS; see comments in ioremap_nocache(). | 
 | 901 | 	 */ | 
| venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 902 | 	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | 
| Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 903 | 			    _PAGE_CACHE_UC_MINUS, NULL)) | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 904 | 		return -EINVAL; | 
 | 905 |  | 
 | 906 | 	return _set_memory_uc(addr, numpages); | 
 | 907 | } | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 908 | EXPORT_SYMBOL(set_memory_uc); | 
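/*
 * A hedged usage sketch (my_dev_init() and buf are hypothetical): every
 * successful set_memory_uc() should eventually be paired with
 * set_memory_wb(), which both restores the cache attribute and releases
 * the memtype reservation taken above.
 */
static int __maybe_unused my_dev_init(void *buf, int npages)
{
	int ret;

	ret = set_memory_uc((unsigned long)buf, npages);
	if (ret)
		return ret;	/* reservation conflict or cpa failure */

	/* ... device works on the uncached buffer ... */

	set_memory_wb((unsigned long)buf, npages);	/* frees the memtype */
	return 0;
}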
 | 909 |  | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 910 | int set_memory_array_uc(unsigned long *addr, int addrinarray) | 
 | 911 | { | 
| Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 912 | 	unsigned long start; | 
 | 913 | 	unsigned long end; | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 914 | 	int i; | 
 | 915 | 	/* | 
 | 916 | 	 * For now UC MINUS; see comments in ioremap_nocache(). | 
 | 917 | 	 */ | 
 | 918 | 	for (i = 0; i < addrinarray; i++) { | 
| Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 919 | 		start = __pa(addr[i]); | 
 | 920 | 		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { | 
 | 921 | 			if (end != __pa(addr[i + 1])) | 
 | 922 | 				break; | 
 | 923 | 			i++; | 
 | 924 | 		} | 
 | 925 | 		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL)) | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 926 | 			goto out; | 
 | 927 | 	} | 
 | 928 |  | 
 | 929 | 	return change_page_attr_set(addr, addrinarray, | 
 | 930 | 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1); | 
 | 931 | out: | 
| Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 932 | 	for (i = 0; i < addrinarray; i++) { | 
 | 933 | 		unsigned long tmp = __pa(addr[i]); | 
 | 934 |  | 
 | 935 | 		if (tmp == start) | 
 | 936 | 			break; | 
| Venki Pallipadi | 01de05a | 2008-08-22 12:08:17 -0700 | [diff] [blame] | 937 | 		for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { | 
| Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 938 | 			if (end != __pa(addr[i + 1])) | 
 | 939 | 				break; | 
 | 940 | 			i++; | 
 | 941 | 		} | 
 | 942 | 		free_memtype(tmp, end); | 
 | 943 | 	} | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 944 | 	return -EINVAL; | 
 | 945 | } | 
 | 946 | EXPORT_SYMBOL(set_memory_array_uc); | 
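/*
 * The loops above coalesce physically contiguous array entries so that
 * reserve_memtype() sees one range per run instead of one call per page:
 * with hypothetical addresses where __pa(addr[0]) == 0x1000 and
 * __pa(addr[1]) == 0x2000, the inner loop advances end to 0x3000 and a
 * single reserve_memtype(0x1000, 0x3000, ...) covers both pages. On
 * failure, the out: path re-walks the array, rebuilding the same runs,
 * and frees exactly what was reserved before the failing entry.
 */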
 | 947 |  | 
| venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 948 | int _set_memory_wc(unsigned long addr, int numpages) | 
 | 949 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 950 | 	return change_page_attr_set(&addr, numpages, | 
 | 951 | 				    __pgprot(_PAGE_CACHE_WC), 0); | 
| venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 952 | } | 
 | 953 |  | 
 | 954 | int set_memory_wc(unsigned long addr, int numpages) | 
 | 955 | { | 
| Andreas Herrmann | 499f8f8 | 2008-06-10 16:06:21 +0200 | [diff] [blame] | 956 | 	if (!pat_enabled) | 
| venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 957 | 		return set_memory_uc(addr, numpages); | 
 | 958 |  | 
| venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 959 | 	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | 
| venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 960 | 		_PAGE_CACHE_WC, NULL)) | 
 | 961 | 		return -EINVAL; | 
 | 962 |  | 
 | 963 | 	return _set_memory_wc(addr, numpages); | 
 | 964 | } | 
 | 965 | EXPORT_SYMBOL(set_memory_wc); | 
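/*
 * A hedged usage sketch (my_fb_init() and fb are hypothetical):
 * write-combining suits streaming stores such as framebuffer fills.
 * Note the silent degradation to UC MINUS when PAT is disabled, and the
 * set_memory_wb() on teardown that releases the memtype reservation.
 */
static int __maybe_unused my_fb_init(unsigned long fb, int npages)
{
	int ret;

	ret = set_memory_wc(fb, npages);	/* UC MINUS if !pat_enabled */
	if (ret)
		return ret;

	/* ... stream pixels; stores may be buffered and combined ... */

	set_memory_wb(fb, npages);
	return 0;
}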
 | 966 |  | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 967 | int _set_memory_wb(unsigned long addr, int numpages) | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 968 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 969 | 	return change_page_attr_clear(&addr, numpages, | 
 | 970 | 				      __pgprot(_PAGE_CACHE_MASK), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 971 | } | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 972 |  | 
 | 973 | int set_memory_wb(unsigned long addr, int numpages) | 
 | 974 | { | 
| venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 975 | 	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | 
| venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 976 |  | 
 | 977 | 	return _set_memory_wb(addr, numpages); | 
 | 978 | } | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 979 | EXPORT_SYMBOL(set_memory_wb); | 
 | 980 |  | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 981 | int set_memory_array_wb(unsigned long *addr, int addrinarray) | 
 | 982 | { | 
 | 983 | 	int i; | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 984 |  | 
| Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 985 | 	for (i = 0; i < addrinarray; i++) { | 
 | 986 | 		unsigned long start = __pa(addr[i]); | 
 | 987 | 		unsigned long end; | 
 | 988 |  | 
 | 989 | 		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) { | 
 | 990 | 			if (end != __pa(addr[i + 1])) | 
 | 991 | 				break; | 
 | 992 | 			i++; | 
 | 993 | 		} | 
 | 994 | 		free_memtype(start, end); | 
 | 995 | 	} | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 996 | 	return change_page_attr_clear(addr, addrinarray, | 
 | 997 | 				      __pgprot(_PAGE_CACHE_MASK), 1); | 
 | 998 | } | 
 | 999 | EXPORT_SYMBOL(set_memory_array_wb); | 
 | 1000 |  | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1001 | int set_memory_x(unsigned long addr, int numpages) | 
 | 1002 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1003 | 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1004 | } | 
 | 1005 | EXPORT_SYMBOL(set_memory_x); | 
 | 1006 |  | 
 | 1007 | int set_memory_nx(unsigned long addr, int numpages) | 
 | 1008 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1009 | 	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1010 | } | 
 | 1011 | EXPORT_SYMBOL(set_memory_nx); | 
 | 1012 |  | 
 | 1013 | int set_memory_ro(unsigned long addr, int numpages) | 
 | 1014 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1015 | 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1016 | } | 
| Bruce Allan | a03352d | 2008-09-29 20:19:22 -0700 | [diff] [blame] | 1017 | EXPORT_SYMBOL_GPL(set_memory_ro); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1018 |  | 
 | 1019 | int set_memory_rw(unsigned long addr, int numpages) | 
 | 1020 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1021 | 	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1022 | } | 
| Bruce Allan | a03352d | 2008-09-29 20:19:22 -0700 | [diff] [blame] | 1023 | EXPORT_SYMBOL_GPL(set_memory_rw); | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1024 |  | 
 | 1025 | int set_memory_np(unsigned long addr, int numpages) | 
 | 1026 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1027 | 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1028 | } | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1029 |  | 
| Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1030 | int set_memory_4k(unsigned long addr, int numpages) | 
 | 1031 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1032 | 	return change_page_attr_set_clr(&addr, numpages, __pgprot(0), | 
 | 1033 | 					__pgprot(0), 1, 0); | 
| Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1034 | } | 
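/*
 * Both masks are empty above, so no attribute changes; the only effect
 * is force_split = 1, which makes cpa split any covering large mapping
 * down to 4k ptes. Useful when a caller must guarantee 4k granularity
 * for a range regardless of its current attributes.
 */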
 | 1035 |  | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1036 | int set_pages_uc(struct page *page, int numpages) | 
 | 1037 | { | 
 | 1038 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1039 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1040 | 	return set_memory_uc(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1041 | } | 
 | 1042 | EXPORT_SYMBOL(set_pages_uc); | 
 | 1043 |  | 
 | 1044 | int set_pages_wb(struct page *page, int numpages) | 
 | 1045 | { | 
 | 1046 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1047 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1048 | 	return set_memory_wb(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1049 | } | 
 | 1050 | EXPORT_SYMBOL(set_pages_wb); | 
 | 1051 |  | 
 | 1052 | int set_pages_x(struct page *page, int numpages) | 
 | 1053 | { | 
 | 1054 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1055 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1056 | 	return set_memory_x(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1057 | } | 
 | 1058 | EXPORT_SYMBOL(set_pages_x); | 
 | 1059 |  | 
 | 1060 | int set_pages_nx(struct page *page, int numpages) | 
 | 1061 | { | 
 | 1062 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1063 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1064 | 	return set_memory_nx(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1065 | } | 
 | 1066 | EXPORT_SYMBOL(set_pages_nx); | 
 | 1067 |  | 
 | 1068 | int set_pages_ro(struct page *page, int numpages) | 
 | 1069 | { | 
 | 1070 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1071 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1072 | 	return set_memory_ro(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1073 | } | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1074 |  | 
 | 1075 | int set_pages_rw(struct page *page, int numpages) | 
 | 1076 | { | 
 | 1077 | 	unsigned long addr = (unsigned long)page_address(page); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1078 |  | 
| Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1079 | 	return set_memory_rw(addr, numpages); | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1080 | } | 
| Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1081 |  | 
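/*
 * The set_pages_* helpers above are the struct page flavor of the
 * address-based API; they rely on page_address(), so they are only
 * meaningful for pages with a kernel direct mapping (not highmem).
 * A hedged sketch (my_page is hypothetical):
 *
 *	set_pages_ro(my_page, 1);	// clear _PAGE_RW on one page
 *	...
 *	set_pages_rw(my_page, 1);	// make it writable again
 */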
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1082 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1083 |  | 
 | 1084 | static int __set_pages_p(struct page *page, int numpages) | 
 | 1085 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1086 | 	unsigned long tempaddr = (unsigned long) page_address(page); | 
 | 1087 | 	struct cpa_data cpa = { .vaddr = &tempaddr, | 
| Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1088 | 				.numpages = numpages, | 
 | 1089 | 				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1090 | 				.mask_clr = __pgprot(0), | 
 | 1091 | 				.flags = 0}; | 
| Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1092 |  | 
| Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1093 | 	/* | 
 | 1094 | 	 * No alias checking needed when setting the present flag; | 
 | 1095 | 	 * otherwise we may need to break large pages for 64-bit kernel | 
 | 1096 | 	 * text mappings (which adds complexity, especially if we want | 
 | 1097 | 	 * to do this from atomic context). Let's keep it simple! | 
 | 1098 | 	 */ | 
 | 1099 | 	return __change_page_attr_set_clr(&cpa, 0); | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1100 | } | 
 | 1101 |  | 
 | 1102 | static int __set_pages_np(struct page *page, int numpages) | 
 | 1103 | { | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1104 | 	unsigned long tempaddr = (unsigned long) page_address(page); | 
 | 1105 | 	struct cpa_data cpa = { .vaddr = &tempaddr, | 
| Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1106 | 				.numpages = numpages, | 
 | 1107 | 				.mask_set = __pgprot(0), | 
| Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1108 | 				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), | 
 | 1109 | 				.flags = 0}; | 
| Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1110 |  | 
| Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1111 | 	/* | 
 | 1112 | 	 * No alias checking needed when clearing the present flag; | 
 | 1113 | 	 * otherwise we may need to break large pages for 64-bit kernel | 
 | 1114 | 	 * text mappings (which adds complexity, especially if we want | 
 | 1115 | 	 * to do this from atomic context). Let's keep it simple! | 
 | 1116 | 	 */ | 
 | 1117 | 	return __change_page_attr_set_clr(&cpa, 0); | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1118 | } | 
 | 1119 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | void kernel_map_pages(struct page *page, int numpages, int enable) | 
 | 1121 | { | 
 | 1122 | 	if (PageHighMem(page)) | 
 | 1123 | 		return; | 
| Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1124 | 	if (!enable) { | 
| Ingo Molnar | f9b8404 | 2006-06-27 02:54:49 -0700 | [diff] [blame] | 1125 | 		debug_check_no_locks_freed(page_address(page), | 
 | 1126 | 					   numpages * PAGE_SIZE); | 
| Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1127 | 	} | 
| Ingo Molnar | de5097c | 2006-01-09 15:59:21 -0800 | [diff] [blame] | 1128 |  | 
| Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1129 | 	/* | 
| Ingo Molnar | 12d6f21 | 2008-01-30 13:33:58 +0100 | [diff] [blame] | 1130 | 	 * If the page allocator is not up yet, do not call c_p_a(): | 
 | 1131 | 	 */ | 
 | 1132 | 	if (!debug_pagealloc_enabled) | 
 | 1133 | 		return; | 
 | 1134 |  | 
 | 1135 | 	/* | 
| Ingo Molnar | f8d8406 | 2008-02-13 14:09:53 +0100 | [diff] [blame] | 1136 | 	 * The return value is ignored as the calls cannot fail. | 
| Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1137 | 	 * Large pages for identity mappings are not used at boot time | 
 | 1138 | 	 * and hence no memory allocations happen during large page splits. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | 	 */ | 
| Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1140 | 	if (enable) | 
 | 1141 | 		__set_pages_p(page, numpages); | 
 | 1142 | 	else | 
 | 1143 | 		__set_pages_np(page, numpages); | 
| Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1144 |  | 
 | 1145 | 	/* | 
| Ingo Molnar | e4b71dc | 2008-01-30 13:34:04 +0100 | [diff] [blame] | 1146 | 	 * We should perform an IPI and flush all TLBs, | 
 | 1147 | 	 * but that can deadlock, so flush only the current CPU: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | 	 */ | 
 | 1149 | 	__flush_tlb_all(); | 
 | 1150 | } | 
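/*
 * The page allocator drives this hook (a sketch of the pattern, not a
 * verbatim caller):
 *
 *	free:  kernel_map_pages(page, 1 << order, 0);	// unmap
 *	alloc: kernel_map_pages(page, 1 << order, 1);	// map back
 *
 * so a stale pointer into a freed lowmem page faults immediately
 * instead of silently corrupting reused memory.
 */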
| Rafael J. Wysocki | 8a235ef | 2008-02-20 01:47:44 +0100 | [diff] [blame] | 1151 |  | 
 | 1152 | #ifdef CONFIG_HIBERNATION | 
 | 1153 |  | 
 | 1154 | bool kernel_page_present(struct page *page) | 
 | 1155 | { | 
 | 1156 | 	unsigned int level; | 
 | 1157 | 	pte_t *pte; | 
 | 1158 |  | 
 | 1159 | 	if (PageHighMem(page)) | 
 | 1160 | 		return false; | 
 | 1161 |  | 
 | 1162 | 	pte = lookup_address((unsigned long)page_address(page), &level); | 
 | 1163 | 	return (pte_val(*pte) & _PAGE_PRESENT); | 
 | 1164 | } | 
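/*
 * The hibernation snapshot code uses this to cope with pages that
 * DEBUG_PAGEALLOC has unmapped. A hedged sketch of that pattern
 * (s_page/dst are illustrative names):
 *
 *	if (kernel_page_present(s_page)) {
 *		copy_page(dst, page_address(s_page));
 *	} else {
 *		kernel_map_pages(s_page, 1, 1);
 *		copy_page(dst, page_address(s_page));
 *		kernel_map_pages(s_page, 1, 0);
 *	}
 */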
 | 1165 |  | 
 | 1166 | #endif /* CONFIG_HIBERNATION */ | 
 | 1167 |  | 
 | 1168 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 
| Arjan van de Ven | d1028a1 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1169 |  | 
 | 1170 | /* | 
 | 1171 |  * The testcases use internal knowledge of the implementation that shouldn't | 
 | 1172 |  * be exposed to the rest of the kernel. Include these directly here. | 
 | 1173 |  */ | 
 | 1174 | #ifdef CONFIG_CPA_DEBUG | 
 | 1175 | #include "pageattr-test.c" | 
 | 1176 | #endif |