/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

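/*
 * dprintk() output below is compiled in unconditionally, but is only
 * emitted when the kernel is booted with the "debugpat" parameter
 * (see pat_debug_setup() above).
 */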
#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

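/*
 * PAT(x, y) places memory type PAT_y into byte x of the 64-bit value
 * written to the IA32_PAT MSR below; entry x of the MSR is selected by
 * one combination of the PAT/PCD/PWT bits in the page tables.
 */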
#define PAT(x, y)       ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;

        if (!pat_enabled)
                return;

        /* Paranoia check. */
        if (!cpu_has_pat && boot_pat_state) {
                /*
                 * If this happens we are on a secondary CPU, but
                 * switched to PAT on the boot CPU. We have no way to
                 * undo PAT.
                 */
                printk(KERN_ERR "PAT enabled, "
                       "but not supported by secondary CPU\n");
                BUG();
        }

        /* Set PWT to Write-Combining. All other bits stay the same */
        /*
         * PTE encoding used in Linux:
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         * PAT bit unused
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);
        printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
               smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of the memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so we keep track of them here.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for each address (this allows reference counting of overlapping
 * areas). All the aliases have the same cache attributes, of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the effective memory type as PAT understands it.
 * (The numeric type values used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for an MTRR hint to get the effective type in the case
         * where the PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type == MTRR_TYPE_UNCACHABLE)
                        return _PAGE_CACHE_UC;
                if (mtrr_type == MTRR_TYPE_WRCOMB)
                        return _PAGE_CACHE_WC;
        }

        return req_type;
}

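/*
 * Check that the new range can coexist with 'entry' and with any further
 * list entries it overlaps. If 'type' is non-NULL, the new request adopts
 * the type of the first overlapping entry instead of failing; a mismatch
 * with any later overlapping entry is still reported as a conflict.
 */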
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

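/*
 * Remember the list position of the most recent reservation.
 * reserve_memtype() resumes its walk of the sorted list from here when
 * the new range starts at or above cached_start, instead of scanning
 * from the list head. free_memtype() clears the cache when the
 * corresponding entry goes away.
 */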
static struct memtype *cached_entry;
static u64 cached_start;

/*
 * For RAM pages, mark the pages as non-WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is OK, because only one driver will own the page and do the
 * set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non-WB. In the future, we will have to use one more flag
 * (or some other mechanism in struct page) to distinguish between
 * UC and WC mappings.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || PageNonWB(page))
                        goto out;

                SetPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                ClearPageNonWB(page);
        }

        return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn, end_pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                if (page_mapped(page) || !PageNonWB(page))
                        goto out;

                ClearPageNonWB(page);
        }
        return 0;

out:
        end_pfn = pfn;
        for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
                page = pfn_to_page(pfn);
                SetPageNonWB(page);
        }
        return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have the special value '-1' when the requester wants to
 * inherit the memory type from the MTRR (if WB) or from existing PAT,
 * defaulting to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the available type in new_type in case of no error. In case of any error
 * it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (is_ISA_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        if (req_type == -1) {
                /*
                 * Call mtrr_lookup to get the type hint. This is an
                 * optimization for /dev/mem mmap'ers into WB memory (BIOS
                 * tools and ACPI tools). Use WB request for WB memory and use
                 * UC_MINUS otherwise.
                 */
                u8 mtrr_type = mtrr_type_lookup(start, end);

                if (mtrr_type == MTRR_TYPE_WRBACK)
                        actual_type = _PAGE_CACHE_WB;
                else
                        actual_type = _PAGE_CACHE_UC_MINUS;
        } else {
                actual_type = pat_x_mtrr_type(start, end,
                                              req_type & _PAGE_CACHE_MASK);
        }

        is_range_ram = pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return reserve_ram_pages_type(start, end, req_type, new_type);
        else if (is_range_ram < 0)
                return -EINVAL;

        new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start      = start;
        new->end        = end;
        new->type       = actual_type;

        if (new_type)
                *new_type = actual_type;

        spin_lock(&memtype_lock);

        if (cached_entry && start >= cached_start)
                entry = cached_entry;
        else
                entry = list_entry(&memtype_list, struct memtype, nd);
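        /*
         * When there is no usable cached position, 'entry' above is a
         * pseudo-entry built from the list head itself, so the _continue
         * iterator below starts at the first real element of memtype_list.
         */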

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        cached_entry = list_entry(where, struct memtype, nd);
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                                cached_entry = list_entry(where,
                                                        struct memtype, nd);
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                cached_entry = list_entry(entry->nd.prev,
                                                        struct memtype, nd);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        cached_start = start;

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (is_ISA_range(start, end - 1))
                return 0;

        is_range_ram = pagerange_is_ram(start, end);
        if (is_range_ram == 1)
                return free_ram_pages_type(start, end);
        else if (is_range_ram < 0)
                return -EINVAL;

        spin_lock(&memtype_lock);
        list_for_each_entry(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        if (cached_entry == entry || cached_start == start)
                                cached_entry = NULL;

                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                }
        }
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                        current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}


pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = -1;
        int retval;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        /*
         * With O_SYNC, we can only take a UC_MINUS mapping. Fail if we cannot.
         *
         * Without O_SYNC, we want to get
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        if (flags != -1) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
                retval = reserve_memtype(offset, offset + size, -1, &flags);
        }

        if (retval < 0)
                return 0;

        if (((pfn < max_low_pfn_mapped) ||
             (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
            ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
                printk(KERN_INFO
                "%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        offset, (unsigned long long)(offset + size));
                return 0;
        }

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

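/*
 * map_devmem()/unmap_devmem() keep the memtype list in sync with /dev/mem
 * mappings. Note that map_devmem() only warns when the reserved type
 * differs from the requested one; it does not undo the mapping.
 */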
void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
        u64 addr = (u64)pfn << PAGE_SHIFT;
        unsigned long flags;

        reserve_memtype(addr, addr + size, want_flags, &flags);
        if (flags != want_flags) {
                printk(KERN_INFO
                "%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
                        current->comm, current->pid,
                        cattr_name(want_flags),
                        addr, (unsigned long long)(addr + size),
                        cattr_name(flags));
        }
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
        u64 addr = (u64)pfn << PAGE_SHIFT;

        free_memtype(addr, addr + size);
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

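/*
 * Expose the current memtype list through debugfs (typically
 * /sys/kernel/debug/x86/pat_memtype_list) using the seq_file interface.
 */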
/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */