/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
        pat_disable("PAT support disabled.");
        return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
        (void)reason;
}
#endif

static int debug_enable;

static int __init pat_debug_setup(char *str)
{
        debug_enable = 1;
        return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
        do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)

static u64 __read_mostly boot_pat_state;

enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_WT = 4,             /* Write Through */
        PAT_WP = 5,             /* Write Protected */
        PAT_WB = 6,             /* Write Back (default) */
        PAT_UC_MINUS = 7,       /* UC, but can be overridden by MTRR */
};

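/*
 * PAT(x, y) places the one-byte encoding for memory type y into byte x of
 * the 64-bit value programmed into MSR_IA32_CR_PAT below; for example,
 * PAT(2, UC_MINUS) expands to 0x07ULL << 16.
 */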
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
        u64 pat;
        bool boot_cpu = !boot_pat_state;

        if (!pat_enabled)
                return;

        if (!cpu_has_pat) {
                if (!boot_pat_state) {
                        pat_disable("PAT not supported by CPU.");
                        return;
                } else {
                        /*
                         * If this happens we are on a secondary CPU, but
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
                        printk(KERN_ERR "PAT enabled, "
                               "but not supported by secondary CPU\n");
                        BUG();
                }
        }

        /*
         * Program the PAT entries so that the low four match the PTE
         * encoding used in Linux (entries 4-7 mirror them, since the
         * PAT bit itself is left unused):
         *      PAT
         *      |PCD
         *      ||PWT
         *      |||
         *      000 WB          _PAGE_CACHE_WB
         *      001 WC          _PAGE_CACHE_WC
         *      010 UC-         _PAGE_CACHE_UC_MINUS
         *      011 UC          _PAGE_CACHE_UC
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

        /* Boot CPU check */
        if (!boot_pat_state)
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

        wrmsrl(MSR_IA32_CR_PAT, pat);

        if (boot_cpu)
                printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
                       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
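
/*
 * For reference, the expression above evaluates to 0x0007010600070106:
 * byte i of the MSR holds the encoding for PAT entry i, so entries 0-3
 * are WB (0x06), WC (0x01), UC- (0x07) and UC (0x00), and entries 4-7
 * mirror them because the PTE's PAT bit is left unused.
 */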

static char *cattr_name(unsigned long flags)
{
        switch (flags & _PAGE_CACHE_MASK) {
        case _PAGE_CACHE_UC:            return "uncached";
        case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
        case _PAGE_CACHE_WB:            return "write-back";
        case _PAGE_CACHE_WC:            return "write-combining";
        default:                        return "broken";
        }
}

/*
 * The global memtype list keeps track of the memory type requested for
 * specific physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so every reservation is
 * tracked here to detect such conflicts.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for the same address (this allows reference counting of
 * overlapping areas). All aliases of an area carry the same cache
 * attributes, of course. Zero attributes are represented as holes.
 *
 * The same entries are additionally organized as an rbtree sorted on
 * the start address of the memtype range, for fast lookup.
 *
 * memtype_lock protects both the linear list and the rbtree.
 */

struct memtype {
        u64                     start;
        u64                     end;
        unsigned long           type;
        struct list_head        nd;
        struct rb_node          rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */

static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
        struct rb_node *node = root->rb_node;
        struct memtype *last_lower = NULL;

        while (node) {
                struct memtype *data = container_of(node, struct memtype, rb);

                if (data->start < start) {
                        last_lower = data;
                        node = node->rb_right;
                } else if (data->start > start) {
                        node = node->rb_left;
                } else
                        return data;
        }

        /* Returns NULL if no entry has a start address <= the given start */
        return last_lower;
}

static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;

        while (*new) {
                struct memtype *this = container_of(*new, struct memtype, rb);

                parent = *new;
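                /* Entries with equal start addresses are chained to the left */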
                if (data->start <= this->start)
                        new = &((*new)->rb_left);
                else if (data->start > this->start)
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->rb, parent, new);
        rb_insert_color(&data->rb, root);
}

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (The type encodings used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
        /*
         * Look for the MTRR hint to get the effective type in case where
         * the PAT request is for WB.
         */
        if (req_type == _PAGE_CACHE_WB) {
                u8 mtrr_type;

                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_UC_MINUS;

                return _PAGE_CACHE_WB;
        }

        return req_type;
}
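
/*
 * Example: a WB request over a range whose MTRRs say UC or WC comes back
 * as UC-, so a /dev/mem mapping of a non-WB BIOS region never becomes
 * cacheable by accident; any non-WB request is passed through unchanged.
 */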

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
        if (new->type != entry->type) {
                if (type) {
                        new->type = entry->type;
                        *type = entry->type;
                } else
                        goto conflict;
        }

        /* check overlaps with more than one entry in the list */
        list_for_each_entry_continue(entry, &memtype_list, nd) {
                if (new->end <= entry->start)
                        break;
                else if (new->type != entry->type)
                        goto conflict;
        }
        return 0;

 conflict:
        printk(KERN_INFO "%s:%d conflicting memory types "
               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
               new->end, cattr_name(new->type), cattr_name(entry->type));
        return -EBUSY;
}

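/*
 * Returns 1 if every page in [start, end) is RAM, 0 if none of it is,
 * and -1 if the range mixes RAM and non-RAM pages, which PAT cannot track.
 */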
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                /*
                 * For legacy reasons, the physical address range in the
                 * legacy ISA region is tracked as non-RAM. This allows
                 * users of /dev/mem to map portions of the legacy ISA
                 * region, even when some of those portions are listed
                 * (or not even listed) with different e820 types
                 * (RAM/reserved/...).
                 */
                if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
                    page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * memory type. Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
{
        struct page *page;
        u64 pfn;

        if (req_type == _PAGE_CACHE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
                req_type = _PAGE_CACHE_UC_MINUS;
        }

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                unsigned long type;

                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
                        printk(KERN_INFO "reserve_ram_pages_type failed "
                                "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
                                start, end, type, req_type);
                        if (new_type)
                                *new_type = type;

                        return -EBUSY;
                }
        }

        if (new_type)
                *new_type = req_type;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, req_type);
        }
        return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
        struct page *page;
        u64 pfn;

        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
                set_page_memtype(page, -1);
        }
        return 0;
}

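/* Default x86_platform.is_untracked_pat_range(): only legacy ISA space */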
int default_is_untracked_pat_range(u64 start, u64 end)
{
        return is_ISA_range(start, end);
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type may also have the special value '-1', when the requester wants
 * to inherit the memory type from the MTRR (if WB) or an existing PAT
 * entry, defaulting to UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the actually available type through new_type on success. On any error it
 * returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
{
        struct memtype *new, *entry;
        unsigned long actual_type;
        struct list_head *where;
        int is_range_ram;
        int err = 0;

        BUG_ON(start >= end); /* end is exclusive */

        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == -1)
                                *new_type = _PAGE_CACHE_WB;
                        else if (req_type == _PAGE_CACHE_WC)
                                *new_type = _PAGE_CACHE_UC_MINUS;
                        else
                                *new_type = req_type & _PAGE_CACHE_MASK;
                }
                return 0;
        }

        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end - 1)) {
                if (new_type)
                        *new_type = _PAGE_CACHE_WB;
                return 0;
        }

        /*
         * Call mtrr_lookup to get the type hint. This is an
         * optimization for /dev/mem mmap'ers into WB memory (BIOS
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
        actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

        if (new_type)
                *new_type = actual_type;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = reserve_ram_pages_type(start, end, req_type, new_type);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        new->start = start;
        new->end = end;
        new->type = actual_type;

        spin_lock(&memtype_lock);

        /* Search for existing mapping that overlaps the current range */
        where = NULL;
        list_for_each_entry(entry, &memtype_list, nd) {
                if (end <= entry->start) {
                        where = entry->nd.prev;
                        break;
                } else if (start <= entry->start) { /* end > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);
                                where = entry->nd.prev;
                        }
                        break;
                } else if (start < entry->end) { /* start > entry->start */
                        err = chk_conflict(new, entry, new_type);
                        if (!err) {
                                dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                        entry->start, entry->end);

                                /*
                                 * Move to right position in the linked
                                 * list to add this new entry
                                 */
                                list_for_each_entry_continue(entry,
                                                        &memtype_list, nd) {
                                        if (start <= entry->start) {
                                                where = entry->nd.prev;
                                                break;
                                        }
                                }
                        }
                        break;
                }
        }

        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                       start, end, cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);

                return err;
        }

        if (where)
                list_add(&new->nd, where);
        else
                list_add_tail(&new->nd, &memtype_list);

        memtype_rb_insert(&memtype_rbroot, new);

        spin_unlock(&memtype_lock);

        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
                start, end, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");

        return err;
}
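
/*
 * Example (hypothetical driver code; 'base' and 'size' are illustrative):
 * reserve a write-combining mapping for a device aperture, accepting any
 * compatible type PAT hands back:
 *
 *      unsigned long type;
 *
 *      if (reserve_memtype(base, base + size, _PAGE_CACHE_WC, &type))
 *              return -EBUSY;
 *      ...  map and use the region with 'type' ...
 *      free_memtype(base, base + size);
 */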

int free_memtype(u64 start, u64 end)
{
        struct memtype *entry, *saved_entry;
        int err = -EINVAL;
        int is_range_ram;

        if (!pat_enabled)
                return 0;

        /* Low ISA region is always mapped WB. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end - 1))
                return 0;

        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {

                spin_lock(&memtype_lock);
                err = free_ram_pages_type(start, end);
                spin_unlock(&memtype_lock);

                return err;
        } else if (is_range_ram < 0) {
                return -EINVAL;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, start);
        if (unlikely(entry == NULL))
                goto unlock_ret;

        /*
         * The search returned an entry whose start address is the same as
         * or less than what we searched for. Now walk the list, which is
         * kept sorted by start address, in both directions to find the
         * entry that matches both start and end.
         */
        saved_entry = entry;
        list_for_each_entry_from(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start > start) {
                        break;
                }
        }

        if (!err)
                goto unlock_ret;

        entry = saved_entry;
        list_for_each_entry_reverse(entry, &memtype_list, nd) {
                if (entry->start == start && entry->end == end) {
                        rb_erase(&entry->rb, &memtype_rbroot);
                        list_del(&entry->nd);
                        kfree(entry);
                        err = 0;
                        break;
                } else if (entry->start < start) {
                        break;
                }
        }
unlock_ret:
        spin_unlock(&memtype_lock);

        if (err) {
                printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
                        current->comm, current->pid, start, end);
        }

        dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

        return err;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
        int rettype = _PAGE_CACHE_WB;
        struct memtype *entry;

        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE - 1))
                return rettype;

        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;

                spin_lock(&memtype_lock);
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
                spin_unlock(&memtype_lock);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
                        rettype = _PAGE_CACHE_WB;

                return rettype;
        }

        spin_lock(&memtype_lock);

        entry = memtype_rb_search(&memtype_rbroot, paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
                rettype = _PAGE_CACHE_UC_MINUS;

        spin_unlock(&memtype_lock);
        return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: pointer to the requested memtype; on success it is updated with
 * the requested type or any other compatible type that was available for
 * the region
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
                        unsigned long *type)
{
        resource_size_t size = end - start;
        unsigned long req_type = *type;
        unsigned long new_type;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

        ret = reserve_memtype(start, end, req_type, &new_type);
        if (ret)
                goto out_err;

        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

        if (kernel_map_sync_memtype(start, size, new_type) < 0)
                goto out_free;

        *type = new_type;
        return 0;

out_free:
        free_memtype(start, end);
        ret = -EBUSY;
out_err:
        return ret;
}
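
/*
 * Example (hypothetical caller, in the style of an ioremap helper; 'res'
 * is an illustrative struct resource):
 *
 *      unsigned long type = _PAGE_CACHE_WC;
 *
 *      if (io_reserve_memtype(res->start, res->end, &type))
 *              return NULL;
 *      ...  map the region using 'type' ...
 *      io_free_memtype(res->start, res->end);
 */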

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
        free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t vma_prot)
{
        return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        if (!pat_enabled)
                return 1;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
{
        unsigned long flags = _PAGE_CACHE_WB;

        if (!range_is_allowed(pfn, size))
                return 0;

        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC_MINUS;
        }

#ifdef CONFIG_X86_32
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting UC or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (!pat_enabled &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
                flags = _PAGE_CACHE_UC;
        }
#endif

        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
                             flags);
        return 1;
}

/*
 * Change the memory type for the physical address range in the kernel
 * identity mapping space, if that range is part of the identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
        unsigned long id_sz;

        if (base >= __pa(high_memory))
                return 0;

        id_sz = (__pa(high_memory) < base + size) ?
                                __pa(high_memory) - base :
                                size;

        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
                printk(KERN_INFO
                        "%s:%d ioremap_change_attr failed %s "
                        "for %Lx-%Lx\n",
                        current->comm, current->pid,
                        cattr_name(flags),
                        base, (unsigned long long)(base + size));
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
{
        int is_ram = 0;
        int ret;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
        unsigned long flags = want_flags;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);

        /*
         * reserve_pfn_range() for RAM pages. We do not refcount to keep
         * track of the number of mappings of RAM pages. We can assert that
         * the type requested matches the type of the first page in the range.
         */
        if (is_ram) {
                if (!pat_enabled)
                        return 0;

                flags = lookup_memtype(paddr);
                if (want_flags != flags) {
                        printk(KERN_WARNING
                                "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
                                             flags);
                }
                return 0;
        }

        ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
        if (ret)
                return ret;

        if (flags != want_flags) {
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for %Lx-%Lx, got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size),
                                cattr_name(flags));
                        return -EINVAL;
                }
                /*
                 * We allow returning a different type than the one requested
                 * in the non-strict case.
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
                                     flags);
        }

        if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
        return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
        int is_ram;

        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
        if (is_ram == 0)
                free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (is_linear_pfn_mapping(vma)) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

        return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range() call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
                        unsigned long pfn, unsigned long size)
{
        unsigned long flags;
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* reserve the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return reserve_pfn_range(paddr, vma_size, prot, 0);
        }

        if (!pat_enabled)
                return 0;

        /* for vm_insert_pfn and friends, we set prot based on lookup */
        flags = lookup_memtype(pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
                         flags);

        return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size)
{
        resource_size_t paddr;
        unsigned long vma_size = vma->vm_end - vma->vm_start;

        if (is_linear_pfn_mapping(vma)) {
                /* free the whole chunk starting from vm_pgoff */
                paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                free_pfn_range(paddr, vma_size);
                return;
        }
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
        if (pat_enabled)
                return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
        else
                return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
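
/*
 * Example (hypothetical driver mmap handler): callers need not check
 * pat_enabled themselves, since this helper degrades to pgprot_noncached()
 * when PAT is off:
 *
 *      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 */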

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
        struct memtype *list_node, *print_entry;
        int i = 1;

        print_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;

        spin_lock(&memtype_lock);
        list_for_each_entry(list_node, &memtype_list, nd) {
                if (pos == i) {
                        *print_entry = *list_node;
                        spin_unlock(&memtype_lock);
                        return print_entry;
                }
                ++i;
        }
        spin_unlock(&memtype_lock);
        kfree(print_entry);

        return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                ++*pos;
                seq_printf(seq, "PAT memtype list:\n");
        }

        return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
        struct memtype *print_entry = (struct memtype *)v;

        seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
                        print_entry->start, print_entry->end);
        kfree(print_entry);

        return 0;
}

static const struct seq_operations memtype_seq_ops = {
        .start = memtype_seq_start,
        .next  = memtype_seq_next,
        .stop  = memtype_seq_stop,
        .show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
        .open    = memtype_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
        debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
                            NULL, &memtype_fops);
        return 0;
}

late_initcall(pat_memtype_list_init);
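
/*
 * With debugfs mounted at /sys/kernel/debug, the tracked reservations can
 * then be inspected at runtime (sample output; actual entries depend on
 * the running system):
 *
 *      # cat /sys/kernel/debug/x86/pat_memtype_list
 *      PAT memtype list:
 *      write-combining @ 0xd0000000-0xd8000000
 */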

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */