#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing struct page.  X86 PAT
 * supports 3 different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS, plus a fourth state where the page's memory type
 * has not been changed from its default (a value of -1 is used to denote
 * this).
 * Note we do not support _PAGE_CACHE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags = _PGMT_DEFAULT;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	}

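	/*
	 * Only the memtype bits are updated; the cmpxchg() loop makes sure
	 * a racing update of other bits in page->flags is not lost.
	 */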
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
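
/*
 * Illustrative sketch, not part of this header: how a PAT reservation path
 * might use the helpers above to track the memory type of a RAM page.  The
 * function name and the -EBUSY policy are hypothetical; a return of -1 from
 * get_page_memtype() means the page is still in its default state.
 *
 *	static int example_reserve_ram_page(struct page *page)
 *	{
 *		if (get_page_memtype(page) != -1)
 *			return -EBUSY;
 *		set_page_memtype(page, _PAGE_CACHE_WC);
 *		return 0;
 *	}
 */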

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are left in any
 *   particular state, other than that they do not violate the coherency
 *   rules of the CPU you are running on.  Do not depend on any effects on
 *   other mappings; other CPUs may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
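
/*
 * Illustrative sketch, not part of this header: marking a kernel buffer
 * uncached for device access and restoring write-back before it is freed.
 * The allocation, its order and the error handling are hypothetical.
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	if (set_memory_uc((unsigned long)buf, 4))
 *		goto err;
 *	...
 *	set_memory_wb((unsigned long)buf, 4);
 *	free_pages((unsigned long)buf, 2);
 */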

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
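
/*
 * Illustrative sketch, not part of this header: the *_array_* variants
 * change the attribute of many pages in one call so the cache/TLB flushing
 * can be batched rather than repeated per page.  "pages", "count" and the
 * surrounding caller are hypothetical.
 *
 *	int ret = set_pages_array_wc(pages, count);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	set_pages_array_wb(pages, count);
 */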

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1
 * mapping only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get the
 * struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original virtual
 * address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
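
/*
 * Illustrative sketch, not part of this header: converting a legacy caller
 * to the preferred interface.  "vaddr" and "npages" are hypothetical.
 *
 *	old, deprecated style:
 *		set_pages_uc(virt_to_page(vaddr), npages);
 *
 *	preferred, operating on the virtual address directly:
 *		set_memory_uc((unsigned long)vaddr, npages);
 */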

void clflush_cache_range(void *addr, unsigned int size);
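
/*
 * Illustrative sketch, not part of this header: writing a descriptor that a
 * non-cache-coherent device will read, then flushing the cache lines that
 * cover it.  "desc" and "cmd" are hypothetical.
 *
 *	memcpy(desc, &cmd, sizeof(cmd));
 *	clflush_cache_range(desc, sizeof(cmd));
 */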

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */