#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

/* Caches aren't brain-dead on x86, so these are all no-ops. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy((dst), (src), (len))


/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are left in any
 *   particular state, beyond not violating the rules of the CPU you have.
 *   Do not depend on any effects on other mappings; CPUs other than the
 *   one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
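
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * holding a page-aligned kernel virtual address 'vaddr' that covers
 * 'nr_pages' pages (both hypothetical, caller-owned values) might make
 * the range uncached for device use and later restore write-back
 * caching roughly like this:
 *
 *	int err = set_memory_uc(vaddr, nr_pages);
 *	if (err)
 *		return err;
 *	... use the buffer uncached ...
 *	set_memory_wb(vaddr, nr_pages);
 *
 * As noted above, this provides no exclusion against other callers
 * changing attributes of the same physical pages.
 */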

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions above for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1
 * mapping only, which makes them of limited general use.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
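
/*
 * Conversion sketch (illustrative only; 'vaddr' is a hypothetical kernel
 * virtual address owned by the caller): old code along the lines of
 *
 *	struct page *p = vmalloc_to_page(vaddr);
 *	set_pages_ro(p, 1);
 *
 * only changes the 1:1 kernel mapping of that page, not the mapping the
 * caller actually uses. As described above, such code should use the
 * set_memory_* API on the original virtual address instead:
 *
 *	set_memory_ro((unsigned long)vaddr, 1);
 */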


void clflush_cache_range(void *addr, unsigned int size);

void cpa_init(void);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */