#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

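/*
 * No-op fallbacks: architectures whose caches need extra maintenance when
 * the kernel touches anonymous or vmap'd pages override these via
 * <asm/cacheflush.h>; everyone else gets empty stubs.
 */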
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL

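/*
 * Without CONFIG_HIGHMEM every page is permanently mapped into the
 * kernel's address space, so the kmap family can degenerate to
 * page_address() plus, for the atomic variants, pagefault bookkeeping.
 */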
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

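/*
 * __kmap_atomic_idx is a per-CPU depth counter for nested atomic
 * mappings: push hands out the next free fixmap slot index, pop
 * releases the most recent one, stack-fashion.
 */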
static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx > KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap():
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)                                     \
do {                                                            \
	BUILD_BUG_ON(__same_type((addr), struct page *));       \
	__kunmap_atomic(addr);                                  \
} while (0)
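
/*
 * Illustrative usage (not part of this header): map, touch, unmap,
 * passing kunmap_atomic() the address returned by kmap_atomic():
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);		/- the address, never the page -/
 */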

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
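
/*
 * Illustrative call site (hypothetical, not from this file): a fault
 * handler wanting a zeroed, migratable page for userspace might do
 *
 *	struct page *page =
 *		alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */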

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

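/*
 * zero_user_segments() clears up to two byte ranges, [start1, end1) and
 * [start2, end2), within one page under a single atomic mapping, then
 * flushes the dcache; pass an empty second range (0, 0) to clear just one.
 */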
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

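/*
 * Generic fallback: copy a user page via two short-lived atomic
 * mappings. Architectures with aliasing caches define
 * __HAVE_ARCH_COPY_USER_HIGHPAGE and supply their own version.
 */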
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */