#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Default no-op; architectures whose caches require anonymous pages to be
 * flushed before the kernel accesses them define ARCH_HAS_FLUSH_ANON_PAGE
 * and provide their own flush_anon_page().
 */
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * Default no-op; overridden by architectures that must flush the kernel
 * data cache after a page is modified through its kernel mapping.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
/*
 * Without highmem every page is permanently mapped, so the kmap family
 * reduces to page_address() plus the usual pagefault bookkeeping.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */
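
/*
 * Illustrative example (a sketch, not part of the declarations above):
 * atomic-context users pair kmap_atomic() with kunmap_atomic() on the
 * same KM_* slot, while sleepable contexts use kmap()/kunmap():
 *
 *	void *addr = kmap_atomic(page, KM_USER0);
 *	memset(addr, 0, PAGE_SIZE);
 *	kunmap_atomic(addr, KM_USER0);
 */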

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

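/*
 * Illustrative example (a sketch of a typical caller): an anonymous page
 * fault handler would normally allocate its new page with
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		goto oom;
 *
 * so the page starts out zeroed and sits where it can later be migrated
 * or reclaimed.
 */
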
static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

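/*
 * Illustrative example (a sketch of a typical caller): a filesystem that
 * needs to zero the tail of a partially written page, from byte 'from'
 * to the end of the page, can use
 *
 *	zero_user_segment(page, from, PAGE_SIZE);
 *
 * which maps the page, clears the range and flushes the dcache.
 */
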
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */