/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

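/*
 * Initialise the metadata (compound order, head/tail page linkage) of a
 * freshly allocated higher-order page.  The _gigantic variant also copes
 * with tail pages that cross a MAX_ORDER_NR_PAGES discontiguity in the
 * mem_map; see the definitions in mm/page_alloc.c for the authoritative
 * behaviour.
 */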
extern void prep_compound_page(struct page *page, unsigned long order);
extern void prep_compound_gigantic_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

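/*
 * Drop a reference without the release logic of put_page(); the caller
 * must know that this cannot take the refcount to zero (or must handle
 * freeing the page itself).
 */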
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern unsigned long highest_memmap_pfn;
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already held when this is used, so we don't need
 * atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}
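
/*
 * Illustrative sketch (not a caller in this file): with zone->lock held,
 * the buddy allocator can test whether a neighbouring page is a free
 * buddy of matching order roughly like so:
 *
 *	if (PageBuddy(buddy) && page_order(buddy) == order)
 *		... merge 'buddy' with the page being freed ...
 */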

#ifdef CONFIG_HAVE_MLOCK
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
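/*
 * Munlock every page in the vma; a convenience wrapper for the case
 * where an entire VM_LOCKED vma is going away (e.g. on exit or unmap).
 */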
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy() to
 * migrate the Unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
 * Called only in the fault path, via page_evictable(), for a new page,
 * to determine whether it is being mapped into a LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * Must be called with the vma's mmap_sem held for read and the page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}
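
/*
 * Note: __dec_zone_page_state()/__inc_zone_page_state() above (and in
 * free_page_mlock() below) are the non-irq-safe statistics variants;
 * the surrounding local_irq_save()/local_irq_restore() pair is what
 * makes their use safe here.
 */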

/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * The page should not be on the LRU, so there is no need to fix that up;
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }

#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
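
/*
 * Illustrative sketch (hypothetical caller; 'order' and the per-page work
 * are assumptions): visiting every subpage of a gigantic page 'base'
 * while tolerating mem_map discontiguities might look like
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < (1 << order); ) {
 *		... operate on subpage p ...
 *		i++;
 *		p = mem_map_next(p, base, i);
 *	}
 */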

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
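
/*
 * For example (illustrative), a function on the paging-initialisation
 * path is declared
 *
 *	void __paginginit free_area_init_node(int nid, ...);
 *
 * so that it is discarded after boot on FLATMEM/DISCONTIGMEM but kept
 * around for memory hotplug on SPARSEMEM.
 */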

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
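
/*
 * Example usage (illustrative; the prefix and arguments here are
 * hypothetical):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		"Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		nid, zone_id, start_pfn, end_pfn);
 */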

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
#define GUP_FLAGS_IGNORE_SIGKILL         0x8

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);
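
/*
 * Illustrative sketch (hypothetical caller; 'addr' is an assumption):
 * pinning one writable page at user address 'addr' for the current task,
 * overriding vma protections:
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = __get_user_pages(current, current->mm, addr, 1,
 *			GUP_FLAGS_WRITE | GUP_FLAGS_FORCE, &page, NULL);
 */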

#endif