#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;	/* Root of this anon_vma tree */
	struct mutex mutex;	/* Serialize access to vma list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma or page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for cleaning up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* Chain of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct list_head same_anon_vma;	/* locked by anon_vma->mutex */
};
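
/*
 * Illustrative sketch of how the two lists are typically walked (visit() is
 * a placeholder; vma->anon_vma_chain is the list head in vm_area_struct),
 * holding the locks named in the comments above:
 *
 *	struct anon_vma_chain *avc;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		visit(avc->anon_vma);
 *
 *	list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
 *		visit(avc->vma);
 */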

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
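
/*
 * Illustrative sketch: the low byte selects an action and the high bits are
 * modifiers, so callers typically OR them together and recover the action
 * with TTU_ACTION() (defined below), e.g.:
 *
 *	enum ttu_flags flags = TTU_UNMAP | TTU_IGNORE_MLOCK;
 *
 *	if (TTU_ACTION(flags) == TTU_UNMAP)
 *		...
 */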

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}
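
/*
 * Illustrative sketch: a caller that must keep the anon_vma alive across a
 * window in which the vma (and its implicit reference) may go away typically
 * pairs the two helpers, e.g.:
 *
 *	get_anon_vma(anon_vma);
 *	... work that may outlive the vma ...
 *	put_anon_vma(anon_vma);
 *
 * put_anon_vma() frees the anon_vma if this was the last reference.
 */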

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}
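
/*
 * Illustrative sketch of the encoding this test relies on: for anonymous
 * pages, mm/rmap.c stores the anon_vma pointer in page->mapping with
 * PAGE_MAPPING_ANON set in the low bits, roughly:
 *
 *	page->mapping = (struct address_space *)
 *			((void *)anon_vma + PAGE_MAPPING_ANON);
 *
 * so masking with PAGE_MAPPING_FLAGS distinguishes anon from file-backed
 * pages before page_rmapping() strips the flag bits again.
 */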

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	mutex_unlock(&anon_vma->root->mutex);
}
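
/*
 * Illustrative sketch: all of the helpers above lock the root anon_vma's
 * mutex, so a single lock covers the whole tree of forked anon_vmas. A
 * typical caller brackets list manipulation with the lock, e.g.:
 *
 *	anon_vma_lock(anon_vma);
 *	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 *	anon_vma_unlock(anon_vma);
 */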

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_moveto_tail(struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

bool is_vma_temporary_stack(struct vm_area_struct *vma);

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}
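
/*
 * Illustrative sketch: page_check_address() returns the pte mapping the page
 * with the page table lock held (through *ptlp), so callers inspect or
 * modify the pte and then drop both together, e.g.:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *	if (pte) {
 *		... inspect or update *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */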

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
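
/*
 * Illustrative sketch: rmap_walk() calls rmap_one() for each vma that may
 * map the page and stops early if the callback returns anything other than
 * SWAP_AGAIN, so a minimal callback (my_rmap_one() is a placeholder name)
 * looks like:
 *
 *	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *			       unsigned long address, void *arg)
 *	{
 *		... examine the mapping of page at address in vma ...
 *		return SWAP_AGAIN;	(keep walking)
 *	}
 *
 *	rmap_walk(page, my_rmap_one, arg);
 */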

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}


#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
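
/*
 * Illustrative sketch: reclaim-style callers typically switch on the result,
 * roughly as mm/vmscan.c does:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		give up and keep the page active;
 *	case SWAP_AGAIN:
 *		keep the page and retry later;
 *	case SWAP_MLOCK:
 *		treat the page as mlocked;
 *	case SWAP_SUCCESS:
 *		the page is fully unmapped, continue with pageout;
 *	}
 */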

#endif	/* _LINUX_RMAP_H */