#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
|  | 28 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | 
|  | 29 | { | 
|  | 30 | if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) | 
|  | 31 | return __ksm_enter(mm); | 
|  | 32 | return 0; | 
|  | 33 | } | 
|  | 34 |  | 
| Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 35 | static inline void ksm_exit(struct mm_struct *mm) | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 36 | { | 
|  | 37 | if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) | 
| Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 38 | __ksm_exit(mm); | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 39 | } | 
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 40 |  | 
|  | 41 | /* | 
|  | 42 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | 
|  | 43 | * which KSM maps into multiple mms, wherever identical anonymous page content | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 44 | * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any | 
|  | 45 | * anon_vma, but to that page's node of the stable tree. | 
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 46 | */ | 
|  | 47 | static inline int PageKsm(struct page *page) | 
|  | 48 | { | 
| Hugh Dickins | 3ca7b3c | 2009-12-14 17:58:57 -0800 | [diff] [blame] | 49 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == | 
|  | 50 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | 
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 51 | } | 
|  | 52 |  | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 53 | static inline struct stable_node *page_stable_node(struct page *page) | 
|  | 54 | { | 
|  | 55 | return PageKsm(page) ? page_rmapping(page) : NULL; | 
|  | 56 | } | 
|  | 57 |  | 
|  | 58 | static inline void set_page_stable_node(struct page *page, | 
|  | 59 | struct stable_node *stable_node) | 
|  | 60 | { | 
|  | 61 | page->mapping = (void *)stable_node + | 
|  | 62 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | 
|  | 63 | } | 
|  | 64 |  | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 65 | /* | 
|  | 66 | * When do_swap_page() first faults in from swap what used to be a KSM page, | 
|  | 67 | * no problem, it will be assigned to this vma's anon_vma; but thereafter, | 
|  | 68 | * it might be faulted into a different anon_vma (or perhaps to a different | 
|  | 69 | * offset in the same anon_vma).  do_swap_page() cannot do all the locking | 
|  | 70 | * needed to reconstitute a cross-anon_vma KSM page: for now it has to make | 
|  | 71 | * a copy, and leave remerging the pages to a later pass of ksmd. | 
|  | 72 | * | 
|  | 73 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, | 
|  | 74 | * but what if the vma was unmerged while the page was swapped out? | 
|  | 75 | */ | 
| Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 76 | static inline int ksm_might_need_to_copy(struct page *page, | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 77 | struct vm_area_struct *vma, unsigned long address) | 
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 78 | { | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 79 | struct anon_vma *anon_vma = page_anon_vma(page); | 
|  | 80 |  | 
| Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 81 | return anon_vma && | 
|  | 82 | (anon_vma->root != vma->anon_vma->root || | 
|  | 83 | page->index != linear_page_index(vma, address)); | 
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 84 | } | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 85 |  | 
int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

/* Without CONFIG_KSM there is no state to inherit at fork: report success */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
|  | 99 |  | 
/* Without CONFIG_KSM there is nothing to tear down at mm exit */
static inline void ksm_exit(struct mm_struct *mm)
{
}
| Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 103 |  | 
/* Without CONFIG_KSM no page can ever be a KSM page */
static inline int PageKsm(struct page *page)
{
	return 0;
}
|  | 108 |  | 
#ifdef CONFIG_MMU
/* MADV_MERGEABLE/MADV_UNMERGEABLE are no-ops without CONFIG_KSM */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
|  | 115 |  | 
/* No KSM pages exist, so a swapped-in page never needs copying */
static inline int ksm_might_need_to_copy(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	return 0;
}
|  | 121 |  | 
/* No KSM pages to walk: report zero references */
static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}
|  | 127 |  | 
/* No KSM pages to unmap: nothing to do */
static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 132 |  | 
/* No KSM rmap to walk: the callback is never invoked */
static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}
|  | 138 |  | 
/* No KSM stable-tree state to transfer during page migration */
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */