/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hash.h>
#include <linux/freezer.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */

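/*
 * As an illustration only - the real logic lives in cmp_and_merge_page(),
 * further down this file, and the names below are sketched from it - the
 * per-page decision flow described above boils down to roughly:
 *
 *	kpage = stable_tree_search(page);
 *	if (kpage) {
 *		try to merge page into kpage;	(point the pte at the ksm page)
 *		return;
 *	}
 *	checksum = calc_checksum(page);
 *	if (checksum != rmap_item->oldchecksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;				(too volatile: skip this pass)
 *	}
 *	tree_rmap_item = unstable_tree_search_insert(...);
 *	if (tree_rmap_item)
 *		merge the two pages, and insert the result as a new
 *		node of the stable tree;
 */
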
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page
 */
struct stable_node {
	struct rb_node node;
	struct hlist_head hlist;
	unsigned long kpfn;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

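/*
 * A tracked address is always page-aligned, so its low bits are free to
 * carry state.  As a sketch of how the flags above are used when a page
 * goes into the unstable tree (the actual insertion code is further down
 * this file, beyond this excerpt):
 *
 *	rmap_item->address |= UNSTABLE_FLAG;
 *	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
 *
 * and the plain virtual address is recovered at any time with
 * rmap_item->address & PAGE_MASK.
 */
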
/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_SHIFT 10
#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

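/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) expands to
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 *
 * giving each cache a "ksm_"-prefixed name visible in /proc/slabinfo.
 */
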
static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}

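/*
 * Typical caller pattern, sketched (the actual registration happens in
 * __ksm_enter(), beyond this excerpt): allocate a slot, then hash it and
 * link it onto the mm_list under ksm_mmlist_lock, so that both get_mm_slot()
 * and ksmd's list walk can find it.
 *
 *	mm_slot = alloc_mm_slot();
 *	if (!mm_slot)
 *		return -ENOMEM;
 *	spin_lock(&ksm_mmlist_lock);
 *	insert_to_mm_slots_hash(mm, mm_slot);
 *	list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
 *	spin_unlock(&ksm_mmlist_lock);
 */
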
static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

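/*
 * The usual shape of that test, as a sketch: take mmap_sem, bail out if
 * the mm is already exiting, and only then touch its page tables (see
 * find_mergeable_vma() and try_to_merge_with_ksm_page() below).
 *
 *	down_read(&mm->mmap_sem);
 *	if (ksm_test_exit(mm))
 *		goto out;	(mm is exiting: back out, touch nothing)
 *	... work on mm's pages ...
 * out:
 *	up_read(&mm->mmap_sem);
 */
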
/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *page_trans_compound_anon(struct page *page)
{
	if (PageTransCompound(page)) {
		struct page *head = compound_trans_head(page);
		/*
		 * head may actually be split and freed out from under
		 * us, but that's OK here.
		 */
		if (PageAnon(head))
			return head;
	}
	return NULL;
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	rb_erase(&stable_node->node, &root_stable_tree);
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * If so, we can trust the content of the page, and it is returned
 * with its refcount raised; but if the page has since been zapped,
 * the stale node is removed from the stable tree and NULL is returned.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 *
 * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
 * but this is different - made simpler by ksm_thread_mutex being held, but
 * interesting for assuming that no other use of the struct page could ever
 * put our expected_mapping into page->mapping (or a field of the union which
 * coincides with page->mapping).  The RCU calls are not for KSM at all, but
 * to keep the page_count protocol described with page_cache_get_speculative.
 *
 * Note: it is possible that get_ksm_page() will return NULL one moment,
 * then page the next, if the page is in between page_freeze_refs() and
 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node)
{
	struct page *page;
	void *expected_mapping;

	page = pfn_to_page(stable_node->kpfn);
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	rcu_read_lock();
	if (page->mapping != expected_mapping)
		goto stale;
	if (!get_page_unless_zero(page))
		goto stale;
	if (page->mapping != expected_mapping) {
		put_page(page);
		goto stale;
	}
	rcu_read_unlock();
	return page;
stale:
	rcu_read_unlock();
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

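/*
 * The "key" checked above is set when a page becomes a ksm page: its
 * page->mapping is pointed back at the stable_node, tagged with both
 * mapping-type bits.  Sketched - set_page_stable_node(), used later in
 * this file, does the equivalent of:
 *
 *	page->mapping = (void *)stable_node +
 *			(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 *
 * so a freed page (mapping NULL) or a reused one (mapping a real
 * address_space or plain anon_vma pointer) cannot match expected_mapping.
 */
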
/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node);
		if (!page)
			goto out;

		lock_page(page);
		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

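/*
 * Note the pointer-to-pointer cursor: because rmap_list is a singly-linked
 * list threaded through the items themselves, the caller can pass
 * &mm_slot->rmap_list or any link in the middle, and the same loop unlinks
 * from the head or from mid-list without tracking a "previous item".  For
 * example, truncating everything after the scan cursor is just (sketch of
 * how the scanner uses it, beyond this excerpt):
 *
 *	remove_trailing_rmap_items(mm_slot, ksm_scan.rmap_list);
 *
 * while unmerge_and_remove_all_rmap_items() below passes
 * &mm_slot->rmap_list to empty the whole list.
 */
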
/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

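/*
 * The checksum is never used to match pages - matching is always a full
 * memcmp via pages_identical().  Its only job is the "unchanged for a
 * period of time" test from the notes at the top of this file; roughly
 * (the real check sits in the scanner, beyond this excerpt):
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;		(still volatile: keep out of unstable tree)
 *	}
 */
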
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmun_start = addr;
	mmun_end   = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out_mn;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok, this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, so the check we are about to make
		 * of the page count against the map count is racy, and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check:
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

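/*
 * The counting identity checked above, spelled out: each pte mapping an
 * anonymous page contributes one to both page_mapcount() and page_count(),
 * the swap cache holds one extra reference if the page is in it (the
 * "+ swapped"), and KSM itself holds one more (the "+ 1", taken by
 * follow_page() with FOLL_GET in get_mergeable_page()).  So for a page
 * mapped only here and not in swap cache:
 *
 *	page_mapcount(page) == 1
 *	swapped == 0
 *	page_count(page) == 2		(the mapping, plus our reference)
 *
 * and 1 + 1 + 0 == 2 holds.  Any mismatch means someone else - O_DIRECT
 * via get_user_pages_fast(), for instance - holds an extra reference and
 * might still write to the page, so it must not be merged yet.
 */
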
/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	BUG_ON(pmd_trans_huge(*pmd));

	mmun_start = addr;
	mmun_end   = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

static int page_trans_compound_anon_split(struct page *page)
{
	int ret = 0;
	struct page *transhuge_head = page_trans_compound_anon(page);
	if (transhuge_head) {
		/* Get the reference on the head to split it. */
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck that we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page ran
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page ran from under us. */
			ret = 1;
	}
	return ret;
}

| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 855 | /* | 
 | 856 |  * try_to_merge_one_page - take two pages and merge them into one | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 857 |  * @vma: the vma that holds the pte pointing to page | 
 | 858 |  * @page: the PageAnon page that we want to replace with kpage | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 859 |  * @kpage: the PageKsm page that we want to map instead of page, | 
 | 860 |  *         or NULL the first time when we want to use page as kpage. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 861 |  * | 
 | 862 |  * This function returns 0 if the pages were merged, -EFAULT otherwise. | 
 | 863 |  */ | 
 | 864 | static int try_to_merge_one_page(struct vm_area_struct *vma, | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 865 | 				 struct page *page, struct page *kpage) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 866 | { | 
 | 867 | 	pte_t orig_pte = __pte(0); | 
 | 868 | 	int err = -EFAULT; | 
 | 869 |  | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 870 | 	if (page == kpage)			/* ksm page forked */ | 
 | 871 | 		return 0; | 
 | 872 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 873 | 	if (!(vma->vm_flags & VM_MERGEABLE)) | 
 | 874 | 		goto out; | 
| Andrea Arcangeli | 29ad768 | 2011-01-13 15:47:19 -0800 | [diff] [blame] | 875 | 	if (PageTransCompound(page) && page_trans_compound_anon_split(page)) | 
 | 876 | 		goto out; | 
 | 877 | 	BUG_ON(PageTransCompound(page)); | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 878 | 	if (!PageAnon(page)) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 879 | 		goto out; | 
 | 880 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 881 | 	/* | 
 | 882 | 	 * We need the page lock to read a stable PageSwapCache in | 
 | 883 | 	 * write_protect_page().  We use trylock_page() instead of | 
 | 884 | 	 * lock_page() because we don't want to wait here - we | 
 | 885 | 	 * prefer to continue scanning and merging different pages, | 
 | 886 | 	 * then come back to this page when it is unlocked. | 
 | 887 | 	 */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 888 | 	if (!trylock_page(page)) | 
| Hugh Dickins | 31e855e | 2009-12-14 17:59:17 -0800 | [diff] [blame] | 889 | 		goto out; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 890 | 	/* | 
 | 891 | 	 * If this anonymous page is mapped only here, its pte may need | 
 | 892 | 	 * to be write-protected.  If it's mapped elsewhere, all of its | 
 | 893 | 	 * ptes are necessarily already write-protected.  But in either | 
 | 894 | 	 * case, we need to lock and check page_count is not raised. | 
 | 895 | 	 */ | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 896 | 	if (write_protect_page(vma, page, &orig_pte) == 0) { | 
 | 897 | 		if (!kpage) { | 
 | 898 | 			/* | 
 | 899 | 			 * While we hold page lock, upgrade page from | 
 | 900 | 			 * PageAnon+anon_vma to PageKsm+NULL stable_node: | 
 | 901 | 			 * stable_tree_insert() will update stable_node. | 
 | 902 | 			 */ | 
 | 903 | 			set_page_stable_node(page, NULL); | 
 | 904 | 			mark_page_accessed(page); | 
 | 905 | 			err = 0; | 
 | 906 | 		} else if (pages_identical(page, kpage)) | 
 | 907 | 			err = replace_page(vma, page, kpage, orig_pte); | 
 | 908 | 	} | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 909 |  | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 910 | 	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { | 
| Hugh Dickins | 73848b4 | 2009-12-14 17:59:22 -0800 | [diff] [blame] | 911 | 		munlock_vma_page(page); | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 912 | 		if (!PageMlocked(kpage)) { | 
 | 913 | 			unlock_page(page); | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 914 | 			lock_page(kpage); | 
 | 915 | 			mlock_vma_page(kpage); | 
 | 916 | 			page = kpage;		/* for final unlock */ | 
 | 917 | 		} | 
 | 918 | 	} | 
| Hugh Dickins | 73848b4 | 2009-12-14 17:59:22 -0800 | [diff] [blame] | 919 |  | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 920 | 	unlock_page(page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 921 | out: | 
 | 922 | 	return err; | 
 | 923 | } | 
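 |  |  | 
 |  | /* | 
 |  |  * Recap of the guards above (descriptive note, no new logic): a | 
 |  |  * successful merge needs a VM_MERGEABLE vma, an anonymous page that | 
 |  |  * is not a transparent hugepage (or has just been split), the page | 
 |  |  * lock taken via trylock_page() so ksmd never blocks here, and | 
 |  |  * write_protect_page() confirming page_count is not raised before | 
 |  |  * the pte goes read-only; only then is replace_page() attempted. | 
 |  |  */ | 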
 | 924 |  | 
 | 925 | /* | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 926 |  * try_to_merge_with_ksm_page - like try_to_merge_two_pages, | 
 | 927 |  * but no new kernel page is allocated: kpage must already be a ksm page. | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 928 |  * | 
 | 929 |  * This function returns 0 if the pages were merged, -EFAULT otherwise. | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 930 |  */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 931 | static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, | 
 | 932 | 				      struct page *page, struct page *kpage) | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 933 | { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 934 | 	struct mm_struct *mm = rmap_item->mm; | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 935 | 	struct vm_area_struct *vma; | 
 | 936 | 	int err = -EFAULT; | 
 | 937 |  | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 938 | 	down_read(&mm->mmap_sem); | 
 | 939 | 	if (ksm_test_exit(mm)) | 
 | 940 | 		goto out; | 
 | 941 | 	vma = find_vma(mm, rmap_item->address); | 
 | 942 | 	if (!vma || vma->vm_start > rmap_item->address) | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 943 | 		goto out; | 
 | 944 |  | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 945 | 	err = try_to_merge_one_page(vma, page, kpage); | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 946 | 	if (err) | 
 | 947 | 		goto out; | 
 | 948 |  | 
 | 949 | 	/* Must get reference to anon_vma while still holding mmap_sem */ | 
| Peter Zijlstra | 9e60109 | 2011-03-22 16:32:46 -0700 | [diff] [blame] | 950 | 	rmap_item->anon_vma = vma->anon_vma; | 
 | 951 | 	get_anon_vma(vma->anon_vma); | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 952 | out: | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 953 | 	up_read(&mm->mmap_sem); | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 954 | 	return err; | 
 | 955 | } | 
 | 956 |  | 
 | 957 | /* | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 958 |  * try_to_merge_two_pages - take two identical pages and prepare them | 
 | 959 |  * to be merged into one page. | 
 | 960 |  * | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 961 |  * This function returns the kpage if we successfully merged two identical | 
 | 962 |  * pages into one ksm page, NULL otherwise. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 963 |  * | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 964 |  * Note that this function upgrades page to ksm page: if one of the pages | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 965 |  * is already a ksm page, try_to_merge_with_ksm_page should be used. | 
 | 966 |  */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 967 | static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, | 
 | 968 | 					   struct page *page, | 
 | 969 | 					   struct rmap_item *tree_rmap_item, | 
 | 970 | 					   struct page *tree_page) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 971 | { | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 972 | 	int err; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 973 |  | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 974 | 	err = try_to_merge_with_ksm_page(rmap_item, page, NULL); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 975 | 	if (!err) { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 976 | 		err = try_to_merge_with_ksm_page(tree_rmap_item, | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 977 | 							tree_page, page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 978 | 		/* | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 979 | 		 * If that fails, we have a ksm page with only one pte | 
 | 980 | 		 * pointing to it: so break it. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 981 | 		 */ | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 982 | 		if (err) | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 983 | 			break_cow(rmap_item); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 984 | 	} | 
| Hugh Dickins | 80e1482 | 2009-12-14 17:59:29 -0800 | [diff] [blame] | 985 | 	return err ? NULL : page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 986 | } | 
 | 987 |  | 
 | 988 | /* | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 989 |  * stable_tree_search - search for page inside the stable tree | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 990 |  * | 
 | 991 |  * This function checks if there is a page inside the stable tree | 
 | 992 |  * with identical content to the page that we are scanning right now. | 
 | 993 |  * | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 994 |  * This function returns the stable tree node of identical content if found, | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 995 |  * NULL otherwise. | 
 | 996 |  */ | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 997 | static struct page *stable_tree_search(struct page *page) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 998 | { | 
 | 999 | 	struct rb_node *node = root_stable_tree.rb_node; | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1000 | 	struct stable_node *stable_node; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1001 |  | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1002 | 	stable_node = page_stable_node(page); | 
 | 1003 | 	if (stable_node) {			/* ksm page forked */ | 
 | 1004 | 		get_page(page); | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1005 | 		return page; | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1006 | 	} | 
 | 1007 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1008 | 	while (node) { | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1009 | 		struct page *tree_page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1010 | 		int ret; | 
 | 1011 |  | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1012 | 		cond_resched(); | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1013 | 		stable_node = rb_entry(node, struct stable_node, node); | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1014 | 		tree_page = get_ksm_page(stable_node); | 
 | 1015 | 		if (!tree_page) | 
 | 1016 | 			return NULL; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1017 |  | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1018 | 		ret = memcmp_pages(page, tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1019 |  | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1020 | 		if (ret < 0) { | 
 | 1021 | 			put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1022 | 			node = node->rb_left; | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1023 | 		} else if (ret > 0) { | 
 | 1024 | 			put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1025 | 			node = node->rb_right; | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1026 | 		} else | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1027 | 			return tree_page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1028 | 	} | 
 | 1029 |  | 
 | 1030 | 	return NULL; | 
 | 1031 | } | 
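 |  |  | 
 |  | /* | 
 |  |  * Both trees order pages by a raw byte comparison of their contents. | 
 |  |  * A minimal sketch of such a comparator, assuming the single-argument | 
 |  |  * kmap_atomic() of this kernel vintage (illustrative only; the real | 
 |  |  * memcmp_pages() is defined earlier in this file): | 
 |  |  * | 
 |  |  *	static int memcmp_pages_sketch(struct page *page1, struct page *page2) | 
 |  |  *	{ | 
 |  |  *		char *addr1, *addr2; | 
 |  |  *		int ret; | 
 |  |  * | 
 |  |  *		addr1 = kmap_atomic(page1); | 
 |  |  *		addr2 = kmap_atomic(page2); | 
 |  |  *		ret = memcmp(addr1, addr2, PAGE_SIZE); | 
 |  |  *		kunmap_atomic(addr2); | 
 |  |  *		kunmap_atomic(addr1); | 
 |  |  *		return ret; | 
 |  |  *	} | 
 |  |  */ | 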
 | 1032 |  | 
 | 1033 | /* | 
 | 1034 |  * stable_tree_insert - insert rmap_item pointing to new ksm page | 
 | 1035 |  * into the stable tree. | 
 | 1036 |  * | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1037 |  * This function returns the stable tree node just allocated on success, | 
 | 1038 |  * NULL otherwise. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1039 |  */ | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1040 | static struct stable_node *stable_tree_insert(struct page *kpage) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1041 | { | 
 | 1042 | 	struct rb_node **new = &root_stable_tree.rb_node; | 
 | 1043 | 	struct rb_node *parent = NULL; | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1044 | 	struct stable_node *stable_node; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1045 |  | 
 | 1046 | 	while (*new) { | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1047 | 		struct page *tree_page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1048 | 		int ret; | 
 | 1049 |  | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1050 | 		cond_resched(); | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1051 | 		stable_node = rb_entry(*new, struct stable_node, node); | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1052 | 		tree_page = get_ksm_page(stable_node); | 
 | 1053 | 		if (!tree_page) | 
 | 1054 | 			return NULL; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1055 |  | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1056 | 		ret = memcmp_pages(kpage, tree_page); | 
 | 1057 | 		put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1058 |  | 
 | 1059 | 		parent = *new; | 
 | 1060 | 		if (ret < 0) | 
 | 1061 | 			new = &parent->rb_left; | 
 | 1062 | 		else if (ret > 0) | 
 | 1063 | 			new = &parent->rb_right; | 
 | 1064 | 		else { | 
 | 1065 | 			/* | 
 | 1066 | 			 * It is not a bug that stable_tree_search() didn't | 
 | 1067 | 			 * find this node: because at that time our page was | 
 | 1068 | 			 * not yet write-protected, so it may have changed since. | 
 | 1069 | 			 */ | 
 | 1070 | 			return NULL; | 
 | 1071 | 		} | 
 | 1072 | 	} | 
 | 1073 |  | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1074 | 	stable_node = alloc_stable_node(); | 
 | 1075 | 	if (!stable_node) | 
 | 1076 | 		return NULL; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1077 |  | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1078 | 	rb_link_node(&stable_node->node, parent, new); | 
 | 1079 | 	rb_insert_color(&stable_node->node, &root_stable_tree); | 
 | 1080 |  | 
 | 1081 | 	INIT_HLIST_HEAD(&stable_node->hlist); | 
 | 1082 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1083 | 	stable_node->kpfn = page_to_pfn(kpage); | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1084 | 	set_page_stable_node(kpage, stable_node); | 
 | 1085 |  | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1086 | 	return stable_node; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1087 | } | 
 | 1088 |  | 
 | 1089 | /* | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1090 |  * unstable_tree_search_insert - search for identical page, | 
 | 1091 |  * else insert rmap_item into the unstable tree. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1092 |  * | 
 | 1093 |  * This function searches for a page in the unstable tree identical to the | 
 | 1094 |  * page currently being scanned; and if no identical page is found in the | 
 | 1095 |  * tree, we insert rmap_item as a new object into the unstable tree. | 
 | 1096 |  * | 
 | 1097 |  * This function returns a pointer to the rmap_item found to be identical | 
 | 1098 |  * to the currently scanned page, NULL otherwise. | 
 | 1099 |  * | 
 | 1100 |  * This function does both searching and inserting, because they share | 
 | 1101 |  * the same walking algorithm in an rbtree. | 
 | 1102 |  */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1103 | static | 
 | 1104 | struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, | 
 | 1105 | 					      struct page *page, | 
 | 1106 | 					      struct page **tree_pagep) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1108 | { | 
 | 1109 | 	struct rb_node **new = &root_unstable_tree.rb_node; | 
 | 1110 | 	struct rb_node *parent = NULL; | 
 | 1111 |  | 
 | 1112 | 	while (*new) { | 
 | 1113 | 		struct rmap_item *tree_rmap_item; | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1114 | 		struct page *tree_page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1115 | 		int ret; | 
 | 1116 |  | 
| Hugh Dickins | d178f27 | 2009-11-09 15:58:23 +0000 | [diff] [blame] | 1117 | 		cond_resched(); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1118 | 		tree_rmap_item = rb_entry(*new, struct rmap_item, node); | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1119 | 		tree_page = get_mergeable_page(tree_rmap_item); | 
| Dan Carpenter | 22eccdd | 2010-04-23 13:18:10 -0400 | [diff] [blame] | 1120 | 		if (IS_ERR_OR_NULL(tree_page)) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1121 | 			return NULL; | 
 | 1122 |  | 
 | 1123 | 		/* | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1124 | 		 * Don't substitute a ksm page for a forked page. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1125 | 		 */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1126 | 		if (page == tree_page) { | 
 | 1127 | 			put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1128 | 			return NULL; | 
 | 1129 | 		} | 
 | 1130 |  | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1131 | 		ret = memcmp_pages(page, tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1132 |  | 
 | 1133 | 		parent = *new; | 
 | 1134 | 		if (ret < 0) { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1135 | 			put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1136 | 			new = &parent->rb_left; | 
 | 1137 | 		} else if (ret > 0) { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1138 | 			put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1139 | 			new = &parent->rb_right; | 
 | 1140 | 		} else { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1141 | 			*tree_pagep = tree_page; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1142 | 			return tree_rmap_item; | 
 | 1143 | 		} | 
 | 1144 | 	} | 
 | 1145 |  | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1146 | 	rmap_item->address |= UNSTABLE_FLAG; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1147 | 	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); | 
 | 1148 | 	rb_link_node(&rmap_item->node, parent, new); | 
 | 1149 | 	rb_insert_color(&rmap_item->node, &root_unstable_tree); | 
 | 1150 |  | 
| Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1151 | 	ksm_pages_unshared++; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1152 | 	return NULL; | 
 | 1153 | } | 
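 |  |  | 
 |  | /* | 
 |  |  * Note: unlike the stable tree, nodes inserted here are never aged | 
 |  |  * out individually; scan_get_next_rmap_item() resets | 
 |  |  * root_unstable_tree to RB_ROOT at the start of each full scan, so | 
 |  |  * the unstable tree is rebuilt from scratch every cycle. | 
 |  |  */ | 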
 | 1154 |  | 
 | 1155 | /* | 
 | 1156 |  * stable_tree_append - add another rmap_item to the linked list of | 
 | 1157 |  * rmap_items hanging off a given node of the stable tree, all sharing | 
 | 1158 |  * the same ksm page. | 
 | 1159 |  */ | 
 | 1160 | static void stable_tree_append(struct rmap_item *rmap_item, | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1161 | 			       struct stable_node *stable_node) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1162 | { | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1163 | 	rmap_item->head = stable_node; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1164 | 	rmap_item->address |= STABLE_FLAG; | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1165 | 	hlist_add_head(&rmap_item->hlist, &stable_node->hlist); | 
| Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 1166 |  | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1167 | 	if (rmap_item->hlist.next) | 
 | 1168 | 		ksm_pages_sharing++; | 
 | 1169 | 	else | 
 | 1170 | 		ksm_pages_shared++; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1171 | } | 
 | 1172 |  | 
 | 1173 | /* | 
| Hugh Dickins | 81464e30 | 2009-09-21 17:02:15 -0700 | [diff] [blame] | 1174 |  * cmp_and_merge_page - first see if page can be merged into the stable tree; | 
 | 1175 |  * if not, compare checksum to previous and if it's the same, see if page can | 
 | 1176 |  * be inserted into the unstable tree, or merged with a page already there and | 
 | 1177 |  * both transferred to the stable tree. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1178 |  * | 
 | 1179 |  * @page: the page for which we are searching an identical page. | 
 | 1180 |  * @rmap_item: the reverse mapping into the virtual address of this page | 
 | 1181 |  */ | 
 | 1182 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | 
 | 1183 | { | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1184 | 	struct rmap_item *tree_rmap_item; | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1185 | 	struct page *tree_page = NULL; | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1186 | 	struct stable_node *stable_node; | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1187 | 	struct page *kpage; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1188 | 	unsigned int checksum; | 
 | 1189 | 	int err; | 
 | 1190 |  | 
| Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1191 | 	remove_rmap_item_from_tree(rmap_item); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1192 |  | 
 | 1193 | 	/* We first start with searching the page inside the stable tree */ | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1194 | 	kpage = stable_tree_search(page); | 
 | 1195 | 	if (kpage) { | 
| Hugh Dickins | 08beca4 | 2009-12-14 17:59:21 -0800 | [diff] [blame] | 1196 | 		err = try_to_merge_with_ksm_page(rmap_item, page, kpage); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1197 | 		if (!err) { | 
 | 1198 | 			/* | 
 | 1199 | 			 * The page was successfully merged: | 
 | 1200 | 			 * add its rmap_item to the stable tree. | 
 | 1201 | 			 */ | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1202 | 			lock_page(kpage); | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1203 | 			stable_tree_append(rmap_item, page_stable_node(kpage)); | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1204 | 			unlock_page(kpage); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1205 | 		} | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1206 | 		put_page(kpage); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1207 | 		return; | 
 | 1208 | 	} | 
 | 1209 |  | 
 | 1210 | 	/* | 
| Hugh Dickins | 4035c07a | 2009-12-14 17:59:27 -0800 | [diff] [blame] | 1211 | 	 * If the hash value of the page has changed from the last time | 
 | 1212 | 	 * we calculated it, this page is changing frequently: therefore we | 
 | 1213 | 	 * don't want to insert it in the unstable tree, and we don't want | 
 | 1214 | 	 * to waste our time searching for something identical to it there. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1215 | 	 */ | 
 | 1216 | 	checksum = calc_checksum(page); | 
 | 1217 | 	if (rmap_item->oldchecksum != checksum) { | 
 | 1218 | 		rmap_item->oldchecksum = checksum; | 
 | 1219 | 		return; | 
 | 1220 | 	} | 
 | 1221 |  | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1222 | 	tree_rmap_item = | 
 | 1223 | 		unstable_tree_search_insert(rmap_item, page, &tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1224 | 	if (tree_rmap_item) { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1225 | 		kpage = try_to_merge_two_pages(rmap_item, page, | 
 | 1226 | 						tree_rmap_item, tree_page); | 
 | 1227 | 		put_page(tree_page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1228 | 		/* | 
 | 1229 | 		 * As soon as we merge this page, we want to remove the | 
 | 1230 | 		 * rmap_item of the page we have merged with from the unstable | 
 | 1231 | 		 * tree, and insert it instead as a new node in the stable tree. | 
 | 1232 | 		 */ | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1233 | 		if (kpage) { | 
| Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1234 | 			remove_rmap_item_from_tree(tree_rmap_item); | 
| Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1235 |  | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1236 | 			lock_page(kpage); | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1237 | 			stable_node = stable_tree_insert(kpage); | 
 | 1238 | 			if (stable_node) { | 
 | 1239 | 				stable_tree_append(tree_rmap_item, stable_node); | 
 | 1240 | 				stable_tree_append(rmap_item, stable_node); | 
 | 1241 | 			} | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1242 | 			unlock_page(kpage); | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1243 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1244 | 			/* | 
 | 1245 | 			 * If we fail to insert the page into the stable tree, | 
 | 1246 | 			 * we will have 2 virtual addresses that are pointing | 
 | 1247 | 			 * to a ksm page left outside the stable tree, | 
 | 1248 | 			 * in which case we need to break_cow on both. | 
 | 1249 | 			 */ | 
| Hugh Dickins | 7b6ba2c | 2009-12-14 17:59:20 -0800 | [diff] [blame] | 1250 | 			if (!stable_node) { | 
| Hugh Dickins | 8dd3557 | 2009-12-14 17:59:18 -0800 | [diff] [blame] | 1251 | 				break_cow(tree_rmap_item); | 
 | 1252 | 				break_cow(rmap_item); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1253 | 			} | 
 | 1254 | 		} | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1255 | 	} | 
 | 1256 | } | 
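 |  |  | 
 |  | /* | 
 |  |  * Flow recap for cmp_and_merge_page() (descriptive note): 1) a stable | 
 |  |  * tree hit merges page into the existing kpage and appends rmap_item | 
 |  |  * there; 2) otherwise a changed checksum marks the page as volatile, | 
 |  |  * so nothing more is done this pass; 3) an unstable tree hit merges | 
 |  |  * the two pages and promotes the result into the stable tree, | 
 |  |  * breaking COW on both if the promotion fails; 4) otherwise rmap_item | 
 |  |  * is parked in the unstable tree to await an identical partner. | 
 |  |  */ | 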
 | 1257 |  | 
 | 1258 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1259 | 					    struct rmap_item **rmap_list, | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1260 | 					    unsigned long addr) | 
 | 1261 | { | 
 | 1262 | 	struct rmap_item *rmap_item; | 
 | 1263 |  | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1264 | 	while (*rmap_list) { | 
 | 1265 | 		rmap_item = *rmap_list; | 
| Hugh Dickins | 93d1771 | 2009-12-14 17:59:16 -0800 | [diff] [blame] | 1266 | 		if ((rmap_item->address & PAGE_MASK) == addr) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1267 | 			return rmap_item; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1268 | 		if (rmap_item->address > addr) | 
 | 1269 | 			break; | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1270 | 		*rmap_list = rmap_item->rmap_list; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1271 | 		remove_rmap_item_from_tree(rmap_item); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1272 | 		free_rmap_item(rmap_item); | 
 | 1273 | 	} | 
 | 1274 |  | 
 | 1275 | 	rmap_item = alloc_rmap_item(); | 
 | 1276 | 	if (rmap_item) { | 
 | 1277 | 		/* It has already been zeroed */ | 
 | 1278 | 		rmap_item->mm = mm_slot->mm; | 
 | 1279 | 		rmap_item->address = addr; | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1280 | 		rmap_item->rmap_list = *rmap_list; | 
 | 1281 | 		*rmap_list = rmap_item; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1282 | 	} | 
 | 1283 | 	return rmap_item; | 
 | 1284 | } | 
 | 1285 |  | 
 | 1286 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) | 
 | 1287 | { | 
 | 1288 | 	struct mm_struct *mm; | 
 | 1289 | 	struct mm_slot *slot; | 
 | 1290 | 	struct vm_area_struct *vma; | 
 | 1291 | 	struct rmap_item *rmap_item; | 
 | 1292 |  | 
 | 1293 | 	if (list_empty(&ksm_mm_head.mm_list)) | 
 | 1294 | 		return NULL; | 
 | 1295 |  | 
 | 1296 | 	slot = ksm_scan.mm_slot; | 
 | 1297 | 	if (slot == &ksm_mm_head) { | 
| Hugh Dickins | 2919bfd | 2011-01-13 15:47:29 -0800 | [diff] [blame] | 1298 | 		/* | 
 | 1299 | 		 * A number of pages can hang around indefinitely on per-cpu | 
 | 1300 | 		 * pagevecs, raised page count preventing write_protect_page | 
 | 1301 | 		 * from merging them.  Though it doesn't really matter much, | 
 | 1302 | 		 * it is puzzling to see some stuck in pages_volatile until | 
 | 1303 | 		 * other activity jostles them out, and they also prevented | 
 | 1304 | 		 * LTP's KSM test from succeeding deterministically; so drain | 
 | 1305 | 		 * them here (here rather than on entry to ksm_do_scan(), | 
 | 1306 | 		 * so we don't IPI too often when pages_to_scan is set low). | 
 | 1307 | 		 */ | 
 | 1308 | 		lru_add_drain_all(); | 
 | 1309 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1310 | 		root_unstable_tree = RB_ROOT; | 
 | 1311 |  | 
 | 1312 | 		spin_lock(&ksm_mmlist_lock); | 
 | 1313 | 		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); | 
 | 1314 | 		ksm_scan.mm_slot = slot; | 
 | 1315 | 		spin_unlock(&ksm_mmlist_lock); | 
| Hugh Dickins | 2b47261 | 2011-06-15 15:08:58 -0700 | [diff] [blame] | 1316 | 		/* | 
 | 1317 | 		 * Although we tested list_empty() above, a racing __ksm_exit | 
 | 1318 | 		 * of the last mm on the list may have removed it since then. | 
 | 1319 | 		 */ | 
 | 1320 | 		if (slot == &ksm_mm_head) | 
 | 1321 | 			return NULL; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1322 | next_mm: | 
 | 1323 | 		ksm_scan.address = 0; | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1324 | 		ksm_scan.rmap_list = &slot->rmap_list; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1325 | 	} | 
 | 1326 |  | 
 | 1327 | 	mm = slot->mm; | 
 | 1328 | 	down_read(&mm->mmap_sem); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1329 | 	if (ksm_test_exit(mm)) | 
 | 1330 | 		vma = NULL; | 
 | 1331 | 	else | 
 | 1332 | 		vma = find_vma(mm, ksm_scan.address); | 
 | 1333 |  | 
 | 1334 | 	for (; vma; vma = vma->vm_next) { | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1335 | 		if (!(vma->vm_flags & VM_MERGEABLE)) | 
 | 1336 | 			continue; | 
 | 1337 | 		if (ksm_scan.address < vma->vm_start) | 
 | 1338 | 			ksm_scan.address = vma->vm_start; | 
 | 1339 | 		if (!vma->anon_vma) | 
 | 1340 | 			ksm_scan.address = vma->vm_end; | 
 | 1341 |  | 
 | 1342 | 		while (ksm_scan.address < vma->vm_end) { | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1343 | 			if (ksm_test_exit(mm)) | 
 | 1344 | 				break; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1345 | 			*page = follow_page(vma, ksm_scan.address, FOLL_GET); | 
| Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 1346 | 			if (IS_ERR_OR_NULL(*page)) { | 
 | 1347 | 				ksm_scan.address += PAGE_SIZE; | 
 | 1348 | 				cond_resched(); | 
 | 1349 | 				continue; | 
 | 1350 | 			} | 
| Andrea Arcangeli | 29ad768 | 2011-01-13 15:47:19 -0800 | [diff] [blame] | 1351 | 			if (PageAnon(*page) || | 
 | 1352 | 			    page_trans_compound_anon(*page)) { | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1353 | 				flush_anon_page(vma, *page, ksm_scan.address); | 
 | 1354 | 				flush_dcache_page(*page); | 
 | 1355 | 				rmap_item = get_next_rmap_item(slot, | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1356 | 					ksm_scan.rmap_list, ksm_scan.address); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1357 | 				if (rmap_item) { | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1358 | 					ksm_scan.rmap_list = | 
 | 1359 | 							&rmap_item->rmap_list; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1360 | 					ksm_scan.address += PAGE_SIZE; | 
 | 1361 | 				} else | 
 | 1362 | 					put_page(*page); | 
 | 1363 | 				up_read(&mm->mmap_sem); | 
 | 1364 | 				return rmap_item; | 
 | 1365 | 			} | 
| Andrea Arcangeli | 21ae5b0 | 2011-01-13 15:47:00 -0800 | [diff] [blame] | 1366 | 			put_page(*page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1367 | 			ksm_scan.address += PAGE_SIZE; | 
 | 1368 | 			cond_resched(); | 
 | 1369 | 		} | 
 | 1370 | 	} | 
 | 1371 |  | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1372 | 	if (ksm_test_exit(mm)) { | 
 | 1373 | 		ksm_scan.address = 0; | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1374 | 		ksm_scan.rmap_list = &slot->rmap_list; | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1375 | 	} | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1376 | 	/* | 
 | 1377 | 	 * Nuke all the rmap_items that are above this current rmap: | 
 | 1378 | 	 * because there were no VM_MERGEABLE vmas with such addresses. | 
 | 1379 | 	 */ | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1380 | 	remove_trailing_rmap_items(slot, ksm_scan.rmap_list); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1381 |  | 
 | 1382 | 	spin_lock(&ksm_mmlist_lock); | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1383 | 	ksm_scan.mm_slot = list_entry(slot->mm_list.next, | 
 | 1384 | 						struct mm_slot, mm_list); | 
 | 1385 | 	if (ksm_scan.address == 0) { | 
 | 1386 | 		/* | 
 | 1387 | 		 * We've completed a full scan of all vmas, holding mmap_sem | 
 | 1388 | 		 * throughout, and found no VM_MERGEABLE: so do the same as | 
 | 1389 | 		 * __ksm_exit does to remove this mm from all our lists now. | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1390 | 		 * This applies either when cleaning up after __ksm_exit | 
 | 1391 | 		 * (but beware: we can reach here even before __ksm_exit), | 
 | 1392 | 		 * or when all VM_MERGEABLE areas have been unmapped (and | 
 | 1393 | 		 * mmap_sem then protects against race with MADV_MERGEABLE). | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1394 | 		 */ | 
 | 1395 | 		hlist_del(&slot->link); | 
 | 1396 | 		list_del(&slot->mm_list); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1397 | 		spin_unlock(&ksm_mmlist_lock); | 
 | 1398 |  | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1399 | 		free_mm_slot(slot); | 
 | 1400 | 		clear_bit(MMF_VM_MERGEABLE, &mm->flags); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1401 | 		up_read(&mm->mmap_sem); | 
 | 1402 | 		mmdrop(mm); | 
 | 1403 | 	} else { | 
 | 1404 | 		spin_unlock(&ksm_mmlist_lock); | 
 | 1405 | 		up_read(&mm->mmap_sem); | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1406 | 	} | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1407 |  | 
 | 1408 | 	/* Repeat until we've completed scanning the whole list */ | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1409 | 	slot = ksm_scan.mm_slot; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1410 | 	if (slot != &ksm_mm_head) | 
 | 1411 | 		goto next_mm; | 
 | 1412 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1413 | 	ksm_scan.seqnr++; | 
 | 1414 | 	return NULL; | 
 | 1415 | } | 
 | 1416 |  | 
 | 1417 | /** | 
 | 1418 |  * ksm_do_scan - the ksm scanner main worker function. | 
 | 1419 |  * @scan_npages: number of pages we want to scan before we return. | 
 | 1420 |  */ | 
 | 1421 | static void ksm_do_scan(unsigned int scan_npages) | 
 | 1422 | { | 
 | 1423 | 	struct rmap_item *rmap_item; | 
| Dan Carpenter | 22eccdd | 2010-04-23 13:18:10 -0400 | [diff] [blame] | 1424 | 	struct page *uninitialized_var(page); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1425 |  | 
| Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1426 | 	while (scan_npages-- && likely(!freezing(current))) { | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1427 | 		cond_resched(); | 
 | 1428 | 		rmap_item = scan_get_next_rmap_item(&page); | 
 | 1429 | 		if (!rmap_item) | 
 | 1430 | 			return; | 
 | 1431 | 		if (!PageKsm(page) || !in_stable_tree(rmap_item)) | 
 | 1432 | 			cmp_and_merge_page(page, rmap_item); | 
 | 1433 | 		put_page(page); | 
 | 1434 | 	} | 
 | 1435 | } | 
 | 1436 |  | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1437 | static int ksmd_should_run(void) | 
 | 1438 | { | 
 | 1439 | 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); | 
 | 1440 | } | 
 | 1441 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1442 | static int ksm_scan_thread(void *nothing) | 
 | 1443 | { | 
| Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1444 | 	set_freezable(); | 
| Izik Eidus | 339aa62 | 2009-09-21 17:02:07 -0700 | [diff] [blame] | 1445 | 	set_user_nice(current, 5); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1446 |  | 
 | 1447 | 	while (!kthread_should_stop()) { | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1448 | 		mutex_lock(&ksm_thread_mutex); | 
 | 1449 | 		if (ksmd_should_run()) | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1450 | 			ksm_do_scan(ksm_thread_pages_to_scan); | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1451 | 		mutex_unlock(&ksm_thread_mutex); | 
 | 1452 |  | 
| Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1453 | 		try_to_freeze(); | 
 | 1454 |  | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1455 | 		if (ksmd_should_run()) { | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1456 | 			schedule_timeout_interruptible( | 
 | 1457 | 				msecs_to_jiffies(ksm_thread_sleep_millisecs)); | 
 | 1458 | 		} else { | 
| Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 1459 | 			wait_event_freezable(ksm_thread_wait, | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1460 | 				ksmd_should_run() || kthread_should_stop()); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1461 | 		} | 
 | 1462 | 	} | 
 | 1463 | 	return 0; | 
 | 1464 | } | 
 | 1465 |  | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1466 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 
 | 1467 | 		unsigned long end, int advice, unsigned long *vm_flags) | 
 | 1468 | { | 
 | 1469 | 	struct mm_struct *mm = vma->vm_mm; | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1470 | 	int err; | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1471 |  | 
 | 1472 | 	switch (advice) { | 
 | 1473 | 	case MADV_MERGEABLE: | 
 | 1474 | 		/* | 
 | 1475 | 		 * Be somewhat over-protective for now! | 
 | 1476 | 		 */ | 
 | 1477 | 		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   | | 
 | 1478 | 				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND | | 
| Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1479 | 				 VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP)) | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1480 | 			return 0;		/* just ignore the advice */ | 
 | 1481 |  | 
| Konstantin Khlebnikov | cc2383e | 2012-10-08 16:28:37 -0700 | [diff] [blame] | 1482 | #ifdef VM_SAO | 
 | 1483 | 		if (*vm_flags & VM_SAO) | 
 | 1484 | 			return 0; | 
 | 1485 | #endif | 
 | 1486 |  | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1487 | 		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { | 
 | 1488 | 			err = __ksm_enter(mm); | 
 | 1489 | 			if (err) | 
 | 1490 | 				return err; | 
 | 1491 | 		} | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1492 |  | 
 | 1493 | 		*vm_flags |= VM_MERGEABLE; | 
 | 1494 | 		break; | 
 | 1495 |  | 
 | 1496 | 	case MADV_UNMERGEABLE: | 
 | 1497 | 		if (!(*vm_flags & VM_MERGEABLE)) | 
 | 1498 | 			return 0;		/* just ignore the advice */ | 
 | 1499 |  | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1500 | 		if (vma->anon_vma) { | 
 | 1501 | 			err = unmerge_ksm_pages(vma, start, end); | 
 | 1502 | 			if (err) | 
 | 1503 | 				return err; | 
 | 1504 | 		} | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1505 |  | 
 | 1506 | 		*vm_flags &= ~VM_MERGEABLE; | 
 | 1507 | 		break; | 
 | 1508 | 	} | 
 | 1509 |  | 
 | 1510 | 	return 0; | 
 | 1511 | } | 
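 |  |  | 
 |  | /* | 
 |  |  * Illustrative userspace counterpart (not part of this file): the | 
 |  |  * advice handled above arrives via the madvise() syscall.  A minimal | 
 |  |  * sketch, assuming an anonymous mapping and CONFIG_KSM enabled; | 
 |  |  * madvise(MADV_MERGEABLE) asks ksmd to scan and merge identical | 
 |  |  * pages in buf: | 
 |  |  * | 
 |  |  *	#include <sys/mman.h> | 
 |  |  * | 
 |  |  *	int main(void) | 
 |  |  *	{ | 
 |  |  *		size_t len = 64 * 4096; | 
 |  |  *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, | 
 |  |  *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | 
 |  |  * | 
 |  |  *		if (buf == MAP_FAILED) | 
 |  |  *			return 1; | 
 |  |  *		if (madvise(buf, len, MADV_MERGEABLE)) | 
 |  |  *			return 1; | 
 |  |  *		return 0; | 
 |  |  *	} | 
 |  |  */ | 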
 | 1512 |  | 
 | 1513 | int __ksm_enter(struct mm_struct *mm) | 
 | 1514 | { | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1515 | 	struct mm_slot *mm_slot; | 
 | 1516 | 	int needs_wakeup; | 
 | 1517 |  | 
 | 1518 | 	mm_slot = alloc_mm_slot(); | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1519 | 	if (!mm_slot) | 
 | 1520 | 		return -ENOMEM; | 
 | 1521 |  | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1522 | 	/* Check ksm_run too?  Would need tighter locking */ | 
 | 1523 | 	needs_wakeup = list_empty(&ksm_mm_head.mm_list); | 
 | 1524 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1525 | 	spin_lock(&ksm_mmlist_lock); | 
 | 1526 | 	insert_to_mm_slots_hash(mm, mm_slot); | 
 | 1527 | 	/* | 
 | 1528 | 	 * Insert just behind the scanning cursor, to let the area settle | 
 | 1529 | 	 * down a little; when fork is followed by immediate exec, we don't | 
 | 1530 | 	 * want ksmd to waste time setting up and tearing down an rmap_list. | 
 | 1531 | 	 */ | 
 | 1532 | 	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); | 
 | 1533 | 	spin_unlock(&ksm_mmlist_lock); | 
 | 1534 |  | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1535 | 	set_bit(MMF_VM_MERGEABLE, &mm->flags); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1536 | 	atomic_inc(&mm->mm_count); | 
| Hugh Dickins | 6e15838 | 2009-09-21 17:02:14 -0700 | [diff] [blame] | 1537 |  | 
 | 1538 | 	if (needs_wakeup) | 
 | 1539 | 		wake_up_interruptible(&ksm_thread_wait); | 
 | 1540 |  | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1541 | 	return 0; | 
 | 1542 | } | 
 | 1543 |  | 
| Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 1544 | void __ksm_exit(struct mm_struct *mm) | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1545 | { | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1546 | 	struct mm_slot *mm_slot; | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1547 | 	int easy_to_free = 0; | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1548 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1549 | 	/* | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1550 | 	 * This process is exiting: if it's straightforward (as is the | 
 | 1551 | 	 * case when ksmd was never running), free mm_slot immediately. | 
 | 1552 | 	 * But if it's at the cursor or has rmap_items linked to it, use | 
 | 1553 | 	 * mmap_sem to synchronize with any break_cows before pagetables | 
 | 1554 | 	 * are freed, and leave the mm_slot on the list for ksmd to free. | 
 | 1555 | 	 * Beware: ksm may already have noticed it exiting and freed the slot. | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1556 | 	 */ | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1557 |  | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1558 | 	spin_lock(&ksm_mmlist_lock); | 
 | 1559 | 	mm_slot = get_mm_slot(mm); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1560 | 	if (mm_slot && ksm_scan.mm_slot != mm_slot) { | 
| Hugh Dickins | 6514d51 | 2009-12-14 17:59:19 -0800 | [diff] [blame] | 1561 | 		if (!mm_slot->rmap_list) { | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1562 | 			hlist_del(&mm_slot->link); | 
 | 1563 | 			list_del(&mm_slot->mm_list); | 
 | 1564 | 			easy_to_free = 1; | 
 | 1565 | 		} else { | 
 | 1566 | 			list_move(&mm_slot->mm_list, | 
 | 1567 | 				  &ksm_scan.mm_slot->mm_list); | 
 | 1568 | 		} | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1569 | 	} | 
| Hugh Dickins | cd551f9 | 2009-09-21 17:02:17 -0700 | [diff] [blame] | 1570 | 	spin_unlock(&ksm_mmlist_lock); | 
 | 1571 |  | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1572 | 	if (easy_to_free) { | 
 | 1573 | 		free_mm_slot(mm_slot); | 
 | 1574 | 		clear_bit(MMF_VM_MERGEABLE, &mm->flags); | 
 | 1575 | 		mmdrop(mm); | 
 | 1576 | 	} else if (mm_slot) { | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1577 | 		down_write(&mm->mmap_sem); | 
 | 1578 | 		up_write(&mm->mmap_sem); | 
| Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 1579 | 	} | 
| Hugh Dickins | f8af4da | 2009-09-21 17:01:57 -0700 | [diff] [blame] | 1580 | } | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1581 |  | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1582 | struct page *ksm_does_need_to_copy(struct page *page, | 
 | 1583 | 			struct vm_area_struct *vma, unsigned long address) | 
 | 1584 | { | 
 | 1585 | 	struct page *new_page; | 
 | 1586 |  | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1587 | 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); | 
 | 1588 | 	if (new_page) { | 
 | 1589 | 		copy_user_highpage(new_page, page, address, vma); | 
 | 1590 |  | 
 | 1591 | 		SetPageDirty(new_page); | 
 | 1592 | 		__SetPageUptodate(new_page); | 
 | 1593 | 		SetPageSwapBacked(new_page); | 
 | 1594 | 		__set_page_locked(new_page); | 
 | 1595 |  | 
| Hugh Dickins | 39b5f29 | 2012-10-08 16:33:18 -0700 | [diff] [blame] | 1596 | 		if (!mlocked_vma_newpage(vma, new_page)) | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1597 | 			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); | 
 | 1598 | 		else | 
 | 1599 | 			add_page_to_unevictable_list(new_page); | 
 | 1600 | 	} | 
 | 1601 |  | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1602 | 	return new_page; | 
 | 1603 | } | 
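 |  |  | 
 |  | /* | 
 |  |  * Descriptive note: this is reached from do_swap_page() via the | 
 |  |  * ksm_might_need_to_copy() wrapper in <linux/ksm.h>, when a page | 
 |  |  * found in swapcache cannot simply be reused because it is (or once | 
 |  |  * was) a ksm page shared with other mms. | 
 |  |  */ | 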
 | 1604 |  | 
 | 1605 | int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, | 
 | 1606 | 			unsigned long *vm_flags) | 
 | 1607 | { | 
 | 1608 | 	struct stable_node *stable_node; | 
 | 1609 | 	struct rmap_item *rmap_item; | 
 | 1610 | 	struct hlist_node *hlist; | 
 | 1611 | 	unsigned int mapcount = page_mapcount(page); | 
 | 1612 | 	int referenced = 0; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1613 | 	int search_new_forks = 0; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1614 |  | 
 | 1615 | 	VM_BUG_ON(!PageKsm(page)); | 
 | 1616 | 	VM_BUG_ON(!PageLocked(page)); | 
 | 1617 |  | 
 | 1618 | 	stable_node = page_stable_node(page); | 
 | 1619 | 	if (!stable_node) | 
 | 1620 | 		return 0; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1621 | again: | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1622 | 	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1623 | 		struct anon_vma *anon_vma = rmap_item->anon_vma; | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1624 | 		struct anon_vma_chain *vmac; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1625 | 		struct vm_area_struct *vma; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1626 |  | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1627 | 		anon_vma_lock_read(anon_vma); | 
| Michel Lespinasse | bf181b9 | 2012-10-08 16:31:39 -0700 | [diff] [blame] | 1628 | 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, | 
 | 1629 | 					       0, ULONG_MAX) { | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1630 | 			vma = vmac->vma; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1631 | 			if (rmap_item->address < vma->vm_start || | 
 | 1632 | 			    rmap_item->address >= vma->vm_end) | 
 | 1633 | 				continue; | 
 | 1634 | 			/* | 
 | 1635 | 			 * Initially we examine only the vma which covers this | 
 | 1636 | 			 * rmap_item; but later, if there is still work to do, | 
 | 1637 | 			 * we examine covering vmas in other mms: in case they | 
 | 1638 | 			 * were forked from the original since ksmd passed. | 
 | 1639 | 			 */ | 
 | 1640 | 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | 
 | 1641 | 				continue; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1642 |  | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1643 | 			if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) | 
 | 1644 | 				continue; | 
 | 1645 |  | 
 | 1646 | 			referenced += page_referenced_one(page, vma, | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1647 | 				rmap_item->address, &mapcount, vm_flags); | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1648 | 			if (!search_new_forks || !mapcount) | 
 | 1649 | 				break; | 
 | 1650 | 		} | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1651 | 		anon_vma_unlock_read(anon_vma); | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1652 | 		if (!mapcount) | 
 | 1653 | 			goto out; | 
 | 1654 | 	} | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1655 | 	if (!search_new_forks++) | 
 | 1656 | 		goto again; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1657 | out: | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1658 | 	return referenced; | 
 | 1659 | } | 
 | 1660 |  | 
 | 1661 | int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) | 
 | 1662 | { | 
 | 1663 | 	struct stable_node *stable_node; | 
 | 1664 | 	struct hlist_node *hlist; | 
 | 1665 | 	struct rmap_item *rmap_item; | 
 | 1666 | 	int ret = SWAP_AGAIN; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1667 | 	int search_new_forks = 0; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1668 |  | 
 | 1669 | 	VM_BUG_ON(!PageKsm(page)); | 
 | 1670 | 	VM_BUG_ON(!PageLocked(page)); | 
 | 1671 |  | 
 | 1672 | 	stable_node = page_stable_node(page); | 
 | 1673 | 	if (!stable_node) | 
 | 1674 | 		return SWAP_FAIL; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1675 | again: | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1676 | 	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1677 | 		struct anon_vma *anon_vma = rmap_item->anon_vma; | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1678 | 		struct anon_vma_chain *vmac; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1679 | 		struct vm_area_struct *vma; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1680 |  | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1681 | 		anon_vma_lock_read(anon_vma); | 
| Michel Lespinasse | bf181b9 | 2012-10-08 16:31:39 -0700 | [diff] [blame] | 1682 | 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, | 
 | 1683 | 					       0, ULONG_MAX) { | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1684 | 			vma = vmac->vma; | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1685 | 			if (rmap_item->address < vma->vm_start || | 
 | 1686 | 			    rmap_item->address >= vma->vm_end) | 
 | 1687 | 				continue; | 
 | 1688 | 			/* | 
 | 1689 | 			 * Initially we examine only the vma which covers this | 
 | 1690 | 			 * rmap_item; but later, if there is still work to do, | 
 | 1691 | 			 * we examine covering vmas in other mms: in case they | 
 | 1692 | 			 * were forked from the original since ksmd passed. | 
 | 1693 | 			 */ | 
 | 1694 | 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | 
 | 1695 | 				continue; | 
 | 1696 |  | 
 | 1697 | 			ret = try_to_unmap_one(page, vma, | 
 | 1698 | 					rmap_item->address, flags); | 
 | 1699 | 			if (ret != SWAP_AGAIN || !page_mapped(page)) { | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1700 | 				anon_vma_unlock_read(anon_vma); | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1701 | 				goto out; | 
 | 1702 | 			} | 
 | 1703 | 		} | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1704 | 		anon_vma_unlock_read(anon_vma); | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1705 | 	} | 
| Hugh Dickins | db114b8 | 2009-12-14 17:59:25 -0800 | [diff] [blame] | 1706 | 	if (!search_new_forks++) | 
 | 1707 | 		goto again; | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1708 | out: | 
| Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 1709 | 	return ret; | 
 | 1710 | } | 
 | 1711 |  | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1712 | #ifdef CONFIG_MIGRATION | 
 | 1713 | int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, | 
 | 1714 | 		  struct vm_area_struct *, unsigned long, void *), void *arg) | 
 | 1715 | { | 
 | 1716 | 	struct stable_node *stable_node; | 
 | 1717 | 	struct hlist_node *hlist; | 
 | 1718 | 	struct rmap_item *rmap_item; | 
 | 1719 | 	int ret = SWAP_AGAIN; | 
 | 1720 | 	int search_new_forks = 0; | 
 | 1721 |  | 
 | 1722 | 	VM_BUG_ON(!PageKsm(page)); | 
 | 1723 | 	VM_BUG_ON(!PageLocked(page)); | 
 | 1724 |  | 
 | 1725 | 	stable_node = page_stable_node(page); | 
 | 1726 | 	if (!stable_node) | 
 | 1727 | 		return ret; | 
 | 1728 | again: | 
 | 1729 | 	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { | 
 | 1730 | 		struct anon_vma *anon_vma = rmap_item->anon_vma; | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1731 | 		struct anon_vma_chain *vmac; | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1732 | 		struct vm_area_struct *vma; | 
 | 1733 |  | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1734 | 		anon_vma_lock_read(anon_vma); | 
| Michel Lespinasse | bf181b9 | 2012-10-08 16:31:39 -0700 | [diff] [blame] | 1735 | 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, | 
 | 1736 | 					       0, ULONG_MAX) { | 
| Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1737 | 			vma = vmac->vma; | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1738 | 			if (rmap_item->address < vma->vm_start || | 
 | 1739 | 			    rmap_item->address >= vma->vm_end) | 
 | 1740 | 				continue; | 
 | 1741 | 			/* | 
 | 1742 | 			 * Initially we examine only the vma which covers this | 
 | 1743 | 			 * rmap_item; but later, if there is still work to do, | 
 | 1744 | 			 * we examine covering vmas in other mms: in case they | 
 | 1745 | 			 * were forked from the original since ksmd passed. | 
 | 1746 | 			 */ | 
 | 1747 | 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | 
 | 1748 | 				continue; | 
 | 1749 |  | 
 | 1750 | 			ret = rmap_one(page, vma, rmap_item->address, arg); | 
 | 1751 | 			if (ret != SWAP_AGAIN) { | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1752 | 				anon_vma_unlock_read(anon_vma); | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1753 | 				goto out; | 
 | 1754 | 			} | 
 | 1755 | 		} | 
| Hugh Dickins | b6b19f2 | 2012-12-19 17:44:29 -0800 | [diff] [blame] | 1756 | 		anon_vma_unlock_read(anon_vma); | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1757 | 	} | 
 | 1758 | 	if (!search_new_forks++) | 
 | 1759 | 		goto again; | 
 | 1760 | out: | 
 | 1761 | 	return ret; | 
 | 1762 | } | 
 | 1763 |  | 
 | 1764 | void ksm_migrate_page(struct page *newpage, struct page *oldpage) | 
 | 1765 | { | 
 | 1766 | 	struct stable_node *stable_node; | 
 | 1767 |  | 
 | 1768 | 	VM_BUG_ON(!PageLocked(oldpage)); | 
 | 1769 | 	VM_BUG_ON(!PageLocked(newpage)); | 
 | 1770 | 	VM_BUG_ON(newpage->mapping != oldpage->mapping); | 
 | 1771 |  | 
 | 1772 | 	stable_node = page_stable_node(newpage); | 
 | 1773 | 	if (stable_node) { | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1774 | 		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); | 
 | 1775 | 		stable_node->kpfn = page_to_pfn(newpage); | 
| Hugh Dickins | e9995ef | 2009-12-14 17:59:31 -0800 | [diff] [blame] | 1776 | 	} | 
 | 1777 | } | 
 | 1778 | #endif /* CONFIG_MIGRATION */ | 
 | 1779 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1780 | #ifdef CONFIG_MEMORY_HOTREMOVE | 
 | 1781 | static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, | 
 | 1782 | 						 unsigned long end_pfn) | 
 | 1783 | { | 
 | 1784 | 	struct rb_node *node; | 
 | 1785 |  | 
 | 1786 | 	for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { | 
 | 1787 | 		struct stable_node *stable_node; | 
 | 1788 |  | 
 | 1789 | 		stable_node = rb_entry(node, struct stable_node, node); | 
 | 1790 | 		if (stable_node->kpfn >= start_pfn && | 
 | 1791 | 		    stable_node->kpfn < end_pfn) | 
 | 1792 | 			return stable_node; | 
 | 1793 | 	} | 
 | 1794 | 	return NULL; | 
 | 1795 | } | 
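 |  |  | 
 |  | /* | 
 |  |  * Note: this is a linear walk of the whole stable tree per lookup, | 
 |  |  * which is acceptable because memory offlining is rare, and safe | 
 |  |  * because MEM_OFFLINE below runs with ksm_thread_mutex held, so the | 
 |  |  * tree cannot change underneath us. | 
 |  |  */ | 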
 | 1796 |  | 
 | 1797 | static int ksm_memory_callback(struct notifier_block *self, | 
 | 1798 | 			       unsigned long action, void *arg) | 
 | 1799 | { | 
 | 1800 | 	struct memory_notify *mn = arg; | 
 | 1801 | 	struct stable_node *stable_node; | 
 | 1802 |  | 
 | 1803 | 	switch (action) { | 
 | 1804 | 	case MEM_GOING_OFFLINE: | 
 | 1805 | 		/* | 
 | 1806 | 		 * Keep it very simple for now: just lock out ksmd and | 
 | 1807 | 		 * MADV_UNMERGEABLE while any memory is going offline. | 
| KOSAKI Motohiro | a0b0f58 | 2010-12-02 14:31:20 -0800 | [diff] [blame] | 1808 | 		 * mutex_lock_nested() is necessary because lockdep was alarmed | 
 | 1809 | 		 * that here we take ksm_thread_mutex inside the notifier chain | 
 | 1810 | 		 * mutex, and later take the notifier chain mutex again while | 
 | 1811 | 		 * still holding ksm_thread_mutex, in order to unlock it. But | 
 | 1812 | 		 * that's safe: both orderings occur inside mem_hotplug_mutex. | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1813 | 		 */ | 
| KOSAKI Motohiro | a0b0f58 | 2010-12-02 14:31:20 -0800 | [diff] [blame] | 1814 | 		mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 1815 | 		break; | 
 | 1816 |  | 
 | 1817 | 	case MEM_OFFLINE: | 
 | 1818 | 		/* | 
 | 1819 | 		 * Most of the work is done by page migration; but there might | 
 | 1820 | 		 * be a few stable_nodes left over, still pointing to struct | 
 | 1821 | 		 * pages which have been offlined: prune those from the tree. | 
 | 1822 | 		 */ | 
 | 1823 | 		while ((stable_node = ksm_check_stable_tree(mn->start_pfn, | 
 | 1824 | 					mn->start_pfn + mn->nr_pages)) != NULL) | 
 | 1825 | 			remove_node_from_stable_tree(stable_node); | 
 | 1826 | 		/* fallthrough */ | 
 | 1827 |  | 
 | 1828 | 	case MEM_CANCEL_OFFLINE: | 
 | 1829 | 		mutex_unlock(&ksm_thread_mutex); | 
 | 1830 | 		break; | 
 | 1831 | 	} | 
 | 1832 | 	return NOTIFY_OK; | 
 | 1833 | } | 
 | 1834 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 
 | 1835 |  | 
| Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 1836 | #ifdef CONFIG_SYSFS | 
 | 1837 | /* | 
 | 1838 |  * All of this would compile even without CONFIG_SYSFS, but it would be | 
 | 1839 |  * a waste of space: hence the #ifdef above. | 
 |  |  */ | 
 | 1840 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1841 | #define KSM_ATTR_RO(_name) \ | 
 | 1842 | 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | 
 | 1843 | #define KSM_ATTR(_name) \ | 
 | 1844 | 	static struct kobj_attribute _name##_attr = \ | 
 | 1845 | 		__ATTR(_name, 0644, _name##_show, _name##_store) | 
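 |  |  | 
 |  | /* | 
 |  |  * To illustrate the macros: KSM_ATTR(run), used further below, expands to | 
 |  |  * | 
 |  |  *	static struct kobj_attribute run_attr = | 
 |  |  *		__ATTR(run, 0644, run_show, run_store); | 
 |  |  * | 
 |  |  * so each writable tunable only supplies its _show()/_store() pair. | 
 |  |  */ | 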
 | 1846 |  | 
 | 1847 | static ssize_t sleep_millisecs_show(struct kobject *kobj, | 
 | 1848 | 				    struct kobj_attribute *attr, char *buf) | 
 | 1849 | { | 
 | 1850 | 	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); | 
 | 1851 | } | 
 | 1852 |  | 
 | 1853 | static ssize_t sleep_millisecs_store(struct kobject *kobj, | 
 | 1854 | 				     struct kobj_attribute *attr, | 
 | 1855 | 				     const char *buf, size_t count) | 
 | 1856 | { | 
 | 1857 | 	unsigned long msecs; | 
 | 1858 | 	int err; | 
 | 1859 |  | 
 | 1860 | 	err = strict_strtoul(buf, 10, &msecs); | 
 | 1861 | 	if (err || msecs > UINT_MAX) | 
 | 1862 | 		return -EINVAL; | 
 | 1863 |  | 
 | 1864 | 	ksm_thread_sleep_millisecs = msecs; | 
 | 1865 |  | 
 | 1866 | 	return count; | 
 | 1867 | } | 
 | 1868 | KSM_ATTR(sleep_millisecs); | 
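 |  |  | 
 |  | /* | 
 |  |  * Once the "ksm" attribute group is registered on mm_kobj (see ksm_init | 
 |  |  * below), this file appears as /sys/kernel/mm/ksm/sleep_millisecs, e.g. | 
 |  |  * | 
 |  |  *	echo 20 > /sys/kernel/mm/ksm/sleep_millisecs | 
 |  |  * | 
 |  |  * asks ksmd to sleep 20 milliseconds between batches of scanning. | 
 |  |  */ | 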
 | 1869 |  | 
 | 1870 | static ssize_t pages_to_scan_show(struct kobject *kobj, | 
 | 1871 | 				  struct kobj_attribute *attr, char *buf) | 
 | 1872 | { | 
 | 1873 | 	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); | 
 | 1874 | } | 
 | 1875 |  | 
 | 1876 | static ssize_t pages_to_scan_store(struct kobject *kobj, | 
 | 1877 | 				   struct kobj_attribute *attr, | 
 | 1878 | 				   const char *buf, size_t count) | 
 | 1879 | { | 
 | 1880 | 	int err; | 
 | 1881 | 	unsigned long nr_pages; | 
 | 1882 |  | 
 | 1883 | 	err = strict_strtoul(buf, 10, &nr_pages); | 
 | 1884 | 	if (err || nr_pages > UINT_MAX) | 
 | 1885 | 		return -EINVAL; | 
 | 1886 |  | 
 | 1887 | 	ksm_thread_pages_to_scan = nr_pages; | 
 | 1888 |  | 
 | 1889 | 	return count; | 
 | 1890 | } | 
 | 1891 | KSM_ATTR(pages_to_scan); | 
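 |  |  | 
 |  | /* | 
 |  |  * Together with sleep_millisecs this bounds the scan rate, e.g. | 
 |  |  * | 
 |  |  *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan | 
 |  |  * | 
 |  |  * lets ksmd scan up to 100 pages at each wakeup. | 
 |  |  */ | 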
 | 1892 |  | 
 | 1893 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, | 
 | 1894 | 			char *buf) | 
 | 1895 | { | 
 | 1896 | 	return sprintf(buf, "%u\n", ksm_run); | 
 | 1897 | } | 
 | 1898 |  | 
 | 1899 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, | 
 | 1900 | 			 const char *buf, size_t count) | 
 | 1901 | { | 
 | 1902 | 	int err; | 
 | 1903 | 	unsigned long flags; | 
 | 1904 |  | 
 | 1905 | 	err = strict_strtoul(buf, 10, &flags); | 
 | 1906 | 	if (err || flags > UINT_MAX) | 
 | 1907 | 		return -EINVAL; | 
 | 1908 | 	if (flags > KSM_RUN_UNMERGE) | 
 | 1909 | 		return -EINVAL; | 
 | 1910 |  | 
 | 1911 | 	/* | 
 | 1912 | 	 * KSM_RUN_MERGE sets ksmd running, and KSM_RUN_STOP (0) stops it. | 
 | 1913 | 	 * KSM_RUN_UNMERGE also stops it, and unmerges all rmap_items, | 
| Hugh Dickins | d0f209f | 2009-12-14 17:59:34 -0800 | [diff] [blame] | 1914 | 	 * breaking COW to free what is counted in pages_shared (but leaves | 
 | 1915 | 	 * mm_slots on the list for when ksmd may be set running again). | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1916 | 	 */ | 
 | 1917 |  | 
 | 1918 | 	mutex_lock(&ksm_thread_mutex); | 
 | 1919 | 	if (ksm_run != flags) { | 
 | 1920 | 		ksm_run = flags; | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1921 | 		if (flags & KSM_RUN_UNMERGE) { | 
| David Rientjes | e1e12d2 | 2012-12-11 16:02:56 -0800 | [diff] [blame] | 1922 | 			set_current_oom_origin(); | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1923 | 			err = unmerge_and_remove_all_rmap_items(); | 
| David Rientjes | e1e12d2 | 2012-12-11 16:02:56 -0800 | [diff] [blame] | 1924 | 			clear_current_oom_origin(); | 
| Hugh Dickins | d952b79 | 2009-09-21 17:02:16 -0700 | [diff] [blame] | 1925 | 			if (err) { | 
 | 1926 | 				ksm_run = KSM_RUN_STOP; | 
 | 1927 | 				count = err; | 
 | 1928 | 			} | 
 | 1929 | 		} | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1930 | 	} | 
 | 1931 | 	mutex_unlock(&ksm_thread_mutex); | 
 | 1932 |  | 
 | 1933 | 	if (flags & KSM_RUN_MERGE) | 
 | 1934 | 		wake_up_interruptible(&ksm_thread_wait); | 
 | 1935 |  | 
 | 1936 | 	return count; | 
 | 1937 | } | 
 | 1938 | KSM_ATTR(run); | 
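 |  |  | 
 |  | /* | 
 |  |  * The userspace view of run_store() above: | 
 |  |  * | 
 |  |  *	echo 1 > /sys/kernel/mm/ksm/run		(start ksmd) | 
 |  |  *	echo 0 > /sys/kernel/mm/ksm/run		(stop ksmd) | 
 |  |  *	echo 2 > /sys/kernel/mm/ksm/run		(stop and unmerge everything) | 
 |  |  * | 
 |  |  * Note that ksmd only scans areas which an application has opted in to, | 
 |  |  * by madvise(addr, length, MADV_MERGEABLE). | 
 |  |  */ | 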
 | 1939 |  | 
| Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1940 | static ssize_t pages_shared_show(struct kobject *kobj, | 
 | 1941 | 				 struct kobj_attribute *attr, char *buf) | 
 | 1942 | { | 
 | 1943 | 	return sprintf(buf, "%lu\n", ksm_pages_shared); | 
 | 1944 | } | 
 | 1945 | KSM_ATTR_RO(pages_shared); | 
 | 1946 |  | 
 | 1947 | static ssize_t pages_sharing_show(struct kobject *kobj, | 
 | 1948 | 				  struct kobj_attribute *attr, char *buf) | 
 | 1949 | { | 
| Hugh Dickins | e178dfd | 2009-09-21 17:02:10 -0700 | [diff] [blame] | 1950 | 	return sprintf(buf, "%lu\n", ksm_pages_sharing); | 
| Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1951 | } | 
 | 1952 | KSM_ATTR_RO(pages_sharing); | 
 | 1953 |  | 
| Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1954 | static ssize_t pages_unshared_show(struct kobject *kobj, | 
 | 1955 | 				   struct kobj_attribute *attr, char *buf) | 
 | 1956 | { | 
 | 1957 | 	return sprintf(buf, "%lu\n", ksm_pages_unshared); | 
 | 1958 | } | 
 | 1959 | KSM_ATTR_RO(pages_unshared); | 
 | 1960 |  | 
 | 1961 | static ssize_t pages_volatile_show(struct kobject *kobj, | 
 | 1962 | 				   struct kobj_attribute *attr, char *buf) | 
 | 1963 | { | 
 | 1964 | 	long ksm_pages_volatile; | 
 | 1965 |  | 
 | 1966 | 	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared | 
 | 1967 | 				- ksm_pages_sharing - ksm_pages_unshared; | 
 | 1968 | 	/* | 
 | 1969 | 	 * This statistic was not worth any locking to calculate: the | 
 | 1970 | 	 * unlocked reads may therefore sometimes go negative. Conceal that. | 
 | 1971 | 	 */ | 
 | 1972 | 	if (ksm_pages_volatile < 0) | 
 | 1973 | 		ksm_pages_volatile = 0; | 
 | 1974 | 	return sprintf(buf, "%ld\n", ksm_pages_volatile); | 
 | 1975 | } | 
 | 1976 | KSM_ATTR_RO(pages_volatile); | 
 | 1977 |  | 
 | 1978 | static ssize_t full_scans_show(struct kobject *kobj, | 
 | 1979 | 			       struct kobj_attribute *attr, char *buf) | 
 | 1980 | { | 
 | 1981 | 	return sprintf(buf, "%lu\n", ksm_scan.seqnr); | 
 | 1982 | } | 
 | 1983 | KSM_ATTR_RO(full_scans); | 
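 |  |  | 
 |  | /* | 
 |  |  * Interpreting the counters above: a high ratio of pages_sharing to | 
 |  |  * pages_shared indicates good sharing, while a high proportion of | 
 |  |  * pages_unshared indicates wasted effort on pages that never merge. | 
 |  |  */ | 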
 | 1984 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1985 | static struct attribute *ksm_attrs[] = { | 
 | 1986 | 	&sleep_millisecs_attr.attr, | 
 | 1987 | 	&pages_to_scan_attr.attr, | 
 | 1988 | 	&run_attr.attr, | 
| Hugh Dickins | b402826 | 2009-09-21 17:02:09 -0700 | [diff] [blame] | 1989 | 	&pages_shared_attr.attr, | 
 | 1990 | 	&pages_sharing_attr.attr, | 
| Hugh Dickins | 473b0ce | 2009-09-21 17:02:11 -0700 | [diff] [blame] | 1991 | 	&pages_unshared_attr.attr, | 
 | 1992 | 	&pages_volatile_attr.attr, | 
 | 1993 | 	&full_scans_attr.attr, | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 1994 | 	NULL, | 
 | 1995 | }; | 
 | 1996 |  | 
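 |  | /* The group is registered on mm_kobj in ksm_init(): /sys/kernel/mm/ksm/ */ | 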
 | 1997 | static struct attribute_group ksm_attr_group = { | 
 | 1998 | 	.attrs = ksm_attrs, | 
 | 1999 | 	.name = "ksm", | 
 | 2000 | }; | 
| Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 2001 | #endif /* CONFIG_SYSFS */ | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2002 |  | 
 | 2003 | static int __init ksm_init(void) | 
 | 2004 | { | 
 | 2005 | 	struct task_struct *ksm_thread; | 
 | 2006 | 	int err; | 
 | 2007 |  | 
 | 2008 | 	err = ksm_slab_init(); | 
 | 2009 | 	if (err) | 
 | 2010 | 		goto out; | 
 | 2011 |  | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2012 | 	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); | 
 | 2013 | 	if (IS_ERR(ksm_thread)) { | 
 | 2014 | 		printk(KERN_ERR "ksm: creating kthread failed\n"); | 
 | 2015 | 		err = PTR_ERR(ksm_thread); | 
| Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 2016 | 		goto out_free; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2017 | 	} | 
 | 2018 |  | 
| Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 2019 | #ifdef CONFIG_SYSFS | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2020 | 	err = sysfs_create_group(mm_kobj, &ksm_attr_group); | 
 | 2021 | 	if (err) { | 
 | 2022 | 		printk(KERN_ERR "ksm: register sysfs failed\n"); | 
| Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 2023 | 		kthread_stop(ksm_thread); | 
| Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 2024 | 		goto out_free; | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2025 | 	} | 
| Hugh Dickins | c73602a | 2009-10-07 16:32:22 -0700 | [diff] [blame] | 2026 | #else | 
 | 2027 | 	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */ | 
 | 2028 |  | 
| Hugh Dickins | 2ffd867 | 2009-09-21 17:02:23 -0700 | [diff] [blame] | 2029 | #endif /* CONFIG_SYSFS */ | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2030 |  | 
| Hugh Dickins | 62b61f6 | 2009-12-14 17:59:33 -0800 | [diff] [blame] | 2031 | #ifdef CONFIG_MEMORY_HOTREMOVE | 
 | 2032 | 	/* | 
 | 2033 | 	 * Choose a high priority since the callback takes ksm_thread_mutex: | 
 | 2034 | 	 * later callbacks could only be taking locks which nest within that. | 
 | 2035 | 	 */ | 
 | 2036 | 	hotplug_memory_notifier(ksm_memory_callback, 100); | 
 | 2037 | #endif | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2038 | 	return 0; | 
 | 2039 |  | 
| Lai Jiangshan | d9f8984 | 2010-08-09 17:20:02 -0700 | [diff] [blame] | 2040 | out_free: | 
| Izik Eidus | 31dbd01 | 2009-09-21 17:02:03 -0700 | [diff] [blame] | 2041 | 	ksm_slab_free(); | 
 | 2042 | out: | 
 | 2043 | 	return err; | 
 | 2044 | } | 
 | 2045 | module_init(ksm_init) |