/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting with interrupts disabled, there is no need to bump
 * the preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}

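/*
 * Descriptive note on the scheme above: updates go to the current CPU's
 * slot without atomic operations, which is safe only because callers run
 * with interrupts disabled on that CPU (mem_cgroup_charge_statistics()
 * below asserts irqs_disabled()).  Readers sum every possible CPU's slot,
 * so a concurrent reader may see a slightly stale total, e.g.:
 *
 *	__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, 1);
 *	rss = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
 */
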
/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

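/*
 * Descriptive note: per-node arrays of per-zone structures hang off each
 * mem_cgroup's lru_info, so mem_cgroup_zoneinfo() below resolves a
 * (memcg, nid, zid) triple via info.nodeinfo[nid]->zoneinfo[zid].  The
 * MEM_CGROUP_ZSTAT() counters mirror the length of the corresponding LRU
 * list and are only updated under mz->lru_lock, e.g.
 *
 *	MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
 */
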
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

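/*
 * Descriptive note on the encoding above: page->page_cgroup holds the
 * page_cgroup pointer with PAGE_CGROUP_LOCK folded into its low bit.
 * page_assign_page_cgroup() stores (pc | PAGE_CGROUP_LOCK) while the bit
 * spinlock is held, and page_get_page_cgroup() masks the bit back off
 * before the pointer is used, e.g.
 *
 *	lock_page_cgroup(page);
 *	pc = page_get_page_cgroup(page);
 *	...
 *	unlock_page_cgroup(page);
 */
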
/*
 * A page_cgroup is associated with every page descriptor. The page_cgroup
 * helps us identify information about the cgroup
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int ref_cnt;			/* cached, mapped, migrating */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

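/*
 * Descriptive note: the charge type chosen by the callers below only
 * affects which statistics flag the page_cgroup carries.
 * MEM_CGROUP_CHARGE_TYPE_CACHE sets PAGE_CGROUP_FLAG_CACHE so the charge
 * is accounted to MEM_CGROUP_STAT_CACHE, while MAPPED charges count as
 * MEM_CGROUP_STAT_RSS (see mem_cgroup_charge_statistics() and
 * mem_cgroup_charge_common()).
 */
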
/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge)? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_task(p);
	css_get(&mem->css);
	mm->mem_cgroup = mem;
}

void mm_free_cgroup(struct mm_struct *mm)
{
	css_put(&mm->mem_cgroup->css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void __mem_cgroup_remove_list(struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

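/*
 * Descriptive note: the list helpers above and __mem_cgroup_move_lists()
 * below keep the MEM_CGROUP_ZSTAT counters in step with per-zone LRU list
 * membership; add/remove additionally adjust the cache/rss statistics.
 * All of them expect the caller to hold mz->lru_lock (and, for add/remove,
 * to have interrupts disabled, as mem_cgroup_charge_statistics() asserts).
 */
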
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for page left on wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes. But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}

/*
 * This function is called from vmscan.c, in the page reclaim loop, where
 * the balance between the active and inactive lists is calculated. For
 * memory controller page reclaim, we should use the mem_cgroup's imbalance
 * rather than the zone's global LRU imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are the number of pages. 'long' is ok.*/
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}

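/*
 * Descriptive note: this mirrors how vmscan sizes its per-zone scans. At
 * DEF_PRIORITY only a small fraction (count >> DEF_PRIORITY) of this
 * cgroup's pages in the zone is eligible; each reclaim pass that fails to
 * free enough memory lowers the priority, doubling the scan window, until
 * at priority 0 the whole per-zone list may be scanned.
 */
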
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

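/*
 * Descriptive note: this is the cgroup-aware counterpart of vmscan's LRU
 * isolation. Candidates are walked from the tail of the per-cgroup list,
 * pages found on the "wrong" list are rotated via __mem_cgroup_move_lists()
 * instead of being scanned, and every page_cgroup actually examined is
 * spliced back to the head of the source list before the lock is dropped.
 */
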
/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	/*
	 * Should page_cgroup's go to their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the cgroup page already has a page_cgroup associated
	 * with it
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and
	 * the page has already been accounted.
	 */
	if (pc) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(pc->ref_cnt <= 0);

		pc->ref_cnt++;
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = rcu_dereference(mm->mem_cgroup);
	/*
	 * For every charge from the cgroup, increment reference count
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim. Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
		congestion_wait(WRITE, HZ/10);
	}

	pc->ref_cnt = 1;
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->cgroup, increment refcnt.... just retry is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kfree(pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (!mm)
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
}

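/*
 * Descriptive note: these are the two public charge entry points.
 * mem_cgroup_charge() is used for pages being mapped into a process
 * (accounted as rss), while mem_cgroup_cache_charge() covers page cache
 * insertions (accounted as cache); both end up charging init_mm when no
 * mm is supplied, as mem_cgroup_charge_common() above shows.
 */
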
/*
 * Uncharging is always a welcome operation, we never complain, simply
 * uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * Check if our page_cgroup is valid
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc)
		goto unlock;

	VM_BUG_ON(pc->page != page);
	VM_BUG_ON(pc->ref_cnt <= 0);

	if (--(pc->ref_cnt) == 0) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_remove_list(pc);
		spin_unlock_irqrestore(&mz->lru_lock, flags);

		page_assign_page_cgroup(page, NULL);
		unlock_page_cgroup(page);

		mem = pc->mem_cgroup;
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		kfree(pc);
		return;
	}

unlock:
	unlock_page_cgroup(page);
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member. The page_cgroup's refcnt is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc)
		pc->ref_cnt++;
	unlock_page_cgroup(page);
	return pc != NULL;
}

void mem_cgroup_end_migration(struct page *page)
{
	mem_cgroup_uncharge_page(page);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with the uncharge() routines because the
 * page_cgroup for *page* holds an extra reference taken by
 * mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc) {
		unlock_page_cgroup(page);
		return;
	}

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It ignores page_cgroup->ref_cnt.
 * *And* it doesn't reclaim the pages themselves, it only removes their
 * page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		if (--count <= 0) {
			count = FORCE_UNCHARGE_BATCH;
			cond_resched();
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Make the mem_cgroup's charge 0 if there is no task attached.
 * This enables deleting this mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
	 * active_list <-> inactive_list while we don't take a lock.
	 * So we have to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroup in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroup in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round the value up to the nearest multiple of the page size
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}

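/*
 * Descriptive note: memparse() accepts the usual K/M/G suffixes, so for
 * example writing "4M" to the limit file yields 4194304 here, while a raw
 * value such as 4097 is rounded up to two pages by the shift above
 * (assuming 4 KiB pages).
 */
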
static ssize_t mem_cgroup_read(struct cgroup *cont,
			struct cftype *cft, struct file *file,
			char __user *userbuf, size_t nbytes, loff_t *ppos)
{
	return res_counter_read(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				NULL);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

static ssize_t mem_force_empty_write(struct cgroup *cont,
				struct cftype *cft, struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	int ret = mem_cgroup_force_empty(mem);
	if (!ret)
		ret = nbytes;
	return ret;
}

/*
 * Note: This should be removed once cgroups support write-only files.
 */
static ssize_t mem_force_empty_read(struct cgroup *cont,
				struct cftype *cft,
				struct file *file, char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return -EINVAL;
}

| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 885 | static const struct mem_cgroup_stat_desc { | 
 | 886 | 	const char *msg; | 
 | 887 | 	u64 unit; | 
 | 888 | } mem_cgroup_stat_desc[] = { | 
 | 889 | 	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, }, | 
 | 890 | 	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, }, | 
 | 891 | }; | 
 | 892 |  | 
 | 893 | static int mem_control_stat_show(struct seq_file *m, void *arg) | 
 | 894 | { | 
 | 895 | 	struct cgroup *cont = m->private; | 
 | 896 | 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); | 
 | 897 | 	struct mem_cgroup_stat *stat = &mem_cont->stat; | 
 | 898 | 	int i; | 
 | 899 |  | 
 | 900 | 	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) { | 
 | 901 | 		s64 val; | 
 | 902 |  | 
 | 903 | 		val = mem_cgroup_read_stat(stat, i); | 
 | 904 | 		val *= mem_cgroup_stat_desc[i].unit; | 
 | 905 | 		seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg, | 
 | 906 | 				(long long)val); | 
 | 907 | 	} | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 908 | 	/* showing # of active pages */ | 
 | 909 | 	{ | 
 | 910 | 		unsigned long active, inactive; | 
 | 911 |  | 
 | 912 | 		inactive = mem_cgroup_get_all_zonestat(mem_cont, | 
 | 913 | 						MEM_CGROUP_ZSTAT_INACTIVE); | 
 | 914 | 		active = mem_cgroup_get_all_zonestat(mem_cont, | 
 | 915 | 						MEM_CGROUP_ZSTAT_ACTIVE); | 
 | 916 | 		seq_printf(m, "active %lu\n", active * PAGE_SIZE); | 
 | 917 | 		seq_printf(m, "inactive %lu\n", inactive * PAGE_SIZE); | 
 | 918 | 	} | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 919 | 	return 0; | 
 | 920 | } | 
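 |  |  | 
 |  | /* | 
 |  |  * Illustrative memory.stat output produced by the function above; the | 
 |  |  * numbers are hypothetical and assume 4 KiB pages: | 
 |  |  * | 
 |  |  *	cache 2097152 | 
 |  |  *	rss 1048576 | 
 |  |  *	active 1441792 | 
 |  |  *	inactive 1703936 | 
 |  |  * | 
 |  |  * All four figures are reported in bytes. | 
 |  |  */ | 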
 | 921 |  | 
 | 922 | static const struct file_operations mem_control_stat_file_operations = { | 
 | 923 | 	.read = seq_read, | 
 | 924 | 	.llseek = seq_lseek, | 
 | 925 | 	.release = single_release, | 
 | 926 | }; | 
 | 927 |  | 
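 |  | /* | 
 |  |  * The cgroup "stat" file is backed by the seq_file single_open() helper: | 
 |  |  * the open callback below recovers the cgroup from the parent dentry's | 
 |  |  * fsdata, swaps in seq_file-aware file_operations and lets | 
 |  |  * mem_control_stat_show() emit the whole file in one pass. | 
 |  |  */ | 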
 | 928 | static int mem_control_stat_open(struct inode *unused, struct file *file) | 
 | 929 | { | 
 | 930 | 	/* XXX __d_cont */ | 
 | 931 | 	struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; | 
 | 932 |  | 
 | 933 | 	file->f_op = &mem_control_stat_file_operations; | 
 | 934 | 	return single_open(file, mem_control_stat_show, cont); | 
 | 935 | } | 
 | 936 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 937 | static struct cftype mem_cgroup_files[] = { | 
 | 938 | 	{ | 
| Balbir Singh | 0eea103 | 2008-02-07 00:13:57 -0800 | [diff] [blame] | 939 | 		.name = "usage_in_bytes", | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 940 | 		.private = RES_USAGE, | 
 | 941 | 		.read = mem_cgroup_read, | 
 | 942 | 	}, | 
 | 943 | 	{ | 
| Balbir Singh | 0eea103 | 2008-02-07 00:13:57 -0800 | [diff] [blame] | 944 | 		.name = "limit_in_bytes", | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 945 | 		.private = RES_LIMIT, | 
 | 946 | 		.write = mem_cgroup_write, | 
 | 947 | 		.read = mem_cgroup_read, | 
 | 948 | 	}, | 
 | 949 | 	{ | 
 | 950 | 		.name = "failcnt", | 
 | 951 | 		.private = RES_FAILCNT, | 
 | 952 | 		.read = mem_cgroup_read, | 
 | 953 | 	}, | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 954 | 	{ | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 955 | 		.name = "force_empty", | 
 | 956 | 		.write = mem_force_empty_write, | 
 | 957 | 		.read = mem_force_empty_read, | 
 | 958 | 	}, | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 959 | 	{ | 
 | 960 | 		.name = "stat", | 
 | 961 | 		.open = mem_control_stat_open, | 
 | 962 | 	}, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 963 | }; | 
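 |  |  | 
 |  | /* | 
 |  |  * Once the memory controller is mounted, the files above appear in each | 
 |  |  * cgroup directory prefixed with the subsystem name.  A hedged example, | 
 |  |  * assuming a mount point of /cgroups: | 
 |  |  * | 
 |  |  *	mount -t cgroup -o memory none /cgroups | 
 |  |  *	ls /cgroups | 
 |  |  *	...  memory.failcnt  memory.force_empty  memory.limit_in_bytes | 
 |  |  *	     memory.stat  memory.usage_in_bytes  ... | 
 |  |  */ | 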
 | 964 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 965 | static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 
 | 966 | { | 
 | 967 | 	struct mem_cgroup_per_node *pn; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 968 | 	struct mem_cgroup_per_zone *mz; | 
 | 969 | 	int zone; | 
 | 970 | 	/* | 
 | 971 | 	 * This routine is called for each possible node, but it is a BUG | 
 | 972 | 	 * to call kmalloc() against an offline node. | 
 | 973 | 	 * | 
 | 974 | 	 * TODO: this routine can waste a lot of memory for nodes which will | 
 | 975 | 	 *       never be onlined.  It would be better to use a memory | 
 | 976 | 	 *       hotplug callback function. | 
 | 977 | 	 */ | 
 | 978 | 	if (node_state(node, N_HIGH_MEMORY)) | 
 | 979 | 		pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node); | 
 | 980 | 	else | 
 | 981 | 		pn = kmalloc(sizeof(*pn), GFP_KERNEL); | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 982 | 	if (!pn) | 
 | 983 | 		return 1; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 984 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 985 | 	mem->info.nodeinfo[node] = pn; | 
 | 986 | 	memset(pn, 0, sizeof(*pn)); | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 987 |  | 
 | 988 | 	for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 
 | 989 | 		mz = &pn->zoneinfo[zone]; | 
 | 990 | 		INIT_LIST_HEAD(&mz->active_list); | 
 | 991 | 		INIT_LIST_HEAD(&mz->inactive_list); | 
| KAMEZAWA Hiroyuki | 072c56c | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 992 | 		spin_lock_init(&mz->lru_lock); | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 993 | 	} | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 994 | 	return 0; | 
 | 995 | } | 
 | 996 |  | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 997 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 
 | 998 | { | 
 | 999 | 	kfree(mem->info.nodeinfo[node]); | 
 | 1000 | } | 
 | 1001 |  | 
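 |  | /* | 
 |  |  * Cgroup creation callback.  The root cgroup reuses the statically | 
 |  |  * allocated init_mem_cgroup and points init_mm at it; any other group | 
 |  |  * is kzalloc'd.  Per-node LRU bookkeeping is allocated for every | 
 |  |  * possible node and is unwound again if any allocation fails. | 
 |  |  */ | 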
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1002 | static struct cgroup_subsys_state * | 
 | 1003 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 
 | 1004 | { | 
 | 1005 | 	struct mem_cgroup *mem; | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1006 | 	int node; | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1007 |  | 
| Pavel Emelianov | 78fb746 | 2008-02-07 00:13:51 -0800 | [diff] [blame] | 1008 | 	if (unlikely((cont->parent) == NULL)) { | 
 | 1009 | 		mem = &init_mem_cgroup; | 
 | 1010 | 		init_mm.mem_cgroup = mem; | 
 | 1011 | 	} else | 
 | 1012 | 		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL); | 
 | 1013 |  | 
 | 1014 | 	if (mem == NULL) | 
| Li Zefan | 2dda81c | 2008-02-23 15:24:14 -0800 | [diff] [blame] | 1015 | 		return ERR_PTR(-ENOMEM); | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1016 |  | 
 | 1017 | 	res_counter_init(&mem->res); | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 1018 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1019 | 	memset(&mem->info, 0, sizeof(mem->info)); | 
 | 1020 |  | 
 | 1021 | 	for_each_node_state(node, N_POSSIBLE) | 
 | 1022 | 		if (alloc_mem_cgroup_per_zone_info(mem, node)) | 
 | 1023 | 			goto free_out; | 
 | 1024 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1025 | 	return &mem->css; | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1026 | free_out: | 
 | 1027 | 	for_each_node_state(node, N_POSSIBLE) | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 1028 | 		free_mem_cgroup_per_zone_info(mem, node); | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1029 | 	if (cont->parent != NULL) | 
 | 1030 | 		kfree(mem); | 
| Li Zefan | 2dda81c | 2008-02-23 15:24:14 -0800 | [diff] [blame] | 1031 | 	return ERR_PTR(-ENOMEM); | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1032 | } | 
 | 1033 |  | 
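 |  | /* | 
 |  |  * Called by the cgroup core before ->destroy: force the group empty so | 
 |  |  * that no pages remain charged to it when its per-zone structures are | 
 |  |  * freed in mem_cgroup_destroy(). | 
 |  |  */ | 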
| KAMEZAWA Hiroyuki | df878fb | 2008-02-07 00:14:28 -0800 | [diff] [blame] | 1034 | static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, | 
 | 1035 | 					struct cgroup *cont) | 
 | 1036 | { | 
 | 1037 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
 | 1038 | 	mem_cgroup_force_empty(mem); | 
 | 1039 | } | 
 | 1040 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1041 | static void mem_cgroup_destroy(struct cgroup_subsys *ss, | 
 | 1042 | 				struct cgroup *cont) | 
 | 1043 | { | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1044 | 	int node; | 
 | 1045 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
 | 1046 |  | 
 | 1047 | 	for_each_node_state(node, N_POSSIBLE) | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 1048 | 		free_mem_cgroup_per_zone_info(mem, node); | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1049 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1050 | 	kfree(mem); | 
 | 1051 | } | 
 | 1052 |  | 
 | 1053 | static int mem_cgroup_populate(struct cgroup_subsys *ss, | 
 | 1054 | 				struct cgroup *cont) | 
 | 1055 | { | 
 | 1056 | 	return cgroup_add_files(cont, ss, mem_cgroup_files, | 
 | 1057 | 					ARRAY_SIZE(mem_cgroup_files)); | 
 | 1058 | } | 
 | 1059 |  | 
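 |  | /* | 
 |  |  * The ->attach callback: when a task moves to a new cgroup, retarget its | 
 |  |  * mm's mem_cgroup pointer, taking a reference on the new group's css and | 
 |  |  * dropping the reference on the old one.  Tasks without an mm (kernel | 
 |  |  * threads) and non-leader threads are left untouched. | 
 |  |  */ | 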
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 1060 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 
 | 1061 | 				struct cgroup *cont, | 
 | 1062 | 				struct cgroup *old_cont, | 
 | 1063 | 				struct task_struct *p) | 
 | 1064 | { | 
 | 1065 | 	struct mm_struct *mm; | 
 | 1066 | 	struct mem_cgroup *mem, *old_mem; | 
 | 1067 |  | 
 | 1068 | 	mm = get_task_mm(p); | 
 | 1069 | 	if (mm == NULL) | 
 | 1070 | 		return; | 
 | 1071 |  | 
 | 1072 | 	mem = mem_cgroup_from_cont(cont); | 
 | 1073 | 	old_mem = mem_cgroup_from_cont(old_cont); | 
 | 1074 |  | 
 | 1075 | 	if (mem == old_mem) | 
 | 1076 | 		goto out; | 
 | 1077 |  | 
 | 1078 | 	/* | 
 | 1079 | 	 * Only thread group leaders are allowed to migrate; the mm_struct is | 
 | 1080 | 	 * in effect owned by the leader. | 
 | 1081 | 	 */ | 
| Pavel Emelyanov | 52ea27e | 2008-03-19 17:00:45 -0700 | [diff] [blame] | 1082 | 	if (!thread_group_leader(p)) | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 1083 | 		goto out; | 
 | 1084 |  | 
 | 1085 | 	css_get(&mem->css); | 
 | 1086 | 	rcu_assign_pointer(mm->mem_cgroup, mem); | 
 | 1087 | 	css_put(&old_mem->css); | 
 | 1088 |  | 
 | 1089 | out: | 
 | 1090 | 	mmput(mm); | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 1091 | } | 
 | 1092 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1093 | struct cgroup_subsys mem_cgroup_subsys = { | 
 | 1094 | 	.name = "memory", | 
 | 1095 | 	.subsys_id = mem_cgroup_subsys_id, | 
 | 1096 | 	.create = mem_cgroup_create, | 
| KAMEZAWA Hiroyuki | df878fb | 2008-02-07 00:14:28 -0800 | [diff] [blame] | 1097 | 	.pre_destroy = mem_cgroup_pre_destroy, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1098 | 	.destroy = mem_cgroup_destroy, | 
 | 1099 | 	.populate = mem_cgroup_populate, | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 1100 | 	.attach = mem_cgroup_move_task, | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 1101 | 	.early_init = 0, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 1102 | }; |