/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif

/*
 * The per-memcg event counter is incremented at every pagein/pageout and is
 * used to trigger some periodic events. This is straightforward and cheaper
 * than using jiffies etc. to handle periodic memcg events.
 *
 * These values are used as !((event) & ((1 << (thresh)) - 1))
 */
#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
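/*
 * Worked example: with THRESHOLDS_EVENTS_THRESH == 7 the check reduces to
 * !(event & 127), i.e. it fires once every 128 pagein/pageout events, and
 * SOFTLIMIT_EVENTS_THRESH == 10 fires once every 1024 events.
 */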

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	   /* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
	/* incremented at every pagein/pageout */
	MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	/*
	 * protects reclaim-related members.
	 */
	spinlock_t reclaim_param_lock;

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	atomic_t	oom_lock;
	atomic_t	refcnt;

	unsigned int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page (including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
/* Not used, but added here for completeness */
#define PCGF_ACCT	(1UL << PCG_ACCT)

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);

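/*
 * Per-node/per-zone lookup helpers: a memcg keeps one mem_cgroup_per_zone
 * per (node, zone) pair. These return the entry that backs a given cgroup,
 * page_cgroup or page.
 */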
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	if (!mem)
		return NULL;

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

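/*
 * Insert @mz into the per-zone soft-limit RB-tree, keyed by how far the
 * cgroup's usage exceeds its soft limit (usage_in_excess). Callers hold
 * mctz->lock.
 */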
static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


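/*
 * Re-position @mem (and, when hierarchy is used, each of its ancestors) in
 * the soft-limit tree for @page's zone so that the tree reflects the current
 * amount by which each soft limit is exceeded.
 */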
static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
{
	return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
}

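/*
 * Pick the cgroup exceeding its soft limit the most in this zone: the
 * rightmost node of the RB-tree. The node is taken off the tree and a css
 * reference is held; soft-limit reclaim re-inserts it when it is done.
 */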
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use a threshold and periodic
 * synchronization to implement "quick" reads, trading precision of the
 * value for read cost. We could likewise implement periodic
 * synchronization of the memcg counters.
 *
 * But this _read() function is currently used for the user interface.
 * The user accounts memory via memory cgroups and always requires an
 * exact value, so even a quick-and-fuzzy read would still have to visit
 * all online cpus and compute the sum. For now, the extra synchronization
 * is therefore not implemented (except for cpu hotplug).
 *
 * If kernel-internal users can tolerate a not-exact value and reading all
 * cpu values becomes a performance bottleneck in some common workload, a
 * threshold and synchronization scheme like vmstat[] should be implemented.
 */
static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 val = 0;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

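/* Local (non-hierarchical) usage of this memcg: charged anon rss + page cache. */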
static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
{
	s64 ret;

	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

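/*
 * Update the per-cpu counters for a charge (nr_pages > 0) or uncharge
 * (nr_pages < 0): file pages are accounted to STAT_CACHE, anonymous pages
 * to STAT_RSS, and the pagein/pageout and event counters are bumped.
 */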
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
	else {
		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);

	preempt_enable();
}

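/* Sum one per-zone LRU counter (MEM_CGROUP_ZSTAT) over all online nodes. */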
static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

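/*
 * True when the per-cpu event counter has reached a multiple of
 * 2^event_mask_shift; see THRESHOLDS_EVENTS_THRESH/SOFTLIMIT_EVENTS_THRESH.
 */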
static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
{
	s64 val;

	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);

	return !(val & ((1 << event_mask_shift) - 1));
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
		mem_cgroup_threshold(mem);
		if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
			mem_cgroup_update_tree(mem, page);
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner may be moved to another
	 * cgroup concurrently. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * Search for the memory cgroup with the smallest ID under the given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

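/*
 * Advance the walk started by mem_cgroup_start_loop(): drop the reference
 * held on @iter and return the next cgroup (in css ID order) under @root
 * with a reference taken, or NULL when the walk is done or !cond.
 */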
static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() visits every cgroup under the tree. Be careful:
 * breaking out of the loop is not allowed because we hold a reference count.
 * Instead, set "cond" to false and "continue" to exit the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)


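/*
 * Illustrative usage only (not a call site in this file): summing a
 * statistic over a whole subtree while a reference is held on each
 * iteration, e.g.
 *
 *	struct mem_cgroup *iter;
 *	s64 cache = 0;
 *
 *	for_each_mem_cgroup_tree(iter, mem)
 *		cache += mem_cgroup_read_stat(iter, MEM_CGROUP_STAT_CACHE);
 *
 * To stop early, clear a "cond" flag and "continue"; a bare "break" would
 * leak the css reference taken on "iter".
 */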
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

/*
 * The following LRU functions may be used without holding PCG_LOCK. They are
 * called by the global LRU code independently of memcg. What we have to take
 * care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU.
 * When moving an account, the page is not on the LRU; it is isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * While handling SwapCache, pc->mem_cgroup may be changed while the page is
 * still linked to the LRU, because the page may be reused after it is fully
 * uncharged (due to SwapCache behavior). To handle that, unlink the
 * page_cgroup from the LRU when charging it again. This function is only
 * used to charge SwapCache. It is called under lock_page() and expects that
 * zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
	 * is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

| David Rientjes | 4c4a221 | 2008-02-07 00:14:06 -0800 | [diff] [blame] | 920 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) | 
 | 921 | { | 
 | 922 | 	int ret; | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 923 | 	struct mem_cgroup *curr = NULL; | 
| KAMEZAWA Hiroyuki | 158e0a2 | 2010-08-10 18:03:00 -0700 | [diff] [blame] | 924 | 	struct task_struct *p; | 
| David Rientjes | 4c4a221 | 2008-02-07 00:14:06 -0800 | [diff] [blame] | 925 |  | 
| KAMEZAWA Hiroyuki | 158e0a2 | 2010-08-10 18:03:00 -0700 | [diff] [blame] | 926 | 	p = find_lock_task_mm(task); | 
 | 927 | 	if (!p) | 
 | 928 | 		return 0; | 
 | 929 | 	curr = try_get_mem_cgroup_from_mm(p->mm); | 
 | 930 | 	task_unlock(p); | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 931 | 	if (!curr) | 
 | 932 | 		return 0; | 
| Daisuke Nishimura | d31f56d | 2009-12-15 16:47:12 -0800 | [diff] [blame] | 933 | 	/* | 
 | 934 | 	 * We should check use_hierarchy of "mem", not "curr", because checking | 
 | 935 | 	 * use_hierarchy of "curr" here would make this function return true if | 
 | 936 | 	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the | 
 | 937 | 	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem"). | 
 | 938 | 	 */ | 
 | 939 | 	if (mem->use_hierarchy) | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 940 | 		ret = css_is_ancestor(&curr->css, &mem->css); | 
 | 941 | 	else | 
 | 942 | 		ret = (curr == mem); | 
 | 943 | 	css_put(&curr->css); | 
| David Rientjes | 4c4a221 | 2008-02-07 00:14:06 -0800 | [diff] [blame] | 944 | 	return ret; | 
 | 945 | } | 
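 |  |  | 
 |  | /* | 
 |  |  * Hedged usage sketch (not part of the original file): the main consumer of | 
 |  |  * task_in_mem_cgroup() is the OOM killer's task-selection path; the helper | 
 |  |  * below, oom_candidate(), is purely illustrative and does not exist in the | 
 |  |  * kernel. | 
 |  |  */ | 
 |  | static inline int oom_candidate(struct task_struct *task, struct mem_cgroup *mem) | 
 |  | { | 
 |  | 	/* With no memcg constraint every task qualifies; otherwise only tasks | 
 |  | 	 * charged to "mem" (or, with use_hierarchy, to a descendant) do. */ | 
 |  | 	return !mem || task_in_mem_cgroup(task, mem); | 
 |  | } | 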
 | 946 |  | 
| KOSAKI Motohiro | c772be9 | 2009-01-07 18:08:25 -0800 | [diff] [blame] | 947 | static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) | 
| KOSAKI Motohiro | 14797e2 | 2009-01-07 18:08:18 -0800 | [diff] [blame] | 948 | { | 
 | 949 | 	unsigned long active; | 
 | 950 | 	unsigned long inactive; | 
| KOSAKI Motohiro | c772be9 | 2009-01-07 18:08:25 -0800 | [diff] [blame] | 951 | 	unsigned long gb; | 
 | 952 | 	unsigned long inactive_ratio; | 
| KOSAKI Motohiro | 14797e2 | 2009-01-07 18:08:18 -0800 | [diff] [blame] | 953 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 954 | 	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON); | 
 | 955 | 	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON); | 
| KOSAKI Motohiro | 14797e2 | 2009-01-07 18:08:18 -0800 | [diff] [blame] | 956 |  | 
| KOSAKI Motohiro | c772be9 | 2009-01-07 18:08:25 -0800 | [diff] [blame] | 957 | 	gb = (inactive + active) >> (30 - PAGE_SHIFT); | 
 | 958 | 	if (gb) | 
 | 959 | 		inactive_ratio = int_sqrt(10 * gb); | 
 | 960 | 	else | 
 | 961 | 		inactive_ratio = 1; | 
 | 962 |  | 
 | 963 | 	if (present_pages) { | 
 | 964 | 		present_pages[0] = inactive; | 
 | 965 | 		present_pages[1] = active; | 
 | 966 | 	} | 
 | 967 |  | 
 | 968 | 	return inactive_ratio; | 
 | 969 | } | 
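 |  |  | 
 |  | /* | 
 |  |  * Worked example (hypothetical sizes, for illustration only): with roughly | 
 |  |  * 4GB of anon pages on these LRUs, gb = 4 and inactive_ratio = int_sqrt(40), | 
 |  |  * i.e. 6, so mem_cgroup_inactive_anon_is_low() below reports "low" once | 
 |  |  * inactive * 6 < active. Under 1GB the ratio degenerates to 1:1. | 
 |  |  */ | 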
 | 970 |  | 
 | 971 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) | 
 | 972 | { | 
 | 973 | 	unsigned long active; | 
 | 974 | 	unsigned long inactive; | 
 | 975 | 	unsigned long present_pages[2]; | 
 | 976 | 	unsigned long inactive_ratio; | 
 | 977 |  | 
 | 978 | 	inactive_ratio = calc_inactive_ratio(memcg, present_pages); | 
 | 979 |  | 
 | 980 | 	inactive = present_pages[0]; | 
 | 981 | 	active = present_pages[1]; | 
 | 982 |  | 
 | 983 | 	if (inactive * inactive_ratio < active) | 
| KOSAKI Motohiro | 14797e2 | 2009-01-07 18:08:18 -0800 | [diff] [blame] | 984 | 		return 1; | 
 | 985 |  | 
 | 986 | 	return 0; | 
 | 987 | } | 
 | 988 |  | 
| Rik van Riel | 56e49d2 | 2009-06-16 15:32:28 -0700 | [diff] [blame] | 989 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) | 
 | 990 | { | 
 | 991 | 	unsigned long active; | 
 | 992 | 	unsigned long inactive; | 
 | 993 |  | 
 | 994 | 	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE); | 
 | 995 | 	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE); | 
 | 996 |  | 
 | 997 | 	return (active > inactive); | 
 | 998 | } | 
 | 999 |  | 
| KOSAKI Motohiro | a3d8e05 | 2009-01-07 18:08:19 -0800 | [diff] [blame] | 1000 | unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, | 
 | 1001 | 				       struct zone *zone, | 
 | 1002 | 				       enum lru_list lru) | 
 | 1003 | { | 
| KOSAKI Motohiro | 13d7e3a | 2010-08-10 18:03:06 -0700 | [diff] [blame] | 1004 | 	int nid = zone_to_nid(zone); | 
| KOSAKI Motohiro | a3d8e05 | 2009-01-07 18:08:19 -0800 | [diff] [blame] | 1005 | 	int zid = zone_idx(zone); | 
 | 1006 | 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); | 
 | 1007 |  | 
 | 1008 | 	return MEM_CGROUP_ZSTAT(mz, lru); | 
 | 1009 | } | 
 | 1010 |  | 
| KOSAKI Motohiro | 3e2f41f | 2009-01-07 18:08:20 -0800 | [diff] [blame] | 1011 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | 
 | 1012 | 						      struct zone *zone) | 
 | 1013 | { | 
| KOSAKI Motohiro | 13d7e3a | 2010-08-10 18:03:06 -0700 | [diff] [blame] | 1014 | 	int nid = zone_to_nid(zone); | 
| KOSAKI Motohiro | 3e2f41f | 2009-01-07 18:08:20 -0800 | [diff] [blame] | 1015 | 	int zid = zone_idx(zone); | 
 | 1016 | 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); | 
 | 1017 |  | 
 | 1018 | 	return &mz->reclaim_stat; | 
 | 1019 | } | 
 | 1020 |  | 
 | 1021 | struct zone_reclaim_stat * | 
 | 1022 | mem_cgroup_get_reclaim_stat_from_page(struct page *page) | 
 | 1023 | { | 
 | 1024 | 	struct page_cgroup *pc; | 
 | 1025 | 	struct mem_cgroup_per_zone *mz; | 
 | 1026 |  | 
 | 1027 | 	if (mem_cgroup_disabled()) | 
 | 1028 | 		return NULL; | 
 | 1029 |  | 
 | 1030 | 	pc = lookup_page_cgroup(page); | 
| Daisuke Nishimura | bd112db | 2009-01-15 13:51:11 -0800 | [diff] [blame] | 1031 | 	if (!PageCgroupUsed(pc)) | 
 | 1032 | 		return NULL; | 
| Johannes Weiner | 713735b | 2011-01-20 14:44:31 -0800 | [diff] [blame] | 1033 | 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */ | 
 | 1034 | 	smp_rmb(); | 
| KOSAKI Motohiro | 3e2f41f | 2009-01-07 18:08:20 -0800 | [diff] [blame] | 1035 | 	mz = page_cgroup_zoneinfo(pc); | 
 | 1036 | 	if (!mz) | 
 | 1037 | 		return NULL; | 
 | 1038 |  | 
 | 1039 | 	return &mz->reclaim_stat; | 
 | 1040 | } | 
 | 1041 |  | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1042 | unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | 
 | 1043 | 					struct list_head *dst, | 
 | 1044 | 					unsigned long *scanned, int order, | 
 | 1045 | 					int mode, struct zone *z, | 
 | 1046 | 					struct mem_cgroup *mem_cont, | 
| Rik van Riel | 4f98a2f | 2008-10-18 20:26:32 -0700 | [diff] [blame] | 1047 | 					int active, int file) | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1048 | { | 
 | 1049 | 	unsigned long nr_taken = 0; | 
 | 1050 | 	struct page *page; | 
 | 1051 | 	unsigned long scan; | 
 | 1052 | 	LIST_HEAD(pc_list); | 
 | 1053 | 	struct list_head *src; | 
| KAMEZAWA Hiroyuki | ff7283f | 2008-02-07 00:14:11 -0800 | [diff] [blame] | 1054 | 	struct page_cgroup *pc, *tmp; | 
| KOSAKI Motohiro | 13d7e3a | 2010-08-10 18:03:06 -0700 | [diff] [blame] | 1055 | 	int nid = zone_to_nid(z); | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 1056 | 	int zid = zone_idx(z); | 
 | 1057 | 	struct mem_cgroup_per_zone *mz; | 
| Johannes Weiner | b7c46d1 | 2009-09-21 17:02:56 -0700 | [diff] [blame] | 1058 | 	int lru = LRU_FILE * file + active; | 
| KAMEZAWA Hiroyuki | 2ffebca | 2009-06-17 16:27:21 -0700 | [diff] [blame] | 1059 | 	int ret; | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1060 |  | 
| Balbir Singh | cf475ad | 2008-04-29 01:00:16 -0700 | [diff] [blame] | 1061 | 	BUG_ON(!mem_cont); | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 1062 | 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 1063 | 	src = &mz->lists[lru]; | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1064 |  | 
| KAMEZAWA Hiroyuki | ff7283f | 2008-02-07 00:14:11 -0800 | [diff] [blame] | 1065 | 	scan = 0; | 
 | 1066 | 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) { | 
| Hugh Dickins | 436c6541 | 2008-02-07 00:14:12 -0800 | [diff] [blame] | 1067 | 		if (scan >= nr_to_scan) | 
| KAMEZAWA Hiroyuki | ff7283f | 2008-02-07 00:14:11 -0800 | [diff] [blame] | 1068 | 			break; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 1069 |  | 
 | 1070 | 		page = pc->page; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 1071 | 		if (unlikely(!PageCgroupUsed(pc))) | 
 | 1072 | 			continue; | 
| Hugh Dickins | 436c6541 | 2008-02-07 00:14:12 -0800 | [diff] [blame] | 1073 | 		if (unlikely(!PageLRU(page))) | 
| KAMEZAWA Hiroyuki | ff7283f | 2008-02-07 00:14:11 -0800 | [diff] [blame] | 1074 | 			continue; | 
| KAMEZAWA Hiroyuki | ff7283f | 2008-02-07 00:14:11 -0800 | [diff] [blame] | 1075 |  | 
| Hugh Dickins | 436c6541 | 2008-02-07 00:14:12 -0800 | [diff] [blame] | 1076 | 		scan++; | 
| KAMEZAWA Hiroyuki | 2ffebca | 2009-06-17 16:27:21 -0700 | [diff] [blame] | 1077 | 		ret = __isolate_lru_page(page, mode, file); | 
 | 1078 | 		switch (ret) { | 
 | 1079 | 		case 0: | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1080 | 			list_move(&page->lru, dst); | 
| KAMEZAWA Hiroyuki | 2ffebca | 2009-06-17 16:27:21 -0700 | [diff] [blame] | 1081 | 			mem_cgroup_del_lru(page); | 
| Rik van Riel | 2c888cf | 2011-01-13 15:47:13 -0800 | [diff] [blame] | 1082 | 			nr_taken += hpage_nr_pages(page); | 
| KAMEZAWA Hiroyuki | 2ffebca | 2009-06-17 16:27:21 -0700 | [diff] [blame] | 1083 | 			break; | 
 | 1084 | 		case -EBUSY: | 
 | 1085 | 			/* we don't affect global LRU but rotate in our LRU */ | 
 | 1086 | 			mem_cgroup_rotate_lru_list(page, page_lru(page)); | 
 | 1087 | 			break; | 
 | 1088 | 		default: | 
 | 1089 | 			break; | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1090 | 		} | 
 | 1091 | 	} | 
 | 1092 |  | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1093 | 	*scanned = scan; | 
| KOSAKI Motohiro | cc8e970 | 2010-08-09 17:19:57 -0700 | [diff] [blame] | 1094 |  | 
 | 1095 | 	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken, | 
 | 1096 | 				      0, 0, 0, mode); | 
 | 1097 |  | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 1098 | 	return nr_taken; | 
 | 1099 | } | 
 | 1100 |  | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1101 | #define mem_cgroup_from_res_counter(counter, member)	\ | 
 | 1102 | 	container_of(counter, struct mem_cgroup, member) | 
 | 1103 |  | 
| Daisuke Nishimura | b85a96c | 2009-01-07 18:08:12 -0800 | [diff] [blame] | 1104 | static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) | 
 | 1105 | { | 
 | 1106 | 	if (do_swap_account) { | 
 | 1107 | 		if (res_counter_check_under_limit(&mem->res) && | 
 | 1108 | 			res_counter_check_under_limit(&mem->memsw)) | 
 | 1109 | 			return true; | 
 | 1110 | 	} else | 
 | 1111 | 		if (res_counter_check_under_limit(&mem->res)) | 
 | 1112 | 			return true; | 
 | 1113 | 	return false; | 
 | 1114 | } | 
 | 1115 |  | 
| Johannes Weiner | 1994282 | 2011-02-01 15:52:43 -0800 | [diff] [blame] | 1116 | /** | 
 | 1117 |  * mem_cgroup_check_margin - check if the memory cgroup allows charging | 
 | 1118 |  * @mem: memory cgroup to check | 
 | 1119 |  * @bytes: the number of bytes the caller intends to charge | 
 | 1120 |  * | 
 | 1121 |  * Returns a boolean value on whether @mem can be charged @bytes or | 
 | 1122 |  * whether this would exceed the limit. | 
 | 1123 |  */ | 
 | 1124 | static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes) | 
 | 1125 | { | 
 | 1126 | 	if (!res_counter_check_margin(&mem->res, bytes)) | 
 | 1127 | 		return false; | 
 | 1128 | 	if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes)) | 
 | 1129 | 		return false; | 
 | 1130 | 	return true; | 
 | 1131 | } | 
 | 1132 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 1133 | static unsigned int get_swappiness(struct mem_cgroup *memcg) | 
 | 1134 | { | 
 | 1135 | 	struct cgroup *cgrp = memcg->css.cgroup; | 
 | 1136 | 	unsigned int swappiness; | 
 | 1137 |  | 
 | 1138 | 	/* root ? */ | 
 | 1139 | 	if (cgrp->parent == NULL) | 
 | 1140 | 		return vm_swappiness; | 
 | 1141 |  | 
 | 1142 | 	spin_lock(&memcg->reclaim_param_lock); | 
 | 1143 | 	swappiness = memcg->swappiness; | 
 | 1144 | 	spin_unlock(&memcg->reclaim_param_lock); | 
 | 1145 |  | 
 | 1146 | 	return swappiness; | 
 | 1147 | } | 
 | 1148 |  | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1149 | static void mem_cgroup_start_move(struct mem_cgroup *mem) | 
 | 1150 | { | 
 | 1151 | 	int cpu; | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1152 |  | 
 | 1153 | 	get_online_cpus(); | 
 | 1154 | 	spin_lock(&mem->pcp_counter_lock); | 
 | 1155 | 	for_each_online_cpu(cpu) | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1156 | 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1157 | 	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; | 
 | 1158 | 	spin_unlock(&mem->pcp_counter_lock); | 
 | 1159 | 	put_online_cpus(); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1160 |  | 
 | 1161 | 	synchronize_rcu(); | 
 | 1162 | } | 
 | 1163 |  | 
 | 1164 | static void mem_cgroup_end_move(struct mem_cgroup *mem) | 
 | 1165 | { | 
 | 1166 | 	int cpu; | 
 | 1167 |  | 
 | 1168 | 	if (!mem) | 
 | 1169 | 		return; | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1170 | 	get_online_cpus(); | 
 | 1171 | 	spin_lock(&mem->pcp_counter_lock); | 
 | 1172 | 	for_each_online_cpu(cpu) | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1173 | 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1174 | 	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; | 
 | 1175 | 	spin_unlock(&mem->pcp_counter_lock); | 
 | 1176 | 	put_online_cpus(); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1177 | } | 
 | 1178 | /* | 
 | 1179 |  * Two routines for checking whether "mem" is under move_account() or not. | 
 | 1180 |  * | 
 | 1181 |  * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is | 
 | 1182 |  *			  used to avoid races in accounting. If true, | 
 | 1183 |  *			  pc->mem_cgroup may be overwritten. | 
 | 1184 |  * | 
 | 1185 |  * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or | 
 | 1186 |  *			  under the hierarchy of moving cgroups. This is used | 
 | 1187 |  *			  for waiting at high memory pressure caused by "move". | 
 | 1188 |  */ | 
 | 1189 |  | 
 | 1190 | static bool mem_cgroup_stealed(struct mem_cgroup *mem) | 
 | 1191 | { | 
 | 1192 | 	VM_BUG_ON(!rcu_read_lock_held()); | 
 | 1193 | 	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0; | 
 | 1194 | } | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1195 |  | 
 | 1196 | static bool mem_cgroup_under_move(struct mem_cgroup *mem) | 
 | 1197 | { | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 1198 | 	struct mem_cgroup *from; | 
 | 1199 | 	struct mem_cgroup *to; | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1200 | 	bool ret = false; | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 1201 | 	/* | 
 | 1202 | 	 * Unlike the task_move routines, we access mc.to and mc.from without the | 
 | 1203 | 	 * mutual exclusion of cgroup_mutex. Here, we take the spinlock instead. | 
 | 1204 | 	 */ | 
 | 1205 | 	spin_lock(&mc.lock); | 
 | 1206 | 	from = mc.from; | 
 | 1207 | 	to = mc.to; | 
 | 1208 | 	if (!from) | 
 | 1209 | 		goto unlock; | 
 | 1210 | 	if (from == mem || to == mem | 
 | 1211 | 	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css)) | 
 | 1212 | 	    || (mem->use_hierarchy && css_is_ancestor(&to->css,	&mem->css))) | 
 | 1213 | 		ret = true; | 
 | 1214 | unlock: | 
 | 1215 | 	spin_unlock(&mc.lock); | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1216 | 	return ret; | 
 | 1217 | } | 
 | 1218 |  | 
 | 1219 | static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) | 
 | 1220 | { | 
 | 1221 | 	if (mc.moving_task && current != mc.moving_task) { | 
 | 1222 | 		if (mem_cgroup_under_move(mem)) { | 
 | 1223 | 			DEFINE_WAIT(wait); | 
 | 1224 | 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE); | 
 | 1225 | 			/* moving charge context might have finished. */ | 
 | 1226 | 			if (mc.moving_task) | 
 | 1227 | 				schedule(); | 
 | 1228 | 			finish_wait(&mc.waitq, &wait); | 
 | 1229 | 			return true; | 
 | 1230 | 		} | 
 | 1231 | 	} | 
 | 1232 | 	return false; | 
 | 1233 | } | 
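 |  |  | 
 |  | /* | 
 |  |  * Hedged sketch of the mover-side pattern (simplified; the real user is the | 
 |  |  * charge-moving code elsewhere in this file, and move_account_sketch() is | 
 |  |  * only an illustration, not a real function): | 
 |  |  */ | 
 |  | static void __maybe_unused move_account_sketch(struct mem_cgroup *from) | 
 |  | { | 
 |  | 	mem_cgroup_start_move(from);	/* raise MEM_CGROUP_ON_MOVE on each cpu */ | 
 |  | 	/* ... move page charges; readers now see mem_cgroup_stealed() == true | 
 |  | 	 * and take the page_cgroup move lock before trusting pc->mem_cgroup ... */ | 
 |  | 	mem_cgroup_end_move(from);	/* drop the per-cpu markers again */ | 
 |  | } | 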
 | 1234 |  | 
| Balbir Singh | e222432 | 2009-04-02 16:57:39 -0700 | [diff] [blame] | 1235 | /** | 
| Kirill A. Shutemov | 6a6135b | 2010-03-10 15:22:25 -0800 | [diff] [blame] | 1236 |  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. | 
| Balbir Singh | e222432 | 2009-04-02 16:57:39 -0700 | [diff] [blame] | 1237 |  * @memcg: The memory cgroup that went over limit | 
 | 1238 |  * @p: Task that is going to be killed | 
 | 1239 |  * | 
 | 1240 |  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is | 
 | 1241 |  * enabled | 
 | 1242 |  */ | 
 | 1243 | void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | 
 | 1244 | { | 
 | 1245 | 	struct cgroup *task_cgrp; | 
 | 1246 | 	struct cgroup *mem_cgrp; | 
 | 1247 | 	/* | 
 | 1248 | 	 * Need a buffer in BSS, can't rely on allocations. The code relies | 
 | 1249 | 	 * on the assumption that OOM is serialized for memory controller. | 
 | 1250 | 	 * If this assumption is broken, revisit this code. | 
 | 1251 | 	 */ | 
 | 1252 | 	static char memcg_name[PATH_MAX]; | 
 | 1253 | 	int ret; | 
 | 1254 |  | 
| Daisuke Nishimura | d31f56d | 2009-12-15 16:47:12 -0800 | [diff] [blame] | 1255 | 	if (!memcg || !p) | 
| Balbir Singh | e222432 | 2009-04-02 16:57:39 -0700 | [diff] [blame] | 1256 | 		return; | 
 | 1257 |  | 
 | 1258 |  | 
 | 1259 | 	rcu_read_lock(); | 
 | 1260 |  | 
 | 1261 | 	mem_cgrp = memcg->css.cgroup; | 
 | 1262 | 	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id); | 
 | 1263 |  | 
 | 1264 | 	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX); | 
 | 1265 | 	if (ret < 0) { | 
 | 1266 | 		/* | 
 | 1267 | 		 * Unfortunately, we are unable to convert to a useful name, | 
 | 1268 | 		 * but we'll still print out the usage information. | 
 | 1269 | 		 */ | 
 | 1270 | 		rcu_read_unlock(); | 
 | 1271 | 		goto done; | 
 | 1272 | 	} | 
 | 1273 | 	rcu_read_unlock(); | 
 | 1274 |  | 
 | 1275 | 	printk(KERN_INFO "Task in %s killed", memcg_name); | 
 | 1276 |  | 
 | 1277 | 	rcu_read_lock(); | 
 | 1278 | 	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX); | 
 | 1279 | 	if (ret < 0) { | 
 | 1280 | 		rcu_read_unlock(); | 
 | 1281 | 		goto done; | 
 | 1282 | 	} | 
 | 1283 | 	rcu_read_unlock(); | 
 | 1284 |  | 
 | 1285 | 	/* | 
 | 1286 | 	 * Continues from above, so we don't need a KERN_ level | 
 | 1287 | 	 */ | 
 | 1288 | 	printk(KERN_CONT " as a result of limit of %s\n", memcg_name); | 
 | 1289 | done: | 
 | 1290 |  | 
 | 1291 | 	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n", | 
 | 1292 | 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, | 
 | 1293 | 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, | 
 | 1294 | 		res_counter_read_u64(&memcg->res, RES_FAILCNT)); | 
 | 1295 | 	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, " | 
 | 1296 | 		"failcnt %llu\n", | 
 | 1297 | 		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, | 
 | 1298 | 		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, | 
 | 1299 | 		res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); | 
 | 1300 | } | 
 | 1301 |  | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 1302 | /* | 
 | 1303 |  * This function returns the number of memcgs under the hierarchy tree. | 
 | 1304 |  * Returns 1 (self count) if there are no children. | 
 | 1305 |  */ | 
 | 1306 | static int mem_cgroup_count_children(struct mem_cgroup *mem) | 
 | 1307 | { | 
 | 1308 | 	int num = 0; | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1309 | 	struct mem_cgroup *iter; | 
 | 1310 |  | 
 | 1311 | 	for_each_mem_cgroup_tree(iter, mem) | 
 | 1312 | 		num++; | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 1313 | 	return num; | 
 | 1314 | } | 
 | 1315 |  | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1316 | /* | 
| David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 1317 |  * Return the memory (and swap, if configured) limit for a memcg. | 
 | 1318 |  */ | 
 | 1319 | u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) | 
 | 1320 | { | 
 | 1321 | 	u64 limit; | 
 | 1322 | 	u64 memsw; | 
 | 1323 |  | 
| Johannes Weiner | f3e8eb7 | 2011-01-13 15:47:39 -0800 | [diff] [blame] | 1324 | 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 
 | 1325 | 	limit += total_swap_pages << PAGE_SHIFT; | 
 | 1326 |  | 
| David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 1327 | 	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 
 | 1328 | 	/* | 
 | 1329 | 	 * If memsw is finite and limits the amount of swap space available | 
 | 1330 | 	 * to this memcg, return that limit. | 
 | 1331 | 	 */ | 
 | 1332 | 	return min(limit, memsw); | 
 | 1333 | } | 
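 |  |  | 
 |  | /* | 
 |  |  * Worked example (hypothetical numbers): with a 1GB memory limit, 2GB of | 
 |  |  * total swap and an effectively unlimited memsw limit, this returns | 
 |  |  * min(1GB + 2GB, RESOURCE_MAX) = 3GB; if memsw were instead limited to | 
 |  |  * 1.5GB, it would return min(3GB, 1.5GB) = 1.5GB. | 
 |  |  */ | 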
 | 1334 |  | 
 | 1335 | /* | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1336 |  * Visit the first child (need not be the first child as per the ordering | 
 | 1337 |  * of the cgroup list, since we track last_scanned_child) of @mem and use | 
 | 1338 |  * that to reclaim free pages from. | 
 | 1339 |  */ | 
 | 1340 | static struct mem_cgroup * | 
 | 1341 | mem_cgroup_select_victim(struct mem_cgroup *root_mem) | 
 | 1342 | { | 
 | 1343 | 	struct mem_cgroup *ret = NULL; | 
 | 1344 | 	struct cgroup_subsys_state *css; | 
 | 1345 | 	int nextid, found; | 
 | 1346 |  | 
 | 1347 | 	if (!root_mem->use_hierarchy) { | 
 | 1348 | 		css_get(&root_mem->css); | 
 | 1349 | 		ret = root_mem; | 
 | 1350 | 	} | 
 | 1351 |  | 
 | 1352 | 	while (!ret) { | 
 | 1353 | 		rcu_read_lock(); | 
 | 1354 | 		nextid = root_mem->last_scanned_child + 1; | 
 | 1355 | 		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css, | 
 | 1356 | 				   &found); | 
 | 1357 | 		if (css && css_tryget(css)) | 
 | 1358 | 			ret = container_of(css, struct mem_cgroup, css); | 
 | 1359 |  | 
 | 1360 | 		rcu_read_unlock(); | 
 | 1361 | 		/* Updates scanning parameter */ | 
 | 1362 | 		spin_lock(&root_mem->reclaim_param_lock); | 
 | 1363 | 		if (!css) { | 
 | 1364 | 			/* this means start scan from ID:1 */ | 
 | 1365 | 			root_mem->last_scanned_child = 0; | 
 | 1366 | 		} else | 
 | 1367 | 			root_mem->last_scanned_child = found; | 
 | 1368 | 		spin_unlock(&root_mem->reclaim_param_lock); | 
 | 1369 | 	} | 
 | 1370 |  | 
 | 1371 | 	return ret; | 
 | 1372 | } | 
 | 1373 |  | 
 | 1374 | /* | 
 | 1375 |  * Scan the hierarchy if needed to reclaim memory. We remember the last child | 
 | 1376 |  * we reclaimed from, so that we don't end up penalizing one child extensively | 
 | 1377 |  * based on its position in the children list. | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1378 |  * | 
 | 1379 |  * root_mem is the original ancestor that we've been reclaiming from. | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1380 |  * | 
 | 1381 |  * We give up and return to the caller when we visit root_mem twice. | 
 | 1382 |  * (other groups can be removed while we're walking....) | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 1383 |  * | 
 | 1384 |  * If shrink==true, this returns immediately to avoid freeing too much. | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1385 |  */ | 
 | 1386 | static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1387 | 						struct zone *zone, | 
| Balbir Singh | 75822b4 | 2009-09-23 15:56:38 -0700 | [diff] [blame] | 1388 | 						gfp_t gfp_mask, | 
 | 1389 | 						unsigned long reclaim_options) | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1390 | { | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1391 | 	struct mem_cgroup *victim; | 
 | 1392 | 	int ret, total = 0; | 
 | 1393 | 	int loop = 0; | 
| Balbir Singh | 75822b4 | 2009-09-23 15:56:38 -0700 | [diff] [blame] | 1394 | 	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; | 
 | 1395 | 	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1396 | 	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; | 
 | 1397 | 	unsigned long excess = mem_cgroup_get_excess(root_mem); | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1398 |  | 
| KAMEZAWA Hiroyuki | 22a668d | 2009-06-17 16:27:19 -0700 | [diff] [blame] | 1399 | 	/* If memsw_is_minimum==1, swap-out is of no use. */ | 
 | 1400 | 	if (root_mem->memsw_is_minimum) | 
 | 1401 | 		noswap = true; | 
 | 1402 |  | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1403 | 	while (1) { | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1404 | 		victim = mem_cgroup_select_victim(root_mem); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1405 | 		if (victim == root_mem) { | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1406 | 			loop++; | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1407 | 			if (loop >= 1) | 
 | 1408 | 				drain_all_stock_async(); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1409 | 			if (loop >= 2) { | 
 | 1410 | 				/* | 
 | 1411 | 				 * If we have not been able to reclaim | 
 | 1412 | 				 * anything, it might be because there are | 
 | 1413 | 				 * no reclaimable pages under this hierarchy. | 
 | 1414 | 				 */ | 
 | 1415 | 				if (!check_soft || !total) { | 
 | 1416 | 					css_put(&victim->css); | 
 | 1417 | 					break; | 
 | 1418 | 				} | 
 | 1419 | 				/* | 
 | 1420 | 				 * We want to do more targeted reclaim. | 
 | 1421 | 				 * excess >> 2 is not too excessive, so we don't | 
 | 1422 | 				 * reclaim too much, nor too little, so we don't | 
 | 1423 | 				 * keep coming back to reclaim from this cgroup. | 
 | 1424 | 				 */ | 
 | 1425 | 				if (total >= (excess >> 2) || | 
 | 1426 | 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) { | 
 | 1427 | 					css_put(&victim->css); | 
 | 1428 | 					break; | 
 | 1429 | 				} | 
 | 1430 | 			} | 
 | 1431 | 		} | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 1432 | 		if (!mem_cgroup_local_usage(victim)) { | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1433 | 			/* this cgroup's local usage == 0 */ | 
 | 1434 | 			css_put(&victim->css); | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1435 | 			continue; | 
 | 1436 | 		} | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1437 | 		/* we use swappiness of local cgroup */ | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1438 | 		if (check_soft) | 
 | 1439 | 			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, | 
| KOSAKI Motohiro | 14fec79 | 2010-08-10 18:03:05 -0700 | [diff] [blame] | 1440 | 				noswap, get_swappiness(victim), zone); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1441 | 		else | 
 | 1442 | 			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, | 
 | 1443 | 						noswap, get_swappiness(victim)); | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1444 | 		css_put(&victim->css); | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 1445 | 		/* | 
 | 1446 | 		 * When shrinking usage, we can't tell whether we should stop | 
 | 1447 | 		 * here or reclaim more; that depends on the callers. | 
 | 1448 | 		 * last_scanned_child is enough to keep fairness under the tree. | 
 | 1449 | 		 */ | 
 | 1450 | 		if (shrink) | 
 | 1451 | 			return ret; | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1452 | 		total += ret; | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 1453 | 		if (check_soft) { | 
 | 1454 | 			if (res_counter_check_under_soft_limit(&root_mem->res)) | 
 | 1455 | 				return total; | 
 | 1456 | 		} else if (mem_cgroup_check_under_limit(root_mem)) | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1457 | 			return 1 + total; | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1458 | 	} | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 1459 | 	return total; | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 1460 | } | 
 | 1461 |  | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1462 | /* | 
 | 1463 |  * Check whether the OOM killer is already running under our hierarchy. | 
 | 1464 |  * If someone else is running it, return false. | 
 | 1465 |  */ | 
 | 1466 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | 
 | 1467 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1468 | 	int x, lock_count = 0; | 
 | 1469 | 	struct mem_cgroup *iter; | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 1470 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1471 | 	for_each_mem_cgroup_tree(iter, mem) { | 
 | 1472 | 		x = atomic_inc_return(&iter->oom_lock); | 
 | 1473 | 		lock_count = max(x, lock_count); | 
 | 1474 | 	} | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1475 |  | 
 | 1476 | 	if (lock_count == 1) | 
 | 1477 | 		return true; | 
 | 1478 | 	return false; | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 1479 | } | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 1480 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1481 | static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 1482 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1483 | 	struct mem_cgroup *iter; | 
 | 1484 |  | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1485 | 	/* | 
 | 1486 | 	 * When a new child is created while the hierarchy is under oom, | 
 | 1487 | 	 * mem_cgroup_oom_lock() may not be called. We have to use | 
 | 1488 | 	 * atomic_add_unless() here. | 
 | 1489 | 	 */ | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 1490 | 	for_each_mem_cgroup_tree(iter, mem) | 
 | 1491 | 		atomic_add_unless(&iter->oom_lock, -1, 0); | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 1492 | 	return 0; | 
 | 1493 | } | 
 | 1494 |  | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1495 |  | 
 | 1496 | static DEFINE_MUTEX(memcg_oom_mutex); | 
 | 1497 | static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); | 
 | 1498 |  | 
| KAMEZAWA Hiroyuki | dc98df5 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1499 | struct oom_wait_info { | 
 | 1500 | 	struct mem_cgroup *mem; | 
 | 1501 | 	wait_queue_t	wait; | 
 | 1502 | }; | 
 | 1503 |  | 
 | 1504 | static int memcg_oom_wake_function(wait_queue_t *wait, | 
 | 1505 | 	unsigned mode, int sync, void *arg) | 
 | 1506 | { | 
 | 1507 | 	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg; | 
 | 1508 | 	struct oom_wait_info *oom_wait_info; | 
 | 1509 |  | 
 | 1510 | 	oom_wait_info = container_of(wait, struct oom_wait_info, wait); | 
 | 1511 |  | 
 | 1512 | 	if (oom_wait_info->mem == wake_mem) | 
 | 1513 | 		goto wakeup; | 
 | 1514 | 	/* if no hierarchy, no match */ | 
 | 1515 | 	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy) | 
 | 1516 | 		return 0; | 
 | 1517 | 	/* | 
 | 1518 | 	 * Both oom_wait_info->mem and wake_mem are stable under us, | 
 | 1519 | 	 * so we can use css_is_ancestor() without worrying about RCU. | 
 | 1520 | 	 */ | 
 | 1521 | 	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) && | 
 | 1522 | 	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css)) | 
 | 1523 | 		return 0; | 
 | 1524 |  | 
 | 1525 | wakeup: | 
 | 1526 | 	return autoremove_wake_function(wait, mode, sync, arg); | 
 | 1527 | } | 
 | 1528 |  | 
 | 1529 | static void memcg_wakeup_oom(struct mem_cgroup *mem) | 
 | 1530 | { | 
 | 1531 | 	/* for filtering, pass "mem" as argument. */ | 
 | 1532 | 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem); | 
 | 1533 | } | 
 | 1534 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1535 | static void memcg_oom_recover(struct mem_cgroup *mem) | 
 | 1536 | { | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 1537 | 	if (mem && atomic_read(&mem->oom_lock)) | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1538 | 		memcg_wakeup_oom(mem); | 
 | 1539 | } | 
 | 1540 |  | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1541 | /* | 
 | 1542 |  * Try to invoke the OOM killer. Returns false if we should exit the memory-reclaim loop. | 
 | 1543 |  */ | 
 | 1544 | bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | 
 | 1545 | { | 
| KAMEZAWA Hiroyuki | dc98df5 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1546 | 	struct oom_wait_info owait; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1547 | 	bool locked, need_to_kill; | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1548 |  | 
| KAMEZAWA Hiroyuki | dc98df5 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1549 | 	owait.mem = mem; | 
 | 1550 | 	owait.wait.flags = 0; | 
 | 1551 | 	owait.wait.func = memcg_oom_wake_function; | 
 | 1552 | 	owait.wait.private = current; | 
 | 1553 | 	INIT_LIST_HEAD(&owait.wait.task_list); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1554 | 	need_to_kill = true; | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1555 | 	/* At first, try to OOM lock hierarchy under mem.*/ | 
 | 1556 | 	mutex_lock(&memcg_oom_mutex); | 
 | 1557 | 	locked = mem_cgroup_oom_lock(mem); | 
 | 1558 | 	/* | 
 | 1559 | 	 * Even if signal_pending(), we can't quit charge() loop without | 
 | 1560 | 	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL | 
 | 1561 | 	 * under OOM is always welcome, so use TASK_KILLABLE here. | 
 | 1562 | 	 */ | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1563 | 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE); | 
 | 1564 | 	if (!locked || mem->oom_kill_disable) | 
 | 1565 | 		need_to_kill = false; | 
 | 1566 | 	if (locked) | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1567 | 		mem_cgroup_oom_notify(mem); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1568 | 	mutex_unlock(&memcg_oom_mutex); | 
 | 1569 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1570 | 	if (need_to_kill) { | 
 | 1571 | 		finish_wait(&memcg_oom_waitq, &owait.wait); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1572 | 		mem_cgroup_out_of_memory(mem, mask); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 1573 | 	} else { | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1574 | 		schedule(); | 
| KAMEZAWA Hiroyuki | dc98df5 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1575 | 		finish_wait(&memcg_oom_waitq, &owait.wait); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1576 | 	} | 
 | 1577 | 	mutex_lock(&memcg_oom_mutex); | 
 | 1578 | 	mem_cgroup_oom_unlock(mem); | 
| KAMEZAWA Hiroyuki | dc98df5 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 1579 | 	memcg_wakeup_oom(mem); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1580 | 	mutex_unlock(&memcg_oom_mutex); | 
 | 1581 |  | 
 | 1582 | 	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) | 
 | 1583 | 		return false; | 
 | 1584 | 	/* Give chance to dying process */ | 
 | 1585 | 	schedule_timeout(1); | 
 | 1586 | 	return true; | 
| KAMEZAWA Hiroyuki | 0b7f569 | 2009-04-02 16:57:38 -0700 | [diff] [blame] | 1587 | } | 
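 |  |  | 
 |  | /* | 
 |  |  * Summary of the protocol above (descriptive only, nothing new is added): | 
 |  |  * only the hierarchy that wins mem_cgroup_oom_lock() actually calls | 
 |  |  * mem_cgroup_out_of_memory(); everyone else sleeps on memcg_oom_waitq until | 
 |  |  * memcg_wakeup_oom() is called once the lock holder is done. | 
 |  |  */ | 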
 | 1588 |  | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1589 | /* | 
 | 1590 |  * Currently used to update mapped file statistics, but the routine can be | 
 | 1591 |  * generalized to update other statistics as well. | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1592 |  * | 
 | 1593 |  * Notes: Race condition | 
 | 1594 |  * | 
 | 1595 |  * We usually use page_cgroup_lock() for accessing page_cgroup members, but | 
 | 1596 |  * it tends to be costly. Under some conditions, we don't need | 
 | 1597 |  * to do so _always_. | 
 | 1598 |  * | 
 | 1599 |  * Considering "charge", lock_page_cgroup() is not required because all | 
 | 1600 |  * file-stat operations happen after a page is attached to the radix-tree. | 
 | 1601 |  * There is no race with "charge". | 
 | 1602 |  * | 
 | 1603 |  * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup | 
 | 1604 |  * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even | 
 | 1605 |  * if there is a race with "uncharge". The statistics themselves are properly | 
 | 1606 |  * handled by flags. | 
 | 1607 |  * | 
 | 1608 |  * Considering "move", this is an only case we see a race. To make the race | 
 | 1609 |  * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are | 
 | 1610 |  * possibility of race condition. If there is, we take a lock. | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1611 |  */ | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1612 |  | 
| Greg Thelen | 2a7106f | 2011-01-13 15:47:37 -0800 | [diff] [blame] | 1613 | void mem_cgroup_update_page_stat(struct page *page, | 
 | 1614 | 				 enum mem_cgroup_page_stat_item idx, int val) | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1615 | { | 
 | 1616 | 	struct mem_cgroup *mem; | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1617 | 	struct page_cgroup *pc = lookup_page_cgroup(page); | 
 | 1618 | 	bool need_unlock = false; | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 1619 | 	unsigned long uninitialized_var(flags); | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1620 |  | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1621 | 	if (unlikely(!pc)) | 
 | 1622 | 		return; | 
 | 1623 |  | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1624 | 	rcu_read_lock(); | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1625 | 	mem = pc->mem_cgroup; | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1626 | 	if (unlikely(!mem || !PageCgroupUsed(pc))) | 
 | 1627 | 		goto out; | 
 | 1628 | 	/* pc->mem_cgroup is unstable ? */ | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 1629 | 	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) { | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1630 | 		/* take a lock against to access pc->mem_cgroup */ | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 1631 | 		move_lock_page_cgroup(pc, &flags); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1632 | 		need_unlock = true; | 
 | 1633 | 		mem = pc->mem_cgroup; | 
 | 1634 | 		if (!mem || !PageCgroupUsed(pc)) | 
 | 1635 | 			goto out; | 
 | 1636 | 	} | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1637 |  | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1638 | 	switch (idx) { | 
| Greg Thelen | 2a7106f | 2011-01-13 15:47:37 -0800 | [diff] [blame] | 1639 | 	case MEMCG_NR_FILE_MAPPED: | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1640 | 		if (val > 0) | 
 | 1641 | 			SetPageCgroupFileMapped(pc); | 
 | 1642 | 		else if (!page_mapped(page)) | 
| KAMEZAWA Hiroyuki | 0c270f8 | 2010-10-27 15:33:39 -0700 | [diff] [blame] | 1643 | 			ClearPageCgroupFileMapped(pc); | 
| Greg Thelen | 2a7106f | 2011-01-13 15:47:37 -0800 | [diff] [blame] | 1644 | 		idx = MEM_CGROUP_STAT_FILE_MAPPED; | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1645 | 		break; | 
 | 1646 | 	default: | 
 | 1647 | 		BUG(); | 
| KAMEZAWA Hiroyuki | 8725d54 | 2010-04-06 14:35:05 -0700 | [diff] [blame] | 1648 | 	} | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1649 |  | 
| Greg Thelen | 2a7106f | 2011-01-13 15:47:37 -0800 | [diff] [blame] | 1650 | 	this_cpu_add(mem->stat->count[idx], val); | 
 | 1651 |  | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1652 | out: | 
 | 1653 | 	if (unlikely(need_unlock)) | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 1654 | 		move_unlock_page_cgroup(pc, &flags); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 1655 | 	rcu_read_unlock(); | 
 | 1656 | 	return; | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 1657 | } | 
| Greg Thelen | 2a7106f | 2011-01-13 15:47:37 -0800 | [diff] [blame] | 1658 | EXPORT_SYMBOL(mem_cgroup_update_page_stat); | 
| KAMEZAWA Hiroyuki | 26174ef | 2010-10-27 15:33:43 -0700 | [diff] [blame] | 1659 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 1660 | /* | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1661 |  * size of first charge trial. "32" comes from vmscan.c's magic value. | 
 | 1662 |  * TODO: it may be necessary to use larger numbers on big-iron machines. | 
 | 1663 |  */ | 
 | 1664 | #define CHARGE_SIZE	(32 * PAGE_SIZE) | 
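 |  |  | 
 |  | /* | 
 |  |  * For example (assuming 4KB pages), CHARGE_SIZE is 128KB: one successful | 
 |  |  * res_counter charge can cover 32 subsequent same-cpu page charges served | 
 |  |  * from the per-cpu stock defined below. | 
 |  |  */ | 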
 | 1665 | struct memcg_stock_pcp { | 
 | 1666 | 	struct mem_cgroup *cached; /* this never be root cgroup */ | 
 | 1667 | 	int charge; | 
 | 1668 | 	struct work_struct work; | 
 | 1669 | }; | 
 | 1670 | static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); | 
 | 1671 | static atomic_t memcg_drain_count; | 
 | 1672 |  | 
 | 1673 | /* | 
 | 1674 |  * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is consumed | 
 | 1675 |  * from the local stock and true is returned. If the stock is 0 or holds charges | 
 | 1676 |  * from a cgroup other than the current target, false is returned and the stock | 
 | 1677 |  * will be refilled later. | 
 | 1678 |  */ | 
 | 1679 | static bool consume_stock(struct mem_cgroup *mem) | 
 | 1680 | { | 
 | 1681 | 	struct memcg_stock_pcp *stock; | 
 | 1682 | 	bool ret = true; | 
 | 1683 |  | 
 | 1684 | 	stock = &get_cpu_var(memcg_stock); | 
 | 1685 | 	if (mem == stock->cached && stock->charge) | 
 | 1686 | 		stock->charge -= PAGE_SIZE; | 
 | 1687 | 	else /* need to call res_counter_charge */ | 
 | 1688 | 		ret = false; | 
 | 1689 | 	put_cpu_var(memcg_stock); | 
 | 1690 | 	return ret; | 
 | 1691 | } | 
 | 1692 |  | 
 | 1693 | /* | 
 | 1694 |  * Return the stock cached in the percpu area to the res_counter and reset the cached information. | 
 | 1695 |  */ | 
 | 1696 | static void drain_stock(struct memcg_stock_pcp *stock) | 
 | 1697 | { | 
 | 1698 | 	struct mem_cgroup *old = stock->cached; | 
 | 1699 |  | 
 | 1700 | 	if (stock->charge) { | 
 | 1701 | 		res_counter_uncharge(&old->res, stock->charge); | 
 | 1702 | 		if (do_swap_account) | 
 | 1703 | 			res_counter_uncharge(&old->memsw, stock->charge); | 
 | 1704 | 	} | 
 | 1705 | 	stock->cached = NULL; | 
 | 1706 | 	stock->charge = 0; | 
 | 1707 | } | 
 | 1708 |  | 
 | 1709 | /* | 
 | 1710 |  * This must be called with preemption disabled, or by | 
 | 1711 |  * a thread which is pinned to the local cpu. | 
 | 1712 |  */ | 
 | 1713 | static void drain_local_stock(struct work_struct *dummy) | 
 | 1714 | { | 
 | 1715 | 	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); | 
 | 1716 | 	drain_stock(stock); | 
 | 1717 | } | 
 | 1718 |  | 
 | 1719 | /* | 
 | 1720 |  * Cache charges (val) taken from the res_counter in the local per-cpu area. | 
| Greg Thelen | 320cc51 | 2010-03-15 15:27:28 +0100 | [diff] [blame] | 1721 |  * They will be consumed by consume_stock() later. | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1722 |  */ | 
 | 1723 | static void refill_stock(struct mem_cgroup *mem, int val) | 
 | 1724 | { | 
 | 1725 | 	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); | 
 | 1726 |  | 
 | 1727 | 	if (stock->cached != mem) { /* reset if necessary */ | 
 | 1728 | 		drain_stock(stock); | 
 | 1729 | 		stock->cached = mem; | 
 | 1730 | 	} | 
 | 1731 | 	stock->charge += val; | 
 | 1732 | 	put_cpu_var(memcg_stock); | 
 | 1733 | } | 
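 |  |  | 
 |  | /* | 
 |  |  * Hedged sketch of the per-cpu stock fast path (simplified; the real user is | 
 |  |  * __mem_cgroup_try_charge() later in this file, and charge_one_page_sketch() | 
 |  |  * is only an illustration that ignores memsw accounting and batching limits): | 
 |  |  */ | 
 |  | static int __maybe_unused charge_one_page_sketch(struct mem_cgroup *mem) | 
 |  | { | 
 |  | 	struct res_counter *fail_res; | 
 |  |  | 
 |  | 	if (consume_stock(mem)) | 
 |  | 		return 0;		/* one page taken from the local stock */ | 
 |  | 	/* slow path: charge a whole batch and stock the surplus locally */ | 
 |  | 	if (res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res)) | 
 |  | 		return -ENOMEM; | 
 |  | 	refill_stock(mem, CHARGE_SIZE - PAGE_SIZE); | 
 |  | 	return 0; | 
 |  | } | 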
 | 1734 |  | 
 | 1735 | /* | 
 | 1736 |  * Tries to drain stocked charges in other cpus. This function is asynchronous | 
 | 1737 |  * and just puts a work item on each cpu to drain locally. The caller can | 
 | 1738 |  * expect some charges to be returned to the res_counter later but cannot | 
 | 1739 |  * wait for it. | 
 | 1740 |  */ | 
 | 1741 | static void drain_all_stock_async(void) | 
 | 1742 | { | 
 | 1743 | 	int cpu; | 
 | 1744 | 	/* This function schedules "drain" asynchronously. | 
 | 1745 | 	 * The result of "drain" is not directly handled by callers, so | 
 | 1746 | 	 * if someone is already draining, we don't have to drain again. | 
 | 1747 | 	 * In any case, the WORK_STRUCT_PENDING check in queue_work_on() | 
 | 1748 | 	 * will catch a race. We just do a loose check here. | 
 | 1749 | 	 */ | 
 | 1750 | 	if (atomic_read(&memcg_drain_count)) | 
 | 1751 | 		return; | 
 | 1752 | 	/* Notify other cpus that system-wide "drain" is running */ | 
 | 1753 | 	atomic_inc(&memcg_drain_count); | 
 | 1754 | 	get_online_cpus(); | 
 | 1755 | 	for_each_online_cpu(cpu) { | 
 | 1756 | 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 
 | 1757 | 		schedule_work_on(cpu, &stock->work); | 
 | 1758 | 	} | 
 | 1759 | 	put_online_cpus(); | 
 | 1760 | 	atomic_dec(&memcg_drain_count); | 
 | 1761 | 	/* We don't wait for flush_work */ | 
 | 1762 | } | 
 | 1763 |  | 
 | 1764 | /* This is a synchronous drain interface. */ | 
 | 1765 | static void drain_all_stock_sync(void) | 
 | 1766 | { | 
 | 1767 | 	/* called when force_empty is called */ | 
 | 1768 | 	atomic_inc(&memcg_drain_count); | 
 | 1769 | 	schedule_on_each_cpu(drain_local_stock); | 
 | 1770 | 	atomic_dec(&memcg_drain_count); | 
 | 1771 | } | 
 | 1772 |  | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1773 | /* | 
 | 1774 |  * This function drains the percpu counter value from a DEAD cpu and | 
 | 1775 |  * moves it to the local cpu. Note that this function can be preempted. | 
 | 1776 |  */ | 
 | 1777 | static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) | 
 | 1778 | { | 
 | 1779 | 	int i; | 
 | 1780 |  | 
 | 1781 | 	spin_lock(&mem->pcp_counter_lock); | 
 | 1782 | 	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { | 
 | 1783 | 		s64 x = per_cpu(mem->stat->count[i], cpu); | 
 | 1784 |  | 
 | 1785 | 		per_cpu(mem->stat->count[i], cpu) = 0; | 
 | 1786 | 		mem->nocpu_base.count[i] += x; | 
 | 1787 | 	} | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1788 | 	/* need to clear ON_MOVE value, works as a kind of lock. */ | 
 | 1789 | 	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; | 
 | 1790 | 	spin_unlock(&mem->pcp_counter_lock); | 
 | 1791 | } | 
 | 1792 |  | 
 | 1793 | static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) | 
 | 1794 | { | 
 | 1795 | 	int idx = MEM_CGROUP_ON_MOVE; | 
 | 1796 |  | 
 | 1797 | 	spin_lock(&mem->pcp_counter_lock); | 
 | 1798 | 	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1799 | 	spin_unlock(&mem->pcp_counter_lock); | 
 | 1800 | } | 
 | 1801 |  | 
 | 1802 | static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1803 | 					unsigned long action, | 
 | 1804 | 					void *hcpu) | 
 | 1805 | { | 
 | 1806 | 	int cpu = (unsigned long)hcpu; | 
 | 1807 | 	struct memcg_stock_pcp *stock; | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1808 | 	struct mem_cgroup *iter; | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1809 |  | 
| KAMEZAWA Hiroyuki | 1489eba | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1810 | 	if (action == CPU_ONLINE) { | 
 | 1811 | 		for_each_mem_cgroup_all(iter) | 
 | 1812 | 			synchronize_mem_cgroup_on_move(iter, cpu); | 
 | 1813 | 		return NOTIFY_OK; | 
 | 1814 | 	} | 
 | 1815 |  | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1816 | 	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1817 | 		return NOTIFY_OK; | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 1818 |  | 
 | 1819 | 	for_each_mem_cgroup_all(iter) | 
 | 1820 | 		mem_cgroup_drain_pcp_counter(iter, cpu); | 
 | 1821 |  | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1822 | 	stock = &per_cpu(memcg_stock, cpu); | 
 | 1823 | 	drain_stock(stock); | 
 | 1824 | 	return NOTIFY_OK; | 
 | 1825 | } | 
 | 1826 |  | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1827 |  | 
 | 1828 | /* See __mem_cgroup_try_charge() for details */ | 
 | 1829 | enum { | 
 | 1830 | 	CHARGE_OK,		/* success */ | 
 | 1831 | 	CHARGE_RETRY,		/* need to retry but retry is not bad */ | 
 | 1832 | 	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */ | 
 | 1833 | 	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough res. */ | 
 | 1834 | 	CHARGE_OOM_DIE,		/* the current is killed because of OOM */ | 
 | 1835 | }; | 
 | 1836 |  | 
 | 1837 | static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | 
 | 1838 | 				int csize, bool oom_check) | 
 | 1839 | { | 
 | 1840 | 	struct mem_cgroup *mem_over_limit; | 
 | 1841 | 	struct res_counter *fail_res; | 
 | 1842 | 	unsigned long flags = 0; | 
 | 1843 | 	int ret; | 
 | 1844 |  | 
 | 1845 | 	ret = res_counter_charge(&mem->res, csize, &fail_res); | 
 | 1846 |  | 
 | 1847 | 	if (likely(!ret)) { | 
 | 1848 | 		if (!do_swap_account) | 
 | 1849 | 			return CHARGE_OK; | 
 | 1850 | 		ret = res_counter_charge(&mem->memsw, csize, &fail_res); | 
 | 1851 | 		if (likely(!ret)) | 
 | 1852 | 			return CHARGE_OK; | 
 | 1853 |  | 
| KAMEZAWA Hiroyuki | 01c88e2 | 2011-01-25 15:07:27 -0800 | [diff] [blame] | 1854 | 		res_counter_uncharge(&mem->res, csize); | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1855 | 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); | 
 | 1856 | 		flags |= MEM_CGROUP_RECLAIM_NOSWAP; | 
 | 1857 | 	} else | 
 | 1858 | 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); | 
| Johannes Weiner | 9221edb | 2011-02-01 15:52:42 -0800 | [diff] [blame] | 1859 | 	/* | 
 | 1860 | 	 * csize can be either a huge page (HPAGE_SIZE), a batch of | 
 | 1861 | 	 * regular pages (CHARGE_SIZE), or a single regular page | 
 | 1862 | 	 * (PAGE_SIZE). | 
 | 1863 | 	 * | 
 | 1864 | 	 * Never reclaim on behalf of optional batching, retry with a | 
 | 1865 | 	 * single page instead. | 
 | 1866 | 	 */ | 
 | 1867 | 	if (csize == CHARGE_SIZE) | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1868 | 		return CHARGE_RETRY; | 
 | 1869 |  | 
 | 1870 | 	if (!(gfp_mask & __GFP_WAIT)) | 
 | 1871 | 		return CHARGE_WOULDBLOCK; | 
 | 1872 |  | 
 | 1873 | 	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, | 
| Johannes Weiner | 1994282 | 2011-02-01 15:52:43 -0800 | [diff] [blame] | 1874 | 					      gfp_mask, flags); | 
 | 1875 | 	if (mem_cgroup_check_margin(mem_over_limit, csize)) | 
 | 1876 | 		return CHARGE_RETRY; | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1877 | 	/* | 
| Johannes Weiner | 1994282 | 2011-02-01 15:52:43 -0800 | [diff] [blame] | 1878 | 	 * Even though the limit is exceeded at this point, reclaim | 
 | 1879 | 	 * may have been able to free some pages.  Retry the charge | 
 | 1880 | 	 * before killing the task. | 
 | 1881 | 	 * | 
 | 1882 | 	 * Only for regular pages, though: huge pages are rather | 
 | 1883 | 	 * unlikely to succeed so close to the limit, and we fall back | 
 | 1884 | 	 * to regular pages anyway in case of failure. | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1885 | 	 */ | 
| Johannes Weiner | 1994282 | 2011-02-01 15:52:43 -0800 | [diff] [blame] | 1886 | 	if (csize == PAGE_SIZE && ret) | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1887 | 		return CHARGE_RETRY; | 
 | 1888 |  | 
 | 1889 | 	/* | 
 | 1890 | 	 * At task move, charge accounts can be doubly counted. So, it's | 
 | 1891 | 	 * better to wait until the end of task_move if something is going on. | 
 | 1892 | 	 */ | 
 | 1893 | 	if (mem_cgroup_wait_acct_move(mem_over_limit)) | 
 | 1894 | 		return CHARGE_RETRY; | 
 | 1895 |  | 
 | 1896 | 	/* If we don't need to call the oom-killer at all, return immediately */ | 
 | 1897 | 	if (!oom_check) | 
 | 1898 | 		return CHARGE_NOMEM; | 
 | 1899 | 	/* check OOM */ | 
 | 1900 | 	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) | 
 | 1901 | 		return CHARGE_OOM_DIE; | 
 | 1902 |  | 
 | 1903 | 	return CHARGE_RETRY; | 
 | 1904 | } | 
 | 1905 |  | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1906 | /* | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 1907 |  * Unlike the exported interface, an "oom" parameter is added. If oom==true, | 
 | 1908 |  * the oom-killer can be invoked. | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1909 |  */ | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 1910 | static int __mem_cgroup_try_charge(struct mm_struct *mm, | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 1911 | 				   gfp_t gfp_mask, | 
 | 1912 | 				   struct mem_cgroup **memcg, bool oom, | 
 | 1913 | 				   int page_size) | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1914 | { | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1915 | 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; | 
 | 1916 | 	struct mem_cgroup *mem = NULL; | 
 | 1917 | 	int ret; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 1918 | 	int csize = max(CHARGE_SIZE, (unsigned long) page_size); | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 1919 |  | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 1920 | 	/* | 
 | 1921 | 	 * Unlike the global VM's OOM kill, we are not under a system-level | 
 | 1922 | 	 * memory shortage. So let a dying process go ahead, in addition to a | 
 | 1923 | 	 * MEMDIE process. | 
 | 1924 | 	 */ | 
 | 1925 | 	if (unlikely(test_thread_flag(TIF_MEMDIE) | 
 | 1926 | 		     || fatal_signal_pending(current))) | 
 | 1927 | 		goto bypass; | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 1928 |  | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1929 | 	/* | 
| Hugh Dickins | 3be9127 | 2008-02-07 00:14:19 -0800 | [diff] [blame] | 1930 | 	 * We always charge the cgroup the mm_struct belongs to. | 
 | 1931 | 	 * The mm_struct's mem_cgroup changes on task migration if the | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1932 | 	 * thread group leader migrates. It's possible that mm is not | 
 | 1933 | 	 * set, if so charge the init_mm (happens for pagecache usage). | 
 | 1934 | 	 */ | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1935 | 	if (!*memcg && !mm) | 
 | 1936 | 		goto bypass; | 
 | 1937 | again: | 
 | 1938 | 	if (*memcg) { /* css should be a valid one */ | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1939 | 		mem = *memcg; | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1940 | 		VM_BUG_ON(css_is_removed(&mem->css)); | 
 | 1941 | 		if (mem_cgroup_is_root(mem)) | 
 | 1942 | 			goto done; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 1943 | 		if (page_size == PAGE_SIZE && consume_stock(mem)) | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1944 | 			goto done; | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 1945 | 		css_get(&mem->css); | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1946 | 	} else { | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1947 | 		struct task_struct *p; | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 1948 |  | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1949 | 		rcu_read_lock(); | 
 | 1950 | 		p = rcu_dereference(mm->owner); | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1951 | 		/* | 
| KAMEZAWA Hiroyuki | ebb76ce | 2010-12-29 14:07:11 -0800 | [diff] [blame] | 1952 | 		 * Because we don't have task_lock(), "p" can exit. | 
 | 1953 | 		 * In that case, "mem" can point to root, or p can be NULL, racing | 
 | 1954 | 		 * with swapoff. Then there is a small risk of mis-accounting. | 
 | 1955 | 		 * But this kind of mis-accounting by a race always happens because | 
 | 1956 | 		 * we don't hold cgroup_mutex(); taking it would be overkill, so we | 
 | 1957 | 		 * allow that small race here. | 
 | 1958 | 		 * (*) swapoff et al. charge against the mm_struct, not against the | 
 | 1959 | 		 * task_struct. So mm->owner can be NULL. | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1960 | 		 */ | 
 | 1961 | 		mem = mem_cgroup_from_task(p); | 
| KAMEZAWA Hiroyuki | ebb76ce | 2010-12-29 14:07:11 -0800 | [diff] [blame] | 1962 | 		if (!mem || mem_cgroup_is_root(mem)) { | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1963 | 			rcu_read_unlock(); | 
 | 1964 | 			goto done; | 
 | 1965 | 		} | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 1966 | 		if (page_size == PAGE_SIZE && consume_stock(mem)) { | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1967 | 			/* | 
 | 1968 | 			 * It seems dangerous to access memcg without css_get(). | 
 | 1969 | 			 * But considering how consume_stock works, it's not | 
 | 1970 | 			 * necessary. If consume_stock succeeds, some charges | 
 | 1971 | 			 * from this memcg are cached on this cpu. So, we | 
 | 1972 | 			 * don't need to call css_get()/css_tryget() before | 
 | 1973 | 			 * calling consume_stock(). | 
 | 1974 | 			 */ | 
 | 1975 | 			rcu_read_unlock(); | 
 | 1976 | 			goto done; | 
 | 1977 | 		} | 
 | 1978 | 		/* After this point, we may block. We need to get a refcnt. */ | 
 | 1979 | 		if (!css_tryget(&mem->css)) { | 
 | 1980 | 			rcu_read_unlock(); | 
 | 1981 | 			goto again; | 
 | 1982 | 		} | 
 | 1983 | 		rcu_read_unlock(); | 
 | 1984 | 	} | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1985 |  | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1986 | 	do { | 
 | 1987 | 		bool oom_check; | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 1988 |  | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1989 | 		/* If killed, bypass charge */ | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1990 | 		if (fatal_signal_pending(current)) { | 
 | 1991 | 			css_put(&mem->css); | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1992 | 			goto bypass; | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 1993 | 		} | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 1994 |  | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 1995 | 		oom_check = false; | 
 | 1996 | 		if (oom && !nr_oom_retries) { | 
 | 1997 | 			oom_check = true; | 
 | 1998 | 			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES; | 
 | 1999 | 		} | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 2000 |  | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 2001 | 		ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check); | 
 | 2002 |  | 
 | 2003 | 		switch (ret) { | 
 | 2004 | 		case CHARGE_OK: | 
 | 2005 | 			break; | 
 | 2006 | 		case CHARGE_RETRY: /* not in OOM situation but retry */ | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2007 | 			csize = page_size; | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2008 | 			css_put(&mem->css); | 
 | 2009 | 			mem = NULL; | 
 | 2010 | 			goto again; | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 2011 | 		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */ | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2012 | 			css_put(&mem->css); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2013 | 			goto nomem; | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 2014 | 		case CHARGE_NOMEM: /* OOM routine works */ | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2015 | 			if (!oom) { | 
 | 2016 | 				css_put(&mem->css); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 2017 | 				goto nomem; | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2018 | 			} | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 2019 | 			/* If oom, we never return -ENOMEM */ | 
 | 2020 | 			nr_oom_retries--; | 
 | 2021 | 			break; | 
 | 2022 | 		case CHARGE_OOM_DIE: /* Killed by OOM Killer */ | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2023 | 			css_put(&mem->css); | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 2024 | 			goto bypass; | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 2025 | 		} | 
| KAMEZAWA Hiroyuki | 4b53433 | 2010-08-10 18:02:57 -0700 | [diff] [blame] | 2026 | 	} while (ret != CHARGE_OK); | 
 | 2027 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2028 | 	if (csize > page_size) | 
 | 2029 | 		refill_stock(mem, csize - page_size); | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2030 | 	css_put(&mem->css); | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 2031 | done: | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2032 | 	*memcg = mem; | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2033 | 	return 0; | 
 | 2034 | nomem: | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2035 | 	*memcg = NULL; | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2036 | 	return -ENOMEM; | 
| KAMEZAWA Hiroyuki | 867578c | 2010-03-10 15:22:39 -0800 | [diff] [blame] | 2037 | bypass: | 
 | 2038 | 	*memcg = NULL; | 
 | 2039 | 	return 0; | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2040 | } | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2041 |  | 
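
How a caller is meant to interpret the result of __mem_cgroup_try_charge(), as a minimal illustrative sketch (the function name below is made up; the real callers are mem_cgroup_charge_common() and the swap-in helpers later in this file): 0 with a non-NULL *memcg means the charge succeeded, 0 with a NULL *memcg means the charge was bypassed, and -ENOMEM means it failed.

/*
 * Illustrative sketch only -- "example_charge_one_page" is a made-up
 * name, not a function in this file.
 */
static int example_charge_one_page(struct mm_struct *mm, gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, PAGE_SIZE);
	if (ret)
		return ret;	/* -ENOMEM: the charge failed */
	if (!mem)
		return 0;	/* bypassed (e.g. the task is dying or OOM-killed) */
	/* charged: commit via __mem_cgroup_commit_charge() or cancel later */
	return 0;
}
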
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2042 | /* | 
| Daisuke Nishimura | a3032a2 | 2009-12-15 16:47:10 -0800 | [diff] [blame] | 2043 |  * Sometimes we have to undo a charge we got by try_charge(). | 
 | 2044 |  * This function is for that: it does the uncharge and puts the css | 
 | 2045 |  * refcnt gotten by try_charge(). | 
 | 2046 |  */ | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2047 | static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, | 
 | 2048 | 							unsigned long count) | 
| Daisuke Nishimura | a3032a2 | 2009-12-15 16:47:10 -0800 | [diff] [blame] | 2049 | { | 
 | 2050 | 	if (!mem_cgroup_is_root(mem)) { | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2051 | 		res_counter_uncharge(&mem->res, PAGE_SIZE * count); | 
| Daisuke Nishimura | a3032a2 | 2009-12-15 16:47:10 -0800 | [diff] [blame] | 2052 | 		if (do_swap_account) | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2053 | 			res_counter_uncharge(&mem->memsw, PAGE_SIZE * count); | 
| Daisuke Nishimura | a3032a2 | 2009-12-15 16:47:10 -0800 | [diff] [blame] | 2054 | 	} | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2055 | } | 
 | 2056 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2057 | static void mem_cgroup_cancel_charge(struct mem_cgroup *mem, | 
 | 2058 | 				     int page_size) | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2059 | { | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2060 | 	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT); | 
| Daisuke Nishimura | a3032a2 | 2009-12-15 16:47:10 -0800 | [diff] [blame] | 2061 | } | 
 | 2062 |  | 
 | 2063 | /* | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2064 |  * A helper function to get a mem_cgroup from an ID. Must be called under | 
 | 2065 |  * rcu_read_lock(). The caller must check css_is_removed() or similar if | 
 | 2066 |  * that is a concern. (Dropping a refcnt from swap can happen against a | 
 | 2067 |  * removed memcg.) | 
 | 2068 |  */ | 
 | 2069 | static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) | 
 | 2070 | { | 
 | 2071 | 	struct cgroup_subsys_state *css; | 
 | 2072 |  | 
 | 2073 | 	/* ID 0 is unused ID */ | 
 | 2074 | 	if (!id) | 
 | 2075 | 		return NULL; | 
 | 2076 | 	css = css_lookup(&mem_cgroup_subsys, id); | 
 | 2077 | 	if (!css) | 
 | 2078 | 		return NULL; | 
 | 2079 | 	return container_of(css, struct mem_cgroup, css); | 
 | 2080 | } | 
 | 2081 |  | 
| Wu Fengguang | e42d9d5 | 2009-12-16 12:19:59 +0100 | [diff] [blame] | 2082 | struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2083 | { | 
| Wu Fengguang | e42d9d5 | 2009-12-16 12:19:59 +0100 | [diff] [blame] | 2084 | 	struct mem_cgroup *mem = NULL; | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2085 | 	struct page_cgroup *pc; | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2086 | 	unsigned short id; | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2087 | 	swp_entry_t ent; | 
 | 2088 |  | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2089 | 	VM_BUG_ON(!PageLocked(page)); | 
 | 2090 |  | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2091 | 	pc = lookup_page_cgroup(page); | 
| Daisuke Nishimura | c0bd3f6 | 2009-04-30 15:08:11 -0700 | [diff] [blame] | 2092 | 	lock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2093 | 	if (PageCgroupUsed(pc)) { | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2094 | 		mem = pc->mem_cgroup; | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2095 | 		if (mem && !css_tryget(&mem->css)) | 
 | 2096 | 			mem = NULL; | 
| Wu Fengguang | e42d9d5 | 2009-12-16 12:19:59 +0100 | [diff] [blame] | 2097 | 	} else if (PageSwapCache(page)) { | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2098 | 		ent.val = page_private(page); | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2099 | 		id = lookup_swap_cgroup(ent); | 
 | 2100 | 		rcu_read_lock(); | 
 | 2101 | 		mem = mem_cgroup_lookup(id); | 
 | 2102 | 		if (mem && !css_tryget(&mem->css)) | 
 | 2103 | 			mem = NULL; | 
 | 2104 | 		rcu_read_unlock(); | 
| Daisuke Nishimura | 3c776e6 | 2009-04-02 16:57:43 -0700 | [diff] [blame] | 2105 | 	} | 
| Daisuke Nishimura | c0bd3f6 | 2009-04-30 15:08:11 -0700 | [diff] [blame] | 2106 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2107 | 	return mem; | 
 | 2108 | } | 
 | 2109 |  | 
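
A short sketch of the refcount contract of try_get_mem_cgroup_from_page() may help here (illustrative only; the helper name below is made up): the page must be locked, and a non-NULL return carries a css reference taken with css_tryget(), so the caller must drop it with css_put().

/*
 * Illustrative sketch only -- "example_inspect_page_memcg" is a made-up name.
 */
static void example_inspect_page_memcg(struct page *page)
{
	/* caller must hold lock_page(page) */
	struct mem_cgroup *mem = try_get_mem_cgroup_from_page(page);

	if (!mem)
		return;		/* page not charged, or the memcg is going away */
	/* ... use "mem" ... */
	css_put(&mem->css);
}
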
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2110 | static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, | 
 | 2111 | 				       struct page_cgroup *pc, | 
 | 2112 | 				       enum charge_type ctype, | 
 | 2113 | 				       int page_size) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2114 | { | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2115 | 	int nr_pages = page_size >> PAGE_SHIFT; | 
 | 2116 |  | 
 | 2117 | 	/* try_charge() can set *memcg to NULL; handle that case here. */ | 
 | 2118 | 	if (!mem) | 
 | 2119 | 		return; | 
 | 2120 |  | 
 | 2121 | 	lock_page_cgroup(pc); | 
 | 2122 | 	if (unlikely(PageCgroupUsed(pc))) { | 
 | 2123 | 		unlock_page_cgroup(pc); | 
 | 2124 | 		mem_cgroup_cancel_charge(mem, page_size); | 
 | 2125 | 		return; | 
 | 2126 | 	} | 
 | 2127 | 	/* | 
 | 2128 | 	 * We don't need page_cgroup_lock for tail pages, because they are not | 
 | 2129 | 	 * accessed by any other context at this point. | 
 | 2130 | 	 */ | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2131 | 	pc->mem_cgroup = mem; | 
| KAMEZAWA Hiroyuki | 261fb61 | 2009-09-23 15:56:33 -0700 | [diff] [blame] | 2132 | 	/* | 
 | 2133 | 	 * We access a page_cgroup asynchronously without lock_page_cgroup(). | 
 | 2134 | 	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup | 
 | 2135 | 	 * is accessed after testing the USED bit. To make pc->mem_cgroup visible | 
 | 2136 | 	 * before the USED bit, we need a memory barrier here. | 
 | 2137 | 	 * See mem_cgroup_add_lru_list(), etc. | 
 | 2138 |  	 */ | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2139 | 	smp_wmb(); | 
| Balbir Singh | 4b3bde4 | 2009-09-23 15:56:32 -0700 | [diff] [blame] | 2140 | 	switch (ctype) { | 
 | 2141 | 	case MEM_CGROUP_CHARGE_TYPE_CACHE: | 
 | 2142 | 	case MEM_CGROUP_CHARGE_TYPE_SHMEM: | 
 | 2143 | 		SetPageCgroupCache(pc); | 
 | 2144 | 		SetPageCgroupUsed(pc); | 
 | 2145 | 		break; | 
 | 2146 | 	case MEM_CGROUP_CHARGE_TYPE_MAPPED: | 
 | 2147 | 		ClearPageCgroupCache(pc); | 
 | 2148 | 		SetPageCgroupUsed(pc); | 
 | 2149 | 		break; | 
 | 2150 | 	default: | 
 | 2151 | 		break; | 
 | 2152 | 	} | 
| Hugh Dickins | 3be9127 | 2008-02-07 00:14:19 -0800 | [diff] [blame] | 2153 |  | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2154 | 	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages); | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2155 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | 430e4863 | 2010-03-10 15:22:30 -0800 | [diff] [blame] | 2156 | 	/* | 
 | 2157 | 	 * "charge_statistics" updated the event counter, so check it. | 
 | 2158 | 	 * Insert the ancestor (and the ancestor's ancestors) into the softlimit | 
 | 2159 | 	 * RB-tree if they exceed their softlimit. | 
 | 2160 | 	 */ | 
| KAMEZAWA Hiroyuki | d2265e6 | 2010-03-10 15:22:31 -0800 | [diff] [blame] | 2161 | 	memcg_check_events(mem, pc->page); | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2162 | } | 
 | 2163 |  | 
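
The smp_wmb() above pairs with a read barrier on the lockless reader side: any path that tests PageCgroupUsed() without lock_page_cgroup() must order that test before dereferencing pc->mem_cgroup. A minimal reader-side sketch (illustrative only; the real lockless readers are the LRU helpers such as mem_cgroup_add_lru_list()):

/*
 * Illustrative sketch only -- "example_lockless_memcg_peek" is a made-up name.
 */
static struct mem_cgroup *example_lockless_memcg_peek(struct page_cgroup *pc)
{
	if (!PageCgroupUsed(pc))
		return NULL;
	/* pairs with the smp_wmb() in __mem_cgroup_commit_charge() */
	smp_rmb();
	return pc->mem_cgroup;
}
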
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2164 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 
 | 2165 |  | 
 | 2166 | #define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\ | 
 | 2167 | 			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION)) | 
 | 2168 | /* | 
 | 2169 |  * Because tail pages are not marked as "used", mark them here. We're under | 
 | 2170 |  * zone->lru_lock, 'splitting on pmd' and compound_lock. | 
 | 2171 |  */ | 
 | 2172 | void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail) | 
 | 2173 | { | 
 | 2174 | 	struct page_cgroup *head_pc = lookup_page_cgroup(head); | 
 | 2175 | 	struct page_cgroup *tail_pc = lookup_page_cgroup(tail); | 
 | 2176 | 	unsigned long flags; | 
 | 2177 |  | 
| KAMEZAWA Hiroyuki | 3d37c4a | 2011-01-25 15:07:28 -0800 | [diff] [blame] | 2178 | 	if (mem_cgroup_disabled()) | 
 | 2179 | 		return; | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2180 | 	/* | 
| KAMEZAWA Hiroyuki | ece35ca | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2181 | 	 * We have no races with charge/uncharge but will have races with | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2182 | 	 * page state accounting. | 
 | 2183 | 	 */ | 
 | 2184 | 	move_lock_page_cgroup(head_pc, &flags); | 
 | 2185 |  | 
 | 2186 | 	tail_pc->mem_cgroup = head_pc->mem_cgroup; | 
 | 2187 | 	smp_wmb(); /* see __commit_charge() */ | 
| KAMEZAWA Hiroyuki | ece35ca | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2188 | 	if (PageCgroupAcctLRU(head_pc)) { | 
 | 2189 | 		enum lru_list lru; | 
 | 2190 | 		struct mem_cgroup_per_zone *mz; | 
 | 2191 |  | 
 | 2192 | 		/* | 
 | 2193 | 		 * LRU flags cannot be copied because we need to add the tail | 
 | 2194 | 		 * page to the LRU by the generic call, and then our hook will be | 
 | 2195 | 		 * called. We hold lru_lock, so reduce the counter directly. | 
 | 2196 | 		 */ | 
 | 2197 | 		lru = page_lru(head); | 
 | 2198 | 		mz = page_cgroup_zoneinfo(head_pc); | 
 | 2199 | 		MEM_CGROUP_ZSTAT(mz, lru) -= 1; | 
 | 2200 | 	} | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2201 | 	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT; | 
 | 2202 | 	move_unlock_page_cgroup(head_pc, &flags); | 
 | 2203 | } | 
 | 2204 | #endif | 
 | 2205 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2206 | /** | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2207 |  * __mem_cgroup_move_account - move account of the page | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2208 |  * @pc:	page_cgroup of the page. | 
 | 2209 |  * @from: mem_cgroup which the page is moved from. | 
 | 2210 |  * @to:	mem_cgroup which the page is moved to. @from != @to. | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2211 |  * @uncharge: whether we should call uncharge and css_put against @from. | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2212 |  * | 
 | 2213 |  * The caller must confirm the following. | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2214 |  * - page is not on LRU (isolate_page() is useful.) | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2215 |  * - the pc is locked, used, and ->mem_cgroup points to @from. | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2216 |  * | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2217 |  * This function doesn't do "charge" nor css_get against the new cgroup. That | 
 | 2218 |  * should be done by the caller (__mem_cgroup_try_charge would be useful). If | 
 | 2219 |  * @uncharge is true, this function does "uncharge" from the old cgroup; if | 
 | 2220 |  * @uncharge is false, it doesn't, so the caller should do the "uncharge". | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2221 |  */ | 
 | 2222 |  | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2223 | static void __mem_cgroup_move_account(struct page_cgroup *pc, | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2224 | 	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge, | 
 | 2225 | 	int charge_size) | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2226 | { | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2227 | 	int nr_pages = charge_size >> PAGE_SHIFT; | 
 | 2228 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2229 | 	VM_BUG_ON(from == to); | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2230 | 	VM_BUG_ON(PageLRU(pc->page)); | 
| Kirill A. Shutemov | 112bc2e | 2010-11-24 12:56:58 -0800 | [diff] [blame] | 2231 | 	VM_BUG_ON(!page_is_cgroup_locked(pc)); | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2232 | 	VM_BUG_ON(!PageCgroupUsed(pc)); | 
 | 2233 | 	VM_BUG_ON(pc->mem_cgroup != from); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2234 |  | 
| KAMEZAWA Hiroyuki | 8725d54 | 2010-04-06 14:35:05 -0700 | [diff] [blame] | 2235 | 	if (PageCgroupFileMapped(pc)) { | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 2236 | 		/* Update mapped_file data for mem_cgroup */ | 
 | 2237 | 		preempt_disable(); | 
 | 2238 | 		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | 
 | 2239 | 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | 
 | 2240 | 		preempt_enable(); | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 2241 | 	} | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2242 | 	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages); | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2243 | 	if (uncharge) | 
 | 2244 | 		/* This is not "cancel", but cancel_charge does all we need. */ | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2245 | 		mem_cgroup_cancel_charge(from, charge_size); | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 2246 |  | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2247 | 	/* caller should have done css_get */ | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2248 | 	pc->mem_cgroup = to; | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2249 | 	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages); | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 2250 | 	/* | 
 | 2251 | 	 * We charge against "to", which may not have any tasks. Then "to" | 
 | 2252 | 	 * can be under rmdir(). But in the current implementation, the callers | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 2253 | 	 * of this function are just force_empty() and move charge, so it is | 
 | 2254 | 	 * guaranteed that "to" is never removed. So we don't check the rmdir | 
 | 2255 | 	 * status here. | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 2256 | 	 */ | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2257 | } | 
 | 2258 |  | 
 | 2259 | /* | 
 | 2260 |  * check whether the @pc is valid for moving account and call | 
 | 2261 |  * __mem_cgroup_move_account() | 
 | 2262 |  */ | 
 | 2263 | static int mem_cgroup_move_account(struct page_cgroup *pc, | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2264 | 		struct mem_cgroup *from, struct mem_cgroup *to, | 
 | 2265 | 		bool uncharge, int charge_size) | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2266 | { | 
 | 2267 | 	int ret = -EINVAL; | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 2268 | 	unsigned long flags; | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2269 | 	/* | 
 | 2270 | 	 * The page is isolated from LRU. So, collapse function | 
 | 2271 | 	 * will not handle this page. But page splitting can happen. | 
 | 2272 | 	 * Do this check under compound_page_lock(). The caller should | 
 | 2273 | 	 * hold it. | 
 | 2274 | 	 */ | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2275 | 	if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page)) | 
 | 2276 | 		return -EBUSY; | 
 | 2277 |  | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2278 | 	lock_page_cgroup(pc); | 
 | 2279 | 	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) { | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 2280 | 		move_lock_page_cgroup(pc, &flags); | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2281 | 		__mem_cgroup_move_account(pc, from, to, uncharge, charge_size); | 
| KAMEZAWA Hiroyuki | dbd4ea7 | 2011-01-13 15:47:38 -0800 | [diff] [blame] | 2282 | 		move_unlock_page_cgroup(pc, &flags); | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2283 | 		ret = 0; | 
 | 2284 | 	} | 
 | 2285 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | d2265e6 | 2010-03-10 15:22:31 -0800 | [diff] [blame] | 2286 | 	/* | 
 | 2287 | 	 * check events | 
 | 2288 | 	 */ | 
 | 2289 | 	memcg_check_events(to, pc->page); | 
 | 2290 | 	memcg_check_events(from, pc->page); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2291 | 	return ret; | 
 | 2292 | } | 
 | 2293 |  | 
 | 2294 | /* | 
 | 2295 |  * move charges to its parent. | 
 | 2296 |  */ | 
 | 2297 |  | 
 | 2298 | static int mem_cgroup_move_parent(struct page_cgroup *pc, | 
 | 2299 | 				  struct mem_cgroup *child, | 
 | 2300 | 				  gfp_t gfp_mask) | 
 | 2301 | { | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2302 | 	struct page *page = pc->page; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2303 | 	struct cgroup *cg = child->css.cgroup; | 
 | 2304 | 	struct cgroup *pcg = cg->parent; | 
 | 2305 | 	struct mem_cgroup *parent; | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2306 | 	int page_size = PAGE_SIZE; | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2307 | 	unsigned long flags; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2308 | 	int ret; | 
 | 2309 |  | 
 | 2310 | 	/* Is ROOT ? */ | 
 | 2311 | 	if (!pcg) | 
 | 2312 | 		return -EINVAL; | 
 | 2313 |  | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2314 | 	ret = -EBUSY; | 
 | 2315 | 	if (!get_page_unless_zero(page)) | 
 | 2316 | 		goto out; | 
 | 2317 | 	if (isolate_lru_page(page)) | 
 | 2318 | 		goto put; | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2319 |  | 
 | 2320 | 	if (PageTransHuge(page)) | 
 | 2321 | 		page_size = HPAGE_SIZE; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2322 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2323 | 	parent = mem_cgroup_from_cont(pcg); | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2324 | 	ret = __mem_cgroup_try_charge(NULL, gfp_mask, | 
 | 2325 | 				&parent, false, page_size); | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 2326 | 	if (ret || !parent) | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2327 | 		goto put_back; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2328 |  | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2329 | 	if (page_size > PAGE_SIZE) | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2330 | 		flags = compound_lock_irqsave(page); | 
 | 2331 |  | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2332 | 	ret = mem_cgroup_move_account(pc, child, parent, true, page_size); | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 2333 | 	if (ret) | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2334 | 		mem_cgroup_cancel_charge(parent, page_size); | 
| Jesper Juhl | 8dba474 | 2011-01-25 15:07:24 -0800 | [diff] [blame] | 2335 |  | 
| KAMEZAWA Hiroyuki | 52dbb90 | 2011-01-25 15:07:29 -0800 | [diff] [blame] | 2336 | 	if (page_size > PAGE_SIZE) | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 2337 | 		compound_unlock_irqrestore(page, flags); | 
| Jesper Juhl | 8dba474 | 2011-01-25 15:07:24 -0800 | [diff] [blame] | 2338 | put_back: | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 2339 | 	putback_lru_page(page); | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2340 | put: | 
| Daisuke Nishimura | 40d5813 | 2009-01-15 13:51:12 -0800 | [diff] [blame] | 2341 | 	put_page(page); | 
| Daisuke Nishimura | 57f9fd7 | 2009-12-15 16:47:11 -0800 | [diff] [blame] | 2342 | out: | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 2343 | 	return ret; | 
 | 2344 | } | 
 | 2345 |  | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2346 | /* | 
 | 2347 |  * Charge the memory controller for page usage. | 
 | 2348 |  * Return | 
 | 2349 |  * 0 if the charge was successful | 
 | 2350 |  * < 0 if the cgroup is over its limit | 
 | 2351 |  */ | 
 | 2352 | static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2353 | 				gfp_t gfp_mask, enum charge_type ctype) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2354 | { | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2355 | 	struct mem_cgroup *mem = NULL; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2356 | 	int page_size = PAGE_SIZE; | 
| Johannes Weiner | 8493ae4 | 2011-02-01 15:52:44 -0800 | [diff] [blame] | 2357 | 	struct page_cgroup *pc; | 
 | 2358 | 	bool oom = true; | 
 | 2359 | 	int ret; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2360 |  | 
| Andrea Arcangeli | 37c2ac7 | 2011-01-13 15:47:16 -0800 | [diff] [blame] | 2361 | 	if (PageTransHuge(page)) { | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2362 | 		page_size <<= compound_order(page); | 
| Andrea Arcangeli | 37c2ac7 | 2011-01-13 15:47:16 -0800 | [diff] [blame] | 2363 | 		VM_BUG_ON(!PageTransHuge(page)); | 
| Johannes Weiner | 8493ae4 | 2011-02-01 15:52:44 -0800 | [diff] [blame] | 2364 | 		/* | 
 | 2365 | 		 * Never OOM-kill a process for a huge page.  The | 
 | 2366 | 		 * fault handler will fall back to regular pages. | 
 | 2367 | 		 */ | 
 | 2368 | 		oom = false; | 
| Andrea Arcangeli | 37c2ac7 | 2011-01-13 15:47:16 -0800 | [diff] [blame] | 2369 | 	} | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2370 |  | 
 | 2371 | 	pc = lookup_page_cgroup(page); | 
 | 2372 | 	/* can happen at boot */ | 
 | 2373 | 	if (unlikely(!pc)) | 
 | 2374 | 		return 0; | 
 | 2375 | 	prefetchw(pc); | 
 | 2376 |  | 
| Johannes Weiner | 8493ae4 | 2011-02-01 15:52:44 -0800 | [diff] [blame] | 2377 | 	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size); | 
| KAMEZAWA Hiroyuki | a636b32 | 2009-01-07 18:08:08 -0800 | [diff] [blame] | 2378 | 	if (ret || !mem) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2379 | 		return ret; | 
 | 2380 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2381 | 	__mem_cgroup_commit_charge(mem, pc, ctype, page_size); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2382 | 	return 0; | 
 | 2383 | } | 
 | 2384 |  | 
 | 2385 | int mem_cgroup_newpage_charge(struct page *page, | 
 | 2386 | 			      struct mm_struct *mm, gfp_t gfp_mask) | 
| KAMEZAWA Hiroyuki | 217bc31 | 2008-02-07 00:14:17 -0800 | [diff] [blame] | 2387 | { | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2388 | 	if (mem_cgroup_disabled()) | 
| Li Zefan | cede86a | 2008-07-25 01:47:18 -0700 | [diff] [blame] | 2389 | 		return 0; | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2390 | 	/* | 
 | 2391 | 	 * If already mapped, we don't have to account. | 
 | 2392 | 	 * If page cache, page->mapping has address_space. | 
 | 2393 | 	 * But page->mapping may hold a stale anon_vma pointer; | 
 | 2394 | 	 * detect that with the PageAnon() check. A newly-mapped anon page's | 
 | 2395 | 	 * page->mapping is NULL. | 
 | 2396 |   	 */ | 
 | 2397 | 	if (page_mapped(page) || (page->mapping && !PageAnon(page))) | 
 | 2398 | 		return 0; | 
 | 2399 | 	if (unlikely(!mm)) | 
 | 2400 | 		mm = &init_mm; | 
| KAMEZAWA Hiroyuki | 217bc31 | 2008-02-07 00:14:17 -0800 | [diff] [blame] | 2401 | 	return mem_cgroup_charge_common(page, mm, gfp_mask, | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2402 | 				MEM_CGROUP_CHARGE_TYPE_MAPPED); | 
| KAMEZAWA Hiroyuki | 217bc31 | 2008-02-07 00:14:17 -0800 | [diff] [blame] | 2403 | } | 
 | 2404 |  | 
| Daisuke Nishimura | 83aae4c | 2009-04-02 16:57:48 -0700 | [diff] [blame] | 2405 | static void | 
 | 2406 | __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, | 
 | 2407 | 					enum charge_type ctype); | 
 | 2408 |  | 
| Balbir Singh | e1a1cd5 | 2008-02-07 00:14:02 -0800 | [diff] [blame] | 2409 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | 
 | 2410 | 				gfp_t gfp_mask) | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 2411 | { | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2412 | 	int ret; | 
 | 2413 |  | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2414 | 	if (mem_cgroup_disabled()) | 
| Li Zefan | cede86a | 2008-07-25 01:47:18 -0700 | [diff] [blame] | 2415 | 		return 0; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2416 | 	if (PageCompound(page)) | 
 | 2417 | 		return 0; | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 2418 | 	/* | 
 | 2419 | 	 * Corner case handling. This is usually called from | 
 | 2420 | 	 * add_to_page_cache(). But some filesystems (shmem) precharge the page | 
 | 2421 | 	 * before calling it and call add_to_page_cache() with GFP_NOWAIT. | 
 | 2422 | 	 * | 
 | 2423 | 	 * In the GFP_NOWAIT case, the page may be pre-charged before calling | 
 | 2424 | 	 * add_to_page_cache() (see shmem.c). Check for that here and avoid | 
 | 2425 | 	 * charging twice. (It works, but has to pay a slightly larger cost.) | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2426 | 	 * And when the page is in SwapCache, swap information should be taken | 
 | 2427 | 	 * into account. This is done under lock_page() now. | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 2428 | 	 */ | 
 | 2429 | 	if (!(gfp_mask & __GFP_WAIT)) { | 
 | 2430 | 		struct page_cgroup *pc; | 
 | 2431 |  | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2432 | 		pc = lookup_page_cgroup(page); | 
 | 2433 | 		if (!pc) | 
 | 2434 | 			return 0; | 
 | 2435 | 		lock_page_cgroup(pc); | 
 | 2436 | 		if (PageCgroupUsed(pc)) { | 
 | 2437 | 			unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 2438 | 			return 0; | 
 | 2439 | 		} | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2440 | 		unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 2441 | 	} | 
 | 2442 |  | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2443 | 	if (unlikely(!mm)) | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 2444 | 		mm = &init_mm; | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 2445 |  | 
| KAMEZAWA Hiroyuki | c05555b | 2008-10-18 20:28:11 -0700 | [diff] [blame] | 2446 | 	if (page_is_file_cache(page)) | 
 | 2447 | 		return mem_cgroup_charge_common(page, mm, gfp_mask, | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2448 | 				MEM_CGROUP_CHARGE_TYPE_CACHE); | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2449 |  | 
| Daisuke Nishimura | 83aae4c | 2009-04-02 16:57:48 -0700 | [diff] [blame] | 2450 | 	/* shmem */ | 
 | 2451 | 	if (PageSwapCache(page)) { | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2452 | 		struct mem_cgroup *mem = NULL; | 
 | 2453 |  | 
| Daisuke Nishimura | 83aae4c | 2009-04-02 16:57:48 -0700 | [diff] [blame] | 2454 | 		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); | 
 | 2455 | 		if (!ret) | 
 | 2456 | 			__mem_cgroup_commit_charge_swapin(page, mem, | 
 | 2457 | 					MEM_CGROUP_CHARGE_TYPE_SHMEM); | 
 | 2458 | 	} else | 
 | 2459 | 		ret = mem_cgroup_charge_common(page, mm, gfp_mask, | 
| Daisuke Nishimura | 73045c4 | 2010-08-10 18:02:59 -0700 | [diff] [blame] | 2460 | 					MEM_CGROUP_CHARGE_TYPE_SHMEM); | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2461 |  | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 2462 | 	return ret; | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2463 | } | 
 | 2464 |  | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 2465 | /* | 
 | 2466 |  * During swap-in (try_charge -> commit or cancel), the page is locked. | 
 | 2467 |  * And when try_charge() successfully returns, one refcnt to the memcg, not | 
| Uwe Kleine-König | 21ae295 | 2009-10-07 15:21:09 +0200 | [diff] [blame] | 2468 |  * tied to a struct page_cgroup, is acquired. This refcnt will be consumed | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 2469 |  * by "commit()" or dropped by "cancel()". | 
 | 2470 |  */ | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2471 | int mem_cgroup_try_charge_swapin(struct mm_struct *mm, | 
 | 2472 | 				 struct page *page, | 
 | 2473 | 				 gfp_t mask, struct mem_cgroup **ptr) | 
 | 2474 | { | 
 | 2475 | 	struct mem_cgroup *mem; | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 2476 | 	int ret; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2477 |  | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2478 | 	if (mem_cgroup_disabled()) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2479 | 		return 0; | 
 | 2480 |  | 
 | 2481 | 	if (!do_swap_account) | 
 | 2482 | 		goto charge_cur_mm; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2483 | 	/* | 
 | 2484 | 	 * A racing thread's fault, or swapoff, may have already updated | 
| Hugh Dickins | 407f9c8 | 2009-12-14 17:59:30 -0800 | [diff] [blame] | 2485 | 	 * the pte, and even removed the page from swap cache: in those cases | 
 | 2486 | 	 * do_swap_page()'s pte_same() test will fail; but there's also a | 
 | 2487 | 	 * KSM case which does need to charge the page. | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2488 | 	 */ | 
 | 2489 | 	if (!PageSwapCache(page)) | 
| Hugh Dickins | 407f9c8 | 2009-12-14 17:59:30 -0800 | [diff] [blame] | 2490 | 		goto charge_cur_mm; | 
| Wu Fengguang | e42d9d5 | 2009-12-16 12:19:59 +0100 | [diff] [blame] | 2491 | 	mem = try_get_mem_cgroup_from_page(page); | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 2492 | 	if (!mem) | 
 | 2493 | 		goto charge_cur_mm; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2494 | 	*ptr = mem; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2495 | 	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE); | 
| KAMEZAWA Hiroyuki | 54595fe | 2009-01-07 18:08:33 -0800 | [diff] [blame] | 2496 | 	css_put(&mem->css); | 
 | 2497 | 	return ret; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2498 | charge_cur_mm: | 
 | 2499 | 	if (unlikely(!mm)) | 
 | 2500 | 		mm = &init_mm; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2501 | 	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2502 | } | 
 | 2503 |  | 
| Daisuke Nishimura | 83aae4c | 2009-04-02 16:57:48 -0700 | [diff] [blame] | 2504 | static void | 
 | 2505 | __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr, | 
 | 2506 | 					enum charge_type ctype) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2507 | { | 
 | 2508 | 	struct page_cgroup *pc; | 
 | 2509 |  | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2510 | 	if (mem_cgroup_disabled()) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2511 | 		return; | 
 | 2512 | 	if (!ptr) | 
 | 2513 | 		return; | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 2514 | 	cgroup_exclude_rmdir(&ptr->css); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2515 | 	pc = lookup_page_cgroup(page); | 
| KAMEZAWA Hiroyuki | 544122e | 2009-01-07 18:08:34 -0800 | [diff] [blame] | 2516 | 	mem_cgroup_lru_del_before_commit_swapcache(page); | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2517 | 	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE); | 
| KAMEZAWA Hiroyuki | 544122e | 2009-01-07 18:08:34 -0800 | [diff] [blame] | 2518 | 	mem_cgroup_lru_add_after_commit_swapcache(page); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2519 | 	/* | 
 | 2520 | 	 * Now the swap is in memory. This means this page may be | 
 | 2521 | 	 * counted both as mem and swap, i.e. double-counted. | 
| KAMEZAWA Hiroyuki | 03f3c43 | 2009-01-07 18:08:31 -0800 | [diff] [blame] | 2522 | 	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable | 
 | 2523 | 	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page() | 
 | 2524 | 	 * may call delete_from_swap_cache() before we reach here. | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2525 | 	 */ | 
| KAMEZAWA Hiroyuki | 03f3c43 | 2009-01-07 18:08:31 -0800 | [diff] [blame] | 2526 | 	if (do_swap_account && PageSwapCache(page)) { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2527 | 		swp_entry_t ent = {.val = page_private(page)}; | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2528 | 		unsigned short id; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2529 | 		struct mem_cgroup *memcg; | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2530 |  | 
 | 2531 | 		id = swap_cgroup_record(ent, 0); | 
 | 2532 | 		rcu_read_lock(); | 
 | 2533 | 		memcg = mem_cgroup_lookup(id); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2534 | 		if (memcg) { | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2535 | 			/* | 
 | 2536 | 			 * This recorded memcg can be an obsolete one. So avoid | 
 | 2537 | 			 * calling css_tryget(). | 
 | 2538 | 			 */ | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 2539 | 			if (!mem_cgroup_is_root(memcg)) | 
| KAMEZAWA Hiroyuki | 4e64915 | 2009-10-01 15:44:11 -0700 | [diff] [blame] | 2540 | 				res_counter_uncharge(&memcg->memsw, PAGE_SIZE); | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 2541 | 			mem_cgroup_swap_statistics(memcg, false); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2542 | 			mem_cgroup_put(memcg); | 
 | 2543 | 		} | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2544 | 		rcu_read_unlock(); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2545 | 	} | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 2546 | 	/* | 
 | 2547 | 	 * At swapin, we may charge against a cgroup which has no tasks. | 
 | 2548 | 	 * So rmdir()->pre_destroy() can be called while we do this charge. | 
 | 2549 | 	 * In that case, we need to call pre_destroy() again. Check it here. | 
 | 2550 | 	 */ | 
 | 2551 | 	cgroup_release_and_wakeup_rmdir(&ptr->css); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2552 | } | 
 | 2553 |  | 
| Daisuke Nishimura | 83aae4c | 2009-04-02 16:57:48 -0700 | [diff] [blame] | 2554 | void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) | 
 | 2555 | { | 
 | 2556 | 	__mem_cgroup_commit_charge_swapin(page, ptr, | 
 | 2557 | 					MEM_CGROUP_CHARGE_TYPE_MAPPED); | 
 | 2558 | } | 
 | 2559 |  | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2560 | void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) | 
 | 2561 | { | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2562 | 	if (mem_cgroup_disabled()) | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2563 | 		return; | 
 | 2564 | 	if (!mem) | 
 | 2565 | 		return; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2566 | 	mem_cgroup_cancel_charge(mem, PAGE_SIZE); | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2567 | } | 
 | 2568 |  | 
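
The three functions above form the swap-in charging protocol: try_charge while the page is locked, then commit if the page table update goes through, or cancel otherwise. A hedged sketch of the caller sequence (illustrative only; the real sequence lives in do_swap_page() in mm/memory.c and may differ in detail):

/*
 * Illustrative sketch only -- "example_swapin_charge" is a made-up name.
 */
static int example_swapin_charge(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask, bool pte_unchanged)
{
	struct mem_cgroup *ptr = NULL;

	/* phase 1: reserve the charge while the page is locked */
	if (mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &ptr))
		return -ENOMEM;

	if (pte_unchanged)	/* stand-in for do_swap_page()'s pte_same() test */
		mem_cgroup_commit_charge_swapin(page, ptr);	/* phase 2a */
	else
		mem_cgroup_cancel_charge_swapin(ptr);		/* phase 2b */
	return 0;
}
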
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2569 | static void | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2570 | __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype, | 
 | 2571 | 	      int page_size) | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2572 | { | 
 | 2573 | 	struct memcg_batch_info *batch = NULL; | 
 | 2574 | 	bool uncharge_memsw = true; | 
 | 2575 | 	/* If swapout, usage of swap doesn't decrease */ | 
 | 2576 | 	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) | 
 | 2577 | 		uncharge_memsw = false; | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2578 |  | 
 | 2579 | 	batch = ¤t->memcg_batch; | 
 | 2580 | 	/* | 
 | 2581 | 	 * Usually, we do css_get() when we remember a memcg pointer. | 
 | 2582 | 	 * But in this case, we keep res->usage until end of a series of | 
 | 2583 | 	 * uncharges. Then, it's ok to ignore memcg's refcnt. | 
 | 2584 | 	 */ | 
 | 2585 | 	if (!batch->memcg) | 
 | 2586 | 		batch->memcg = mem; | 
 | 2587 | 	/* | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 2588 | 	 * do_batch > 0 when unmapping pages or inode invalidate/truncate. | 
 | 2589 | 	 * In those cases, all pages freed continuously can be expected to be | 
 | 2590 | 	 * in the same cgroup, and we have a chance to coalesce uncharges. | 
 | 2591 | 	 * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE) | 
 | 2592 | 	 * because we want to do the uncharge as soon as possible. | 
 | 2593 | 	 */ | 
 | 2594 |  | 
 | 2595 | 	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) | 
 | 2596 | 		goto direct_uncharge; | 
 | 2597 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2598 | 	if (page_size != PAGE_SIZE) | 
 | 2599 | 		goto direct_uncharge; | 
 | 2600 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 2601 | 	/* | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2602 | 	 * In typical case, batch->memcg == mem. This means we can | 
 | 2603 | 	 * merge a series of uncharges to an uncharge of res_counter. | 
 | 2604 | 	 * If not, we uncharge res_counter ony by one. | 
 | 2605 | 	 */ | 
 | 2606 | 	if (batch->memcg != mem) | 
 | 2607 | 		goto direct_uncharge; | 
 | 2608 | 	/* remember freed charge and uncharge it later */ | 
 | 2609 | 	batch->bytes += PAGE_SIZE; | 
 | 2610 | 	if (uncharge_memsw) | 
 | 2611 | 		batch->memsw_bytes += PAGE_SIZE; | 
 | 2612 | 	return; | 
 | 2613 | direct_uncharge: | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2614 | 	res_counter_uncharge(&mem->res, page_size); | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2615 | 	if (uncharge_memsw) | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2616 | 		res_counter_uncharge(&mem->memsw, page_size); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 2617 | 	if (unlikely(batch->memcg != mem)) | 
 | 2618 | 		memcg_oom_recover(mem); | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2619 | 	return; | 
 | 2620 | } | 
| KAMEZAWA Hiroyuki | 7a81b88 | 2009-01-07 18:07:48 -0800 | [diff] [blame] | 2621 |  | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 2622 | /* | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2623 |  * uncharge if !page_mapped(page) | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2624 |  */ | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2625 | static struct mem_cgroup * | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2626 | __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2627 | { | 
| Daisuke Nishimura | 152c9cc | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2628 | 	int count; | 
| Hugh Dickins | 8289546 | 2008-03-04 14:29:08 -0800 | [diff] [blame] | 2629 | 	struct page_cgroup *pc; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2630 | 	struct mem_cgroup *mem = NULL; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2631 | 	int page_size = PAGE_SIZE; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2632 |  | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2633 | 	if (mem_cgroup_disabled()) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2634 | 		return NULL; | 
| Balbir Singh | 4077960 | 2008-04-04 14:29:59 -0700 | [diff] [blame] | 2635 |  | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2636 | 	if (PageSwapCache(page)) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2637 | 		return NULL; | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2638 |  | 
| Andrea Arcangeli | 37c2ac7 | 2011-01-13 15:47:16 -0800 | [diff] [blame] | 2639 | 	if (PageTransHuge(page)) { | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2640 | 		page_size <<= compound_order(page); | 
| Andrea Arcangeli | 37c2ac7 | 2011-01-13 15:47:16 -0800 | [diff] [blame] | 2641 | 		VM_BUG_ON(!PageTransHuge(page)); | 
 | 2642 | 	} | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2643 |  | 
| Daisuke Nishimura | 152c9cc | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2644 | 	count = page_size >> PAGE_SHIFT; | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 2645 | 	/* | 
| Balbir Singh | 3c541e1 | 2008-02-07 00:14:41 -0800 | [diff] [blame] | 2646 | 	 * Check if our page_cgroup is valid | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 2647 | 	 */ | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2648 | 	pc = lookup_page_cgroup(page); | 
 | 2649 | 	if (unlikely(!pc || !PageCgroupUsed(pc))) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2650 | 		return NULL; | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2651 |  | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2652 | 	lock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2653 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2654 | 	mem = pc->mem_cgroup; | 
 | 2655 |  | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2656 | 	if (!PageCgroupUsed(pc)) | 
 | 2657 | 		goto unlock_out; | 
 | 2658 |  | 
 | 2659 | 	switch (ctype) { | 
 | 2660 | 	case MEM_CGROUP_CHARGE_TYPE_MAPPED: | 
| KAMEZAWA Hiroyuki | 8a9478c | 2009-06-17 16:27:17 -0700 | [diff] [blame] | 2661 | 	case MEM_CGROUP_CHARGE_TYPE_DROP: | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2662 | 		/* See mem_cgroup_prepare_migration() */ | 
 | 2663 | 		if (page_mapped(page) || PageCgroupMigration(pc)) | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2664 | 			goto unlock_out; | 
 | 2665 | 		break; | 
 | 2666 | 	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: | 
 | 2667 | 		if (!PageAnon(page)) {	/* Shared memory */ | 
 | 2668 | 			if (page->mapping && !page_is_file_cache(page)) | 
 | 2669 | 				goto unlock_out; | 
 | 2670 | 		} else if (page_mapped(page)) /* Anon */ | 
 | 2671 | 				goto unlock_out; | 
 | 2672 | 		break; | 
 | 2673 | 	default: | 
 | 2674 | 		break; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2675 | 	} | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2676 |  | 
| KAMEZAWA Hiroyuki | ca3e021 | 2011-01-20 14:44:24 -0800 | [diff] [blame] | 2677 | 	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count); | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 2678 |  | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2679 | 	ClearPageCgroupUsed(pc); | 
| KAMEZAWA Hiroyuki | 544122e | 2009-01-07 18:08:34 -0800 | [diff] [blame] | 2680 | 	/* | 
 | 2681 | 	 * pc->mem_cgroup is not cleared here. It will be accessed when it's | 
 | 2682 | 	 * freed from LRU. This is safe because an uncharged page is expected | 
 | 2683 | 	 * not to be reused (freed soon). The exception is SwapCache, which is | 
 | 2684 | 	 * handled by special functions. | 
 | 2685 | 	 */ | 
| Hugh Dickins | b9c565d | 2008-03-04 14:29:11 -0800 | [diff] [blame] | 2686 |  | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2687 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2688 | 	/* | 
 | 2689 | 	 * even after unlock, we have mem->res.usage here and this memcg | 
 | 2690 | 	 * will never be freed. | 
 | 2691 | 	 */ | 
| KAMEZAWA Hiroyuki | d2265e6 | 2010-03-10 15:22:31 -0800 | [diff] [blame] | 2692 | 	memcg_check_events(mem, page); | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2693 | 	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) { | 
 | 2694 | 		mem_cgroup_swap_statistics(mem, true); | 
 | 2695 | 		mem_cgroup_get(mem); | 
 | 2696 | 	} | 
 | 2697 | 	if (!mem_cgroup_is_root(mem)) | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2698 | 		__do_uncharge(mem, ctype, page_size); | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 2699 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2700 | 	return mem; | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2701 |  | 
 | 2702 | unlock_out: | 
 | 2703 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2704 | 	return NULL; | 
| Balbir Singh | 3c541e1 | 2008-02-07 00:14:41 -0800 | [diff] [blame] | 2705 | } | 
 | 2706 |  | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2707 | void mem_cgroup_uncharge_page(struct page *page) | 
 | 2708 | { | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2709 | 	/* early check. */ | 
 | 2710 | 	if (page_mapped(page)) | 
 | 2711 | 		return; | 
 | 2712 | 	if (page->mapping && !PageAnon(page)) | 
 | 2713 | 		return; | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2714 | 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); | 
 | 2715 | } | 
 | 2716 |  | 
 | 2717 | void mem_cgroup_uncharge_cache_page(struct page *page) | 
 | 2718 | { | 
 | 2719 | 	VM_BUG_ON(page_mapped(page)); | 
| KAMEZAWA Hiroyuki | b7abea9 | 2008-10-18 20:28:09 -0700 | [diff] [blame] | 2720 | 	VM_BUG_ON(page->mapping); | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2721 | 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); | 
 | 2722 | } | 
 | 2723 |  | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2724 | /* | 
 | 2725 |  * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate. | 
 | 2726 |  * In those cases, pages are freed continuously and we can expect they | 
 | 2727 |  * belong to the same memcg. Each of these callers itself limits the number | 
 | 2728 |  * of pages freed at once, so uncharge_start/end() pair up properly. | 
 | 2729 |  * This may be called multiple (nested) times in one context. | 
 | 2730 |  */ | 
 | 2731 |  | 
 | 2732 | void mem_cgroup_uncharge_start(void) | 
 | 2733 | { | 
 | 2734 | 	current->memcg_batch.do_batch++; | 
 | 2735 | 	/* We can do nest. */ | 
 | 2736 | 	if (current->memcg_batch.do_batch == 1) { | 
 | 2737 | 		current->memcg_batch.memcg = NULL; | 
 | 2738 | 		current->memcg_batch.bytes = 0; | 
 | 2739 | 		current->memcg_batch.memsw_bytes = 0; | 
 | 2740 | 	} | 
 | 2741 | } | 
 | 2742 |  | 
 | 2743 | void mem_cgroup_uncharge_end(void) | 
 | 2744 | { | 
 | 2745 | 	struct memcg_batch_info *batch = ¤t->memcg_batch; | 
 | 2746 |  | 
 | 2747 | 	if (!batch->do_batch) | 
 | 2748 | 		return; | 
 | 2749 |  | 
 | 2750 | 	batch->do_batch--; | 
 | 2751 | 	if (batch->do_batch) /* If stacked, do nothing. */ | 
 | 2752 | 		return; | 
 | 2753 |  | 
 | 2754 | 	if (!batch->memcg) | 
 | 2755 | 		return; | 
 | 2756 | 	/* | 
 | 2757 | 	 * This "batch->memcg" is valid without any css_get/put etc... | 
 | 2758 | 	 * because we hide charges behind us. | 
 | 2759 | 	 */ | 
 | 2760 | 	if (batch->bytes) | 
 | 2761 | 		res_counter_uncharge(&batch->memcg->res, batch->bytes); | 
 | 2762 | 	if (batch->memsw_bytes) | 
 | 2763 | 		res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 2764 | 	memcg_oom_recover(batch->memcg); | 
| KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 2765 | 	/* forget this pointer (for sanity check) */ | 
 | 2766 | 	batch->memcg = NULL; | 
 | 2767 | } | 
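 |  |  | 
 |  | /* | 
 |  |  * Illustrative batching pattern (hypothetical caller sketch, not taken from | 
 |  |  * this file) for the start/end pair above, e.g. a truncate-style loop: | 
 |  |  * | 
 |  |  *	mem_cgroup_uncharge_start(); | 
 |  |  *	for (i = 0; i < nr; i++) | 
 |  |  *		mem_cgroup_uncharge_cache_page(pages[i]); | 
 |  |  *	mem_cgroup_uncharge_end(); | 
 |  |  * | 
 |  |  * While do_batch is non-zero the uncharge path may defer the res_counter | 
 |  |  * updates into current->memcg_batch; mem_cgroup_uncharge_end() then applies | 
 |  |  * them once per batch instead of once per page. | 
 |  |  */ | 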
 | 2768 |  | 
| Daisuke Nishimura | e767e05 | 2009-05-28 14:34:28 -0700 | [diff] [blame] | 2769 | #ifdef CONFIG_SWAP | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2770 | /* | 
| Daisuke Nishimura | e767e05 | 2009-05-28 14:34:28 -0700 | [diff] [blame] | 2771 |  * Called after __delete_from_swap_cache(); drops the "page" account. | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2772 |  * The memcg information is recorded in the swap_cgroup of "ent". | 
 | 2773 |  */ | 
| KAMEZAWA Hiroyuki | 8a9478c | 2009-06-17 16:27:17 -0700 | [diff] [blame] | 2774 | void | 
 | 2775 | mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2776 | { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2777 | 	struct mem_cgroup *memcg; | 
| KAMEZAWA Hiroyuki | 8a9478c | 2009-06-17 16:27:17 -0700 | [diff] [blame] | 2778 | 	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2779 |  | 
| KAMEZAWA Hiroyuki | 8a9478c | 2009-06-17 16:27:17 -0700 | [diff] [blame] | 2780 | 	if (!swapout) /* this was a swap cache but the swap is unused ! */ | 
 | 2781 | 		ctype = MEM_CGROUP_CHARGE_TYPE_DROP; | 
 | 2782 |  | 
 | 2783 | 	memcg = __mem_cgroup_uncharge_common(page, ctype); | 
 | 2784 |  | 
| KAMEZAWA Hiroyuki | f75ca96 | 2010-08-10 18:03:02 -0700 | [diff] [blame] | 2785 | 	/* | 
 | 2786 | 	 * Record memcg information. If swapout && memcg != NULL, | 
 | 2787 | 	 * mem_cgroup_get() was called in uncharge(). | 
 | 2788 | 	 */ | 
 | 2789 | 	if (do_swap_account && swapout && memcg) | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2790 | 		swap_cgroup_record(ent, css_id(&memcg->css)); | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2791 | } | 
| Daisuke Nishimura | e767e05 | 2009-05-28 14:34:28 -0700 | [diff] [blame] | 2792 | #endif | 
| KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 2793 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2794 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 
 | 2795 | /* | 
 | 2796 |  * Called from swap_entry_free(). Removes the record in swap_cgroup and | 
 | 2797 |  * uncharges the "memsw" account. | 
 | 2798 |  */ | 
 | 2799 | void mem_cgroup_uncharge_swap(swp_entry_t ent) | 
 | 2800 | { | 
 | 2801 | 	struct mem_cgroup *memcg; | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2802 | 	unsigned short id; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2803 |  | 
 | 2804 | 	if (!do_swap_account) | 
 | 2805 | 		return; | 
 | 2806 |  | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2807 | 	id = swap_cgroup_record(ent, 0); | 
 | 2808 | 	rcu_read_lock(); | 
 | 2809 | 	memcg = mem_cgroup_lookup(id); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2810 | 	if (memcg) { | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2811 | 		/* | 
 | 2812 | 		 * We uncharge this because swap is freed. | 
 | 2813 | 		 * This memcg can be an obsolete one. We avoid calling css_tryget(). | 
 | 2814 | 		 */ | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 2815 | 		if (!mem_cgroup_is_root(memcg)) | 
| KAMEZAWA Hiroyuki | 4e64915 | 2009-10-01 15:44:11 -0700 | [diff] [blame] | 2816 | 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE); | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 2817 | 		mem_cgroup_swap_statistics(memcg, false); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2818 | 		mem_cgroup_put(memcg); | 
 | 2819 | 	} | 
| KAMEZAWA Hiroyuki | a3b2d69 | 2009-04-02 16:57:45 -0700 | [diff] [blame] | 2820 | 	rcu_read_unlock(); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2821 | } | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2822 |  | 
 | 2823 | /** | 
 | 2824 |  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. | 
 | 2825 |  * @entry: swap entry to be moved | 
 | 2826 |  * @from:  mem_cgroup which the entry is moved from | 
 | 2827 |  * @to:  mem_cgroup which the entry is moved to | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2828 |  * @need_fixup: whether we should fixup res_counters and refcounts. | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2829 |  * | 
 | 2830 |  * It succeeds only when the swap_cgroup's record for this entry is the same | 
 | 2831 |  * as the mem_cgroup's id of @from. | 
 | 2832 |  * | 
 | 2833 |  * Returns 0 on success, -EINVAL on failure. | 
 | 2834 |  * | 
 | 2835 |  * The caller must have charged to @to, IOW, called res_counter_charge() about | 
 | 2836 |  * both res and memsw, and called css_get(). | 
 | 2837 |  */ | 
 | 2838 | static int mem_cgroup_move_swap_account(swp_entry_t entry, | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2839 | 		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2840 | { | 
 | 2841 | 	unsigned short old_id, new_id; | 
 | 2842 |  | 
 | 2843 | 	old_id = css_id(&from->css); | 
 | 2844 | 	new_id = css_id(&to->css); | 
 | 2845 |  | 
 | 2846 | 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2847 | 		mem_cgroup_swap_statistics(from, false); | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2848 | 		mem_cgroup_swap_statistics(to, true); | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2849 | 		/* | 
 | 2850 | 		 * This function is only called from task migration context now. | 
 | 2851 | 		 * It postpones res_counter and refcount handling till the end | 
 | 2852 | 		 * of task migration (mem_cgroup_clear_mc()) for performance | 
 | 2853 | 		 * improvement. But we cannot postpone mem_cgroup_get(to) | 
 | 2854 | 		 * because if the process that has been moved to @to does | 
 | 2855 | 		 * swap-in, the refcount of @to might be decreased to 0. | 
 | 2856 | 		 */ | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2857 | 		mem_cgroup_get(to); | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2858 | 		if (need_fixup) { | 
 | 2859 | 			if (!mem_cgroup_is_root(from)) | 
 | 2860 | 				res_counter_uncharge(&from->memsw, PAGE_SIZE); | 
 | 2861 | 			mem_cgroup_put(from); | 
 | 2862 | 			/* | 
 | 2863 | 			 * we charged both to->res and to->memsw, so we should | 
 | 2864 | 			 * uncharge to->res. | 
 | 2865 | 			 */ | 
 | 2866 | 			if (!mem_cgroup_is_root(to)) | 
 | 2867 | 				res_counter_uncharge(&to->res, PAGE_SIZE); | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2868 | 		} | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2869 | 		return 0; | 
 | 2870 | 	} | 
 | 2871 | 	return -EINVAL; | 
 | 2872 | } | 
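 |  |  | 
 |  | /* | 
 |  |  * Worked example of the need_fixup bookkeeping above (descriptive note): | 
 |  |  * the caller pre-charged PAGE_SIZE to both to->res and to->memsw. The swap | 
 |  |  * entry itself only needs a memsw charge, so on a successful record move we | 
 |  |  * uncharge PAGE_SIZE from @from's memsw and give PAGE_SIZE back to @to's | 
 |  |  * res, leaving @to holding exactly the memsw charge for the entry. | 
 |  |  */ | 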
 | 2873 | #else | 
 | 2874 | static inline int mem_cgroup_move_swap_account(swp_entry_t entry, | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 2875 | 		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 2876 | { | 
 | 2877 | 	return -EINVAL; | 
 | 2878 | } | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2879 | #endif | 
 | 2880 |  | 
| KAMEZAWA Hiroyuki | ae41be3 | 2008-02-07 00:14:10 -0800 | [diff] [blame] | 2881 | /* | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2882 |  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old | 
 | 2883 |  * page belongs to. | 
| KAMEZAWA Hiroyuki | ae41be3 | 2008-02-07 00:14:10 -0800 | [diff] [blame] | 2884 |  */ | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2885 | int mem_cgroup_prepare_migration(struct page *page, | 
 | 2886 | 	struct page *newpage, struct mem_cgroup **ptr) | 
| KAMEZAWA Hiroyuki | ae41be3 | 2008-02-07 00:14:10 -0800 | [diff] [blame] | 2887 | { | 
 | 2888 | 	struct page_cgroup *pc; | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2889 | 	struct mem_cgroup *mem = NULL; | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2890 | 	enum charge_type ctype; | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2891 | 	int ret = 0; | 
| Hugh Dickins | 8869b8f | 2008-03-04 14:29:09 -0800 | [diff] [blame] | 2892 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2893 | 	VM_BUG_ON(PageTransHuge(page)); | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 2894 | 	if (mem_cgroup_disabled()) | 
| Balbir Singh | 4077960 | 2008-04-04 14:29:59 -0700 | [diff] [blame] | 2895 | 		return 0; | 
 | 2896 |  | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2897 | 	pc = lookup_page_cgroup(page); | 
 | 2898 | 	lock_page_cgroup(pc); | 
 | 2899 | 	if (PageCgroupUsed(pc)) { | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2900 | 		mem = pc->mem_cgroup; | 
 | 2901 | 		css_get(&mem->css); | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2902 | 		/* | 
 | 2903 | 		 * When migrating an anonymous page, its mapcount goes down | 
 | 2904 | 		 * to 0 and uncharge() will be called. But, even if it's fully | 
 | 2905 | 		 * unmapped, migration may fail and this page has to be | 
 | 2906 | 		 * charged again. We set MIGRATION flag here and delay uncharge | 
 | 2907 | 		 * until end_migration() is called | 
 | 2908 | 		 * | 
 | 2909 | 		 * Corner Case Thinking | 
 | 2910 | 		 * A) | 
 | 2911 | 		 * The old page was mapped as Anon and is unmapped and freed | 
 | 2912 | 		 * while migration is ongoing. | 
 | 2913 | 		 * If unmap finds the old page, its uncharge() will be delayed | 
 | 2914 | 		 * until end_migration(). If unmap finds the new page, it is | 
 | 2915 | 		 * uncharged when unmap makes its mapcount go 1->0. If the unmap | 
 | 2916 | 		 * code finds a migration swap entry, the new page will not be | 
 | 2917 | 		 * mapped and end_migration() will find it (mapcount == 0). | 
 | 2918 | 		 * | 
 | 2919 | 		 * B) | 
 | 2920 | 		 * When the old page was mapped but migration fails, the kernel | 
 | 2921 | 		 * remaps it. The charge for it is kept by the MIGRATION flag even | 
 | 2922 | 		 * if the mapcount goes down to 0. We can remap successfully | 
 | 2923 | 		 * without charging it again. | 
 | 2924 | 		 * | 
 | 2925 | 		 * C) | 
 | 2926 | 		 * The "old" page is under lock_page() until the end of | 
 | 2927 | 		 * migration, so, the old page itself will not be swapped-out. | 
 | 2928 | 		 * If the new page is swapped out before end_migration(), our | 
 | 2929 | 		 * hook into the usual swap-out path will catch the event. | 
 | 2930 | 		 */ | 
 | 2931 | 		if (PageAnon(page)) | 
 | 2932 | 			SetPageCgroupMigration(pc); | 
| Hugh Dickins | b9c565d | 2008-03-04 14:29:11 -0800 | [diff] [blame] | 2933 | 	} | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 2934 | 	unlock_page_cgroup(pc); | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2935 | 	/* | 
 | 2936 | 	 * If the page is not charged at this point, | 
 | 2937 | 	 * we return here. | 
 | 2938 | 	 */ | 
 | 2939 | 	if (!mem) | 
 | 2940 | 		return 0; | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2941 |  | 
| Andrea Arcangeli | 93d5c9b | 2010-04-23 13:17:39 -0400 | [diff] [blame] | 2942 | 	*ptr = mem; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2943 | 	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE); | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2944 | 	css_put(&mem->css);/* drop extra refcnt */ | 
 | 2945 | 	if (ret || *ptr == NULL) { | 
 | 2946 | 		if (PageAnon(page)) { | 
 | 2947 | 			lock_page_cgroup(pc); | 
 | 2948 | 			ClearPageCgroupMigration(pc); | 
 | 2949 | 			unlock_page_cgroup(pc); | 
 | 2950 | 			/* | 
 | 2951 | 			 * The old page may be fully unmapped while we kept it. | 
 | 2952 | 			 */ | 
 | 2953 | 			mem_cgroup_uncharge_page(page); | 
 | 2954 | 		} | 
 | 2955 | 		return -ENOMEM; | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2956 | 	} | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2957 | 	/* | 
 | 2958 | 	 * We charge the new page before it's used/mapped. So, even if unlock_page() | 
 | 2959 | 	 * is called before end_migration(), we can catch all events on this new | 
 | 2960 | 	 * page. If the new page is migrated but not remapped, its mapcount | 
 | 2961 | 	 * will end up at 0 and we call uncharge in end_migration(). | 
 | 2962 | 	 */ | 
 | 2963 | 	pc = lookup_page_cgroup(newpage); | 
 | 2964 | 	if (PageAnon(page)) | 
 | 2965 | 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; | 
 | 2966 | 	else if (page_is_file_cache(page)) | 
 | 2967 | 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; | 
 | 2968 | 	else | 
 | 2969 | 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 2970 | 	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE); | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2971 | 	return ret; | 
 | 2972 | } | 
| Hugh Dickins | fb59e9f | 2008-03-04 14:29:16 -0800 | [diff] [blame] | 2973 |  | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2974 | /* remove redundant charge if migration failed*/ | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2975 | void mem_cgroup_end_migration(struct mem_cgroup *mem, | 
| Daisuke Nishimura | 50de1dd | 2011-01-13 15:47:43 -0800 | [diff] [blame] | 2976 | 	struct page *oldpage, struct page *newpage, bool migration_ok) | 
| KAMEZAWA Hiroyuki | e8589cc | 2008-07-25 01:47:10 -0700 | [diff] [blame] | 2977 | { | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2978 | 	struct page *used, *unused; | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2979 | 	struct page_cgroup *pc; | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2980 |  | 
 | 2981 | 	if (!mem) | 
 | 2982 | 		return; | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2983 | 	/* blocks rmdir() */ | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 2984 | 	cgroup_exclude_rmdir(&mem->css); | 
| Daisuke Nishimura | 50de1dd | 2011-01-13 15:47:43 -0800 | [diff] [blame] | 2985 | 	if (!migration_ok) { | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2986 | 		used = oldpage; | 
 | 2987 | 		unused = newpage; | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2988 | 	} else { | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2989 | 		used = newpage; | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 2990 | 		unused = oldpage; | 
 | 2991 | 	} | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2992 | 	/* | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2993 | 	 * We disallowed uncharge of pages under migration because mapcount | 
 | 2994 | 	 * of the page temporarily goes down to zero. | 
 | 2995 | 	 * Clear the flag and check whether the page should stay charged. | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 2996 | 	 */ | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 2997 | 	pc = lookup_page_cgroup(oldpage); | 
 | 2998 | 	lock_page_cgroup(pc); | 
 | 2999 | 	ClearPageCgroupMigration(pc); | 
 | 3000 | 	unlock_page_cgroup(pc); | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 3001 |  | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 3002 | 	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE); | 
 | 3003 |  | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 3004 | 	/* | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 3005 | 	 * If a page is a file cache, radix-tree replacement is very atomic | 
 | 3006 | 	 * and we can skip this check. When it was an Anon page, its mapcount | 
 | 3007 | 	 * goes down to 0. But because we added MIGRATION flage, it's not | 
 | 3008 | 	 * uncharged yet. There are several case but page->mapcount check | 
 | 3009 | 	 * and USED bit check in mem_cgroup_uncharge_page() will do enough | 
 | 3010 | 	 * check. (see prepare_charge() also) | 
| KAMEZAWA Hiroyuki | 01b1ae6 | 2009-01-07 18:07:50 -0800 | [diff] [blame] | 3011 | 	 */ | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 3012 | 	if (PageAnon(used)) | 
 | 3013 | 		mem_cgroup_uncharge_page(used); | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 3014 | 	/* | 
| akpm@linux-foundation.org | ac39cf8 | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 3015 | 	 * At migration, we may charge account against cgroup which has no | 
 | 3016 | 	 * tasks. | 
| KAMEZAWA Hiroyuki | 8870326 | 2009-07-29 15:04:06 -0700 | [diff] [blame] | 3017 | 	 * So, rmdir()->pre_destroy() can be called while we do this charge. | 
 | 3018 | 	 * In that case, we need to call pre_destroy() again. Check it here. | 
 | 3019 | 	 */ | 
 | 3020 | 	cgroup_release_and_wakeup_rmdir(&mem->css); | 
| KAMEZAWA Hiroyuki | ae41be3 | 2008-02-07 00:14:10 -0800 | [diff] [blame] | 3021 | } | 
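 |  |  | 
 |  | /* | 
 |  |  * Illustrative pairing of the two migration hooks above (hypothetical | 
 |  |  * caller sketch, not taken from this file): | 
 |  |  * | 
 |  |  *	ret = mem_cgroup_prepare_migration(page, newpage, &mem); | 
 |  |  *	if (!ret) { | 
 |  |  *		ok = ...migrate the contents of page into newpage...; | 
 |  |  *		mem_cgroup_end_migration(mem, page, newpage, ok); | 
 |  |  *	} | 
 |  |  * | 
 |  |  * end_migration() then uncharges whichever of the two pages ended up unused. | 
 |  |  */ | 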
| Pavel Emelianov | 78fb746 | 2008-02-07 00:13:51 -0800 | [diff] [blame] | 3022 |  | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3023 | /* | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 3024 |  * A call to try to shrink memory usage on charge failure at shmem's swapin. | 
 | 3025 |  * Calling hierarchical_reclaim is not enough because we should update | 
 | 3026 |  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM. | 
 | 3027 |  * Moreover, considering hierarchy, we should reclaim from the mem_over_limit, | 
 | 3028 |  * not from the memcg which this page would be charged to. | 
 | 3029 |  * try_charge_swapin does all of this properly. | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3030 |  */ | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 3031 | int mem_cgroup_shmem_charge_fallback(struct page *page, | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 3032 | 			    struct mm_struct *mm, | 
 | 3033 | 			    gfp_t gfp_mask) | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3034 | { | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 3035 | 	struct mem_cgroup *mem = NULL; | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 3036 | 	int ret; | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3037 |  | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 3038 | 	if (mem_cgroup_disabled()) | 
| Li Zefan | cede86a | 2008-07-25 01:47:18 -0700 | [diff] [blame] | 3039 | 		return 0; | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3040 |  | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 3041 | 	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem); | 
 | 3042 | 	if (!ret) | 
 | 3043 | 		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */ | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3044 |  | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 3045 | 	return ret; | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3046 | } | 
 | 3047 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3048 | static DEFINE_MUTEX(set_limit_mutex); | 
 | 3049 |  | 
| KOSAKI Motohiro | d38d2a7 | 2009-01-06 14:39:44 -0800 | [diff] [blame] | 3050 | static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3051 | 				unsigned long long val) | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3052 | { | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3053 | 	int retry_count; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3054 | 	u64 memswlimit, memlimit; | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3055 | 	int ret = 0; | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3056 | 	int children = mem_cgroup_count_children(memcg); | 
 | 3057 | 	u64 curusage, oldusage; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3058 | 	int enlarge; | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3059 |  | 
 | 3060 | 	/* | 
 | 3061 | 	 * To keep hierarchical_reclaim simple, how long we should retry | 
 | 3062 | 	 * depends on the caller. We set our retry-count to be a function | 
 | 3063 | 	 * of the number of children which we should visit in this loop. | 
 | 3064 | 	 */ | 
 | 3065 | 	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; | 
 | 3066 |  | 
 | 3067 | 	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3068 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3069 | 	enlarge = 0; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3070 | 	while (retry_count) { | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3071 | 		if (signal_pending(current)) { | 
 | 3072 | 			ret = -EINTR; | 
 | 3073 | 			break; | 
 | 3074 | 		} | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3075 | 		/* | 
 | 3076 | 		 * Rather than hiding it all in some function, do this in an | 
 | 3077 | 		 * open-coded manner so you can see what it really does. | 
 | 3078 | 		 * We have to guarantee mem->res.limit < mem->memsw.limit. | 
 | 3079 | 		 */ | 
 | 3080 | 		mutex_lock(&set_limit_mutex); | 
 | 3081 | 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 
 | 3082 | 		if (memswlimit < val) { | 
 | 3083 | 			ret = -EINVAL; | 
 | 3084 | 			mutex_unlock(&set_limit_mutex); | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3085 | 			break; | 
 | 3086 | 		} | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3087 |  | 
 | 3088 | 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 
 | 3089 | 		if (memlimit < val) | 
 | 3090 | 			enlarge = 1; | 
 | 3091 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3092 | 		ret = res_counter_set_limit(&memcg->res, val); | 
| KAMEZAWA Hiroyuki | 22a668d | 2009-06-17 16:27:19 -0700 | [diff] [blame] | 3093 | 		if (!ret) { | 
 | 3094 | 			if (memswlimit == val) | 
 | 3095 | 				memcg->memsw_is_minimum = true; | 
 | 3096 | 			else | 
 | 3097 | 				memcg->memsw_is_minimum = false; | 
 | 3098 | 		} | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3099 | 		mutex_unlock(&set_limit_mutex); | 
 | 3100 |  | 
 | 3101 | 		if (!ret) | 
 | 3102 | 			break; | 
 | 3103 |  | 
| Bob Liu | aa20d48 | 2009-12-15 16:47:14 -0800 | [diff] [blame] | 3104 | 		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3105 | 						MEM_CGROUP_RECLAIM_SHRINK); | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3106 | 		curusage = res_counter_read_u64(&memcg->res, RES_USAGE); | 
 | 3107 | 		/* Usage is reduced ? */ | 
 | 3108 | 		if (curusage >= oldusage) | 
 | 3109 | 			retry_count--; | 
 | 3110 | 		else | 
 | 3111 | 			oldusage = curusage; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3112 | 	} | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3113 | 	if (!ret && enlarge) | 
 | 3114 | 		memcg_oom_recover(memcg); | 
| KOSAKI Motohiro | 14797e2 | 2009-01-07 18:08:18 -0800 | [diff] [blame] | 3115 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3116 | 	return ret; | 
 | 3117 | } | 
 | 3118 |  | 
| Li Zefan | 338c843 | 2009-06-17 16:27:15 -0700 | [diff] [blame] | 3119 | static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, | 
 | 3120 | 					unsigned long long val) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3121 | { | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3122 | 	int retry_count; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3123 | 	u64 memlimit, memswlimit, oldusage, curusage; | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3124 | 	int children = mem_cgroup_count_children(memcg); | 
 | 3125 | 	int ret = -EBUSY; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3126 | 	int enlarge = 0; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3127 |  | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3128 | 	/* see mem_cgroup_resize_limit */ | 
 | 3129 | 	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; | 
 | 3130 | 	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3131 | 	while (retry_count) { | 
 | 3132 | 		if (signal_pending(current)) { | 
 | 3133 | 			ret = -EINTR; | 
 | 3134 | 			break; | 
 | 3135 | 		} | 
 | 3136 | 		/* | 
 | 3137 | 		 * Rather than hiding it all in some function, do this in an | 
 | 3138 | 		 * open-coded manner so you can see what it really does. | 
 | 3139 | 		 * We have to guarantee mem->res.limit < mem->memsw.limit. | 
 | 3140 | 		 */ | 
 | 3141 | 		mutex_lock(&set_limit_mutex); | 
 | 3142 | 		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 
 | 3143 | 		if (memlimit > val) { | 
 | 3144 | 			ret = -EINVAL; | 
 | 3145 | 			mutex_unlock(&set_limit_mutex); | 
 | 3146 | 			break; | 
 | 3147 | 		} | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3148 | 		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 
 | 3149 | 		if (memswlimit < val) | 
 | 3150 | 			enlarge = 1; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3151 | 		ret = res_counter_set_limit(&memcg->memsw, val); | 
| KAMEZAWA Hiroyuki | 22a668d | 2009-06-17 16:27:19 -0700 | [diff] [blame] | 3152 | 		if (!ret) { | 
 | 3153 | 			if (memlimit == val) | 
 | 3154 | 				memcg->memsw_is_minimum = true; | 
 | 3155 | 			else | 
 | 3156 | 				memcg->memsw_is_minimum = false; | 
 | 3157 | 		} | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3158 | 		mutex_unlock(&set_limit_mutex); | 
 | 3159 |  | 
 | 3160 | 		if (!ret) | 
 | 3161 | 			break; | 
 | 3162 |  | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3163 | 		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, | 
| Balbir Singh | 75822b4 | 2009-09-23 15:56:38 -0700 | [diff] [blame] | 3164 | 						MEM_CGROUP_RECLAIM_NOSWAP | | 
 | 3165 | 						MEM_CGROUP_RECLAIM_SHRINK); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3166 | 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3167 | 		/* Usage is reduced ? */ | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3168 | 		if (curusage >= oldusage) | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3169 | 			retry_count--; | 
| KAMEZAWA Hiroyuki | 81d39c2 | 2009-04-02 16:57:36 -0700 | [diff] [blame] | 3170 | 		else | 
 | 3171 | 			oldusage = curusage; | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3172 | 	} | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3173 | 	if (!ret && enlarge) | 
 | 3174 | 		memcg_oom_recover(memcg); | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3175 | 	return ret; | 
 | 3176 | } | 
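 |  |  | 
 |  | /* | 
 |  |  * Taken together, the two resize helpers above preserve the invariant that | 
 |  |  * the memory limit never exceeds the mem+swap limit: raising memory.limit | 
 |  |  * above memsw.limit, or lowering memsw.limit below memory.limit, fails with | 
 |  |  * -EINVAL while holding set_limit_mutex. | 
 |  |  */ | 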
 | 3177 |  | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3178 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 
| KOSAKI Motohiro | 00918b6 | 2010-08-10 18:03:05 -0700 | [diff] [blame] | 3179 | 					    gfp_t gfp_mask) | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3180 | { | 
 | 3181 | 	unsigned long nr_reclaimed = 0; | 
 | 3182 | 	struct mem_cgroup_per_zone *mz, *next_mz = NULL; | 
 | 3183 | 	unsigned long reclaimed; | 
 | 3184 | 	int loop = 0; | 
 | 3185 | 	struct mem_cgroup_tree_per_zone *mctz; | 
| KAMEZAWA Hiroyuki | ef8745c | 2009-10-01 15:44:12 -0700 | [diff] [blame] | 3186 | 	unsigned long long excess; | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3187 |  | 
 | 3188 | 	if (order > 0) | 
 | 3189 | 		return 0; | 
 | 3190 |  | 
| KOSAKI Motohiro | 00918b6 | 2010-08-10 18:03:05 -0700 | [diff] [blame] | 3191 | 	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone)); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3192 | 	/* | 
 | 3193 | 	 * This loop can run a while, especially if mem_cgroups continuously | 
 | 3194 | 	 * keep exceeding their soft limit and putting the system under | 
 | 3195 | 	 * pressure. | 
 | 3196 | 	 */ | 
 | 3197 | 	do { | 
 | 3198 | 		if (next_mz) | 
 | 3199 | 			mz = next_mz; | 
 | 3200 | 		else | 
 | 3201 | 			mz = mem_cgroup_largest_soft_limit_node(mctz); | 
 | 3202 | 		if (!mz) | 
 | 3203 | 			break; | 
 | 3204 |  | 
 | 3205 | 		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, | 
 | 3206 | 						gfp_mask, | 
 | 3207 | 						MEM_CGROUP_RECLAIM_SOFT); | 
 | 3208 | 		nr_reclaimed += reclaimed; | 
 | 3209 | 		spin_lock(&mctz->lock); | 
 | 3210 |  | 
 | 3211 | 		/* | 
 | 3212 | 		 * If we failed to reclaim anything from this memory cgroup | 
 | 3213 | 		 * it is time to move on to the next cgroup | 
 | 3214 | 		 */ | 
 | 3215 | 		next_mz = NULL; | 
 | 3216 | 		if (!reclaimed) { | 
 | 3217 | 			do { | 
 | 3218 | 				/* | 
 | 3219 | 				 * Loop until we find yet another one. | 
 | 3220 | 				 * | 
 | 3221 | 				 * By the time we get the soft_limit lock | 
 | 3222 | 				 * again, someone might have added the | 
 | 3223 | 				 * group back on the RB tree. Iterate to | 
 | 3224 | 				 * make sure we get a different mem. | 
 | 3225 | 				 * mem_cgroup_largest_soft_limit_node returns | 
 | 3226 | 				 * NULL if no other cgroup is present on | 
 | 3227 | 				 * the tree | 
 | 3228 | 				 */ | 
 | 3229 | 				next_mz = | 
 | 3230 | 				__mem_cgroup_largest_soft_limit_node(mctz); | 
 | 3231 | 				if (next_mz == mz) { | 
 | 3232 | 					css_put(&next_mz->mem->css); | 
 | 3233 | 					next_mz = NULL; | 
 | 3234 | 				} else /* next_mz == NULL or other memcg */ | 
 | 3235 | 					break; | 
 | 3236 | 			} while (1); | 
 | 3237 | 		} | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3238 | 		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz); | 
| KAMEZAWA Hiroyuki | ef8745c | 2009-10-01 15:44:12 -0700 | [diff] [blame] | 3239 | 		excess = res_counter_soft_limit_excess(&mz->mem->res); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3240 | 		/* | 
 | 3241 | 		 * One school of thought says that we should not add | 
 | 3242 | 		 * back the node to the tree if reclaim returns 0. | 
 | 3243 | 		 * But our reclaim could return 0, simply because due | 
 | 3244 | 		 * to priority we are exposing a smaller subset of | 
 | 3245 | 		 * memory to reclaim from. Consider this as a longer | 
 | 3246 | 		 * term TODO. | 
 | 3247 | 		 */ | 
| KAMEZAWA Hiroyuki | ef8745c | 2009-10-01 15:44:12 -0700 | [diff] [blame] | 3248 | 		/* If excess == 0, no tree ops */ | 
 | 3249 | 		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess); | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 3250 | 		spin_unlock(&mctz->lock); | 
 | 3251 | 		css_put(&mz->mem->css); | 
 | 3252 | 		loop++; | 
 | 3253 | 		/* | 
 | 3254 | 		 * Could not reclaim anything and there are no more | 
 | 3255 | 		 * mem cgroups to try or we seem to be looping without | 
 | 3256 | 		 * reclaiming anything. | 
 | 3257 | 		 */ | 
 | 3258 | 		if (!nr_reclaimed && | 
 | 3259 | 			(next_mz == NULL || | 
 | 3260 | 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS)) | 
 | 3261 | 			break; | 
 | 3262 | 	} while (!nr_reclaimed); | 
 | 3263 | 	if (next_mz) | 
 | 3264 | 		css_put(&next_mz->mem->css); | 
 | 3265 | 	return nr_reclaimed; | 
 | 3266 | } | 
 | 3267 |  | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 3268 | /* | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3269 |  * This routine traverses the page_cgroups on the given list and drops them all. | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3270 |  * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups. | 
 | 3271 |  */ | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3272 | static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3273 | 				int node, int zid, enum lru_list lru) | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3274 | { | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3275 | 	struct zone *zone; | 
 | 3276 | 	struct mem_cgroup_per_zone *mz; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3277 | 	struct page_cgroup *pc, *busy; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3278 | 	unsigned long flags, loop; | 
| KAMEZAWA Hiroyuki | 072c56c | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 3279 | 	struct list_head *list; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3280 | 	int ret = 0; | 
| KAMEZAWA Hiroyuki | 072c56c | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 3281 |  | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3282 | 	zone = &NODE_DATA(node)->node_zones[zid]; | 
 | 3283 | 	mz = mem_cgroup_zoneinfo(mem, node, zid); | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 3284 | 	list = &mz->lists[lru]; | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3285 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3286 | 	loop = MEM_CGROUP_ZSTAT(mz, lru); | 
 | 3287 | 	/* give some margin against EBUSY etc...*/ | 
 | 3288 | 	loop += 256; | 
 | 3289 | 	busy = NULL; | 
 | 3290 | 	while (loop--) { | 
 | 3291 | 		ret = 0; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3292 | 		spin_lock_irqsave(&zone->lru_lock, flags); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3293 | 		if (list_empty(list)) { | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3294 | 			spin_unlock_irqrestore(&zone->lru_lock, flags); | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 3295 | 			break; | 
 | 3296 | 		} | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3297 | 		pc = list_entry(list->prev, struct page_cgroup, lru); | 
 | 3298 | 		if (busy == pc) { | 
 | 3299 | 			list_move(&pc->lru, list); | 
| Thiago Farina | 648bcc7 | 2010-03-05 13:42:04 -0800 | [diff] [blame] | 3300 | 			busy = NULL; | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3301 | 			spin_unlock_irqrestore(&zone->lru_lock, flags); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3302 | 			continue; | 
 | 3303 | 		} | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3304 | 		spin_unlock_irqrestore(&zone->lru_lock, flags); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3305 |  | 
| KAMEZAWA Hiroyuki | 2c26fdd | 2009-01-07 18:08:10 -0800 | [diff] [blame] | 3306 | 		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3307 | 		if (ret == -ENOMEM) | 
 | 3308 | 			break; | 
 | 3309 |  | 
 | 3310 | 		if (ret == -EBUSY || ret == -EINVAL) { | 
 | 3311 | 			/* found lock contention or "pc" is obsolete. */ | 
 | 3312 | 			busy = pc; | 
 | 3313 | 			cond_resched(); | 
 | 3314 | 		} else | 
 | 3315 | 			busy = NULL; | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3316 | 	} | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3317 |  | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3318 | 	if (!ret && !list_empty(list)) | 
 | 3319 | 		return -EBUSY; | 
 | 3320 | 	return ret; | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3321 | } | 
 | 3322 |  | 
 | 3323 | /* | 
 | 3324 |  * Make the mem_cgroup's charge 0 if there are no tasks. | 
 | 3325 |  * This enables deleting this mem_cgroup. | 
 | 3326 |  */ | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3327 | static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all) | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3328 | { | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3329 | 	int ret; | 
 | 3330 | 	int node, zid, shrink; | 
 | 3331 | 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3332 | 	struct cgroup *cgrp = mem->css.cgroup; | 
| Hugh Dickins | 8869b8f | 2008-03-04 14:29:09 -0800 | [diff] [blame] | 3333 |  | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3334 | 	css_get(&mem->css); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3335 |  | 
 | 3336 | 	shrink = 0; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3337 | 	/* should free all ? */ | 
 | 3338 | 	if (free_all) | 
 | 3339 | 		goto try_to_free; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3340 | move_account: | 
| Daisuke Nishimura | fce6647 | 2010-01-15 17:01:30 -0800 | [diff] [blame] | 3341 | 	do { | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3342 | 		ret = -EBUSY; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3343 | 		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children)) | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3344 | 			goto out; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3345 | 		ret = -EINTR; | 
 | 3346 | 		if (signal_pending(current)) | 
 | 3347 | 			goto out; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 3348 | 		/* This is for making all *used* pages be on the LRU. */ | 
 | 3349 | 		lru_add_drain_all(); | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 3350 | 		drain_all_stock_sync(); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3351 | 		ret = 0; | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 3352 | 		mem_cgroup_start_move(mem); | 
| KAMEZAWA Hiroyuki | 299b4ea | 2009-01-29 14:25:17 -0800 | [diff] [blame] | 3353 | 		for_each_node_state(node, N_HIGH_MEMORY) { | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3354 | 			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 3355 | 				enum lru_list l; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3356 | 				for_each_lru(l) { | 
 | 3357 | 					ret = mem_cgroup_force_empty_list(mem, | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3358 | 							node, zid, l); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3359 | 					if (ret) | 
 | 3360 | 						break; | 
 | 3361 | 				} | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 3362 | 			} | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3363 | 			if (ret) | 
 | 3364 | 				break; | 
 | 3365 | 		} | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 3366 | 		mem_cgroup_end_move(mem); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 3367 | 		memcg_oom_recover(mem); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3368 | 		/* it seems parent cgroup doesn't have enough mem */ | 
 | 3369 | 		if (ret == -ENOMEM) | 
 | 3370 | 			goto try_to_free; | 
| KAMEZAWA Hiroyuki | 52d4b9a | 2008-10-18 20:28:16 -0700 | [diff] [blame] | 3371 | 		cond_resched(); | 
| Daisuke Nishimura | fce6647 | 2010-01-15 17:01:30 -0800 | [diff] [blame] | 3372 | 	/* "ret" should also be checked to ensure all lists are empty. */ | 
 | 3373 | 	} while (mem->res.usage > 0 || ret); | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3374 | out: | 
 | 3375 | 	css_put(&mem->css); | 
 | 3376 | 	return ret; | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3377 |  | 
 | 3378 | try_to_free: | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3379 | 	/* returns EBUSY if there is a task or if we come here twice. */ | 
 | 3380 | 	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) { | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3381 | 		ret = -EBUSY; | 
 | 3382 | 		goto out; | 
 | 3383 | 	} | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3384 | 	/* we call try-to-free pages to make this cgroup empty */ | 
 | 3385 | 	lru_add_drain_all(); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3386 | 	/* try to free all pages in this cgroup */ | 
 | 3387 | 	shrink = 1; | 
 | 3388 | 	while (nr_retries && mem->res.usage > 0) { | 
 | 3389 | 		int progress; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3390 |  | 
 | 3391 | 		if (signal_pending(current)) { | 
 | 3392 | 			ret = -EINTR; | 
 | 3393 | 			goto out; | 
 | 3394 | 		} | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3395 | 		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, | 
 | 3396 | 						false, get_swappiness(mem)); | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3397 | 		if (!progress) { | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3398 | 			nr_retries--; | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3399 | 			/* maybe some writeback is necessary */ | 
| Jens Axboe | 8aa7e84 | 2009-07-09 14:52:32 +0200 | [diff] [blame] | 3400 | 			congestion_wait(BLK_RW_ASYNC, HZ/10); | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3401 | 		} | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3402 |  | 
 | 3403 | 	} | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 3404 | 	lru_add_drain(); | 
| KAMEZAWA Hiroyuki | f817ed4 | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 3405 | 	/* try move_account...there may be some *locked* pages. */ | 
| Daisuke Nishimura | fce6647 | 2010-01-15 17:01:30 -0800 | [diff] [blame] | 3406 | 	goto move_account; | 
| KAMEZAWA Hiroyuki | cc84758 | 2008-02-07 00:14:16 -0800 | [diff] [blame] | 3407 | } | 
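 |  |  | 
 |  | /* | 
 |  |  * Summary of the flow above: mem_cgroup_force_empty() drains a cgroup's | 
 |  |  * charges either by moving charged page_cgroups to the parent (the | 
 |  |  * move_account loop) or, when free_all is set or the parent runs out of | 
 |  |  * memory, by reclaiming the pages via try_to_free_mem_cgroup_pages(); the | 
 |  |  * try-to-free pass is attempted at most once (see the "shrink" flag). | 
 |  |  */ | 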
 | 3408 |  | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 3409 | int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) | 
 | 3410 | { | 
 | 3411 | 	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); | 
 | 3412 | } | 
 | 3413 |  | 
 | 3414 |  | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 3415 | static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft) | 
 | 3416 | { | 
 | 3417 | 	return mem_cgroup_from_cont(cont)->use_hierarchy; | 
 | 3418 | } | 
 | 3419 |  | 
 | 3420 | static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, | 
 | 3421 | 					u64 val) | 
 | 3422 | { | 
 | 3423 | 	int retval = 0; | 
 | 3424 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
 | 3425 | 	struct cgroup *parent = cont->parent; | 
 | 3426 | 	struct mem_cgroup *parent_mem = NULL; | 
 | 3427 |  | 
 | 3428 | 	if (parent) | 
 | 3429 | 		parent_mem = mem_cgroup_from_cont(parent); | 
 | 3430 |  | 
 | 3431 | 	cgroup_lock(); | 
 | 3432 | 	/* | 
| André Goddard Rosa | af901ca | 2009-11-14 13:09:05 -0200 | [diff] [blame] | 3433 | 	 * If parent's use_hierarchy is set, we can't make any modifications | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 3434 | 	 * in the child subtrees. If it is unset, then the change can | 
 | 3435 | 	 * occur, provided the current cgroup has no children. | 
 | 3436 | 	 * | 
 | 3437 | 	 * For the root cgroup, parent_mem is NULL, we allow value to be | 
 | 3438 | 	 * set if there are no children. | 
 | 3439 | 	 */ | 
 | 3440 | 	if ((!parent_mem || !parent_mem->use_hierarchy) && | 
 | 3441 | 				(val == 1 || val == 0)) { | 
 | 3442 | 		if (list_empty(&cont->children)) | 
 | 3443 | 			mem->use_hierarchy = val; | 
 | 3444 | 		else | 
 | 3445 | 			retval = -EBUSY; | 
 | 3446 | 	} else | 
 | 3447 | 		retval = -EINVAL; | 
 | 3448 | 	cgroup_unlock(); | 
 | 3449 |  | 
 | 3450 | 	return retval; | 
 | 3451 | } | 
 | 3452 |  | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3453 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3454 | static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem, | 
 | 3455 | 				enum mem_cgroup_stat_index idx) | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3456 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3457 | 	struct mem_cgroup *iter; | 
 | 3458 | 	s64 val = 0; | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3459 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3460 | 	/* each per-cpu value can be negative, so use s64 */ | 
 | 3461 | 	for_each_mem_cgroup_tree(iter, mem) | 
 | 3462 | 		val += mem_cgroup_read_stat(iter, idx); | 
 | 3463 |  | 
 | 3464 | 	if (val < 0) /* race ? */ | 
 | 3465 | 		val = 0; | 
 | 3466 | 	return val; | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3467 | } | 
 | 3468 |  | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3469 | static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) | 
 | 3470 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3471 | 	u64 val; | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3472 |  | 
 | 3473 | 	if (!mem_cgroup_is_root(mem)) { | 
 | 3474 | 		if (!swap) | 
 | 3475 | 			return res_counter_read_u64(&mem->res, RES_USAGE); | 
 | 3476 | 		else | 
 | 3477 | 			return res_counter_read_u64(&mem->memsw, RES_USAGE); | 
 | 3478 | 	} | 
 | 3479 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3480 | 	val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE); | 
 | 3481 | 	val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS); | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3482 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3483 | 	if (swap) | 
 | 3484 | 		val += mem_cgroup_get_recursive_idx_stat(mem, | 
 | 3485 | 				MEM_CGROUP_STAT_SWAPOUT); | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3486 |  | 
 | 3487 | 	return val << PAGE_SHIFT; | 
 | 3488 | } | 
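 |  |  | 
 |  | /* | 
 |  |  * For the root cgroup, usage is not read from the res_counters but rebuilt | 
 |  |  * from the hierarchical per-cpu statistics. For example (hypothetical | 
 |  |  * numbers), with CACHE = 100 pages, RSS = 50 pages and PAGE_SHIFT = 12, | 
 |  |  * mem_cgroup_usage(mem, false) returns (100 + 50) << 12 = 614400 bytes. | 
 |  |  */ | 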
 | 3489 |  | 
| Paul Menage | 2c3daa7 | 2008-04-29 00:59:58 -0700 | [diff] [blame] | 3490 | static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 3491 | { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3492 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3493 | 	u64 val; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3494 | 	int type, name; | 
 | 3495 |  | 
 | 3496 | 	type = MEMFILE_TYPE(cft->private); | 
 | 3497 | 	name = MEMFILE_ATTR(cft->private); | 
 | 3498 | 	switch (type) { | 
 | 3499 | 	case _MEM: | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3500 | 		if (name == RES_USAGE) | 
 | 3501 | 			val = mem_cgroup_usage(mem, false); | 
 | 3502 | 		else | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3503 | 			val = res_counter_read_u64(&mem->res, name); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3504 | 		break; | 
 | 3505 | 	case _MEMSWAP: | 
| Kirill A. Shutemov | 104f392 | 2010-03-10 15:22:21 -0800 | [diff] [blame] | 3506 | 		if (name == RES_USAGE) | 
 | 3507 | 			val = mem_cgroup_usage(mem, true); | 
 | 3508 | 		else | 
| Balbir Singh | 0c3e73e | 2009-09-23 15:56:42 -0700 | [diff] [blame] | 3509 | 			val = res_counter_read_u64(&mem->memsw, name); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3510 | 		break; | 
 | 3511 | 	default: | 
 | 3512 | 		BUG(); | 
 | 3513 | 		break; | 
 | 3514 | 	} | 
 | 3515 | 	return val; | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 3516 | } | 
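 |  |  | 
 |  | /* | 
 |  |  * cft->private packs both the counter type (_MEM or _MEMSWAP) and the | 
 |  |  * attribute (RES_USAGE, RES_LIMIT, ...); MEMFILE_TYPE() and MEMFILE_ATTR() | 
 |  |  * decode the two fields in the read/write/reset handlers. | 
 |  |  */ | 
 |  |  | 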
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3517 | /* | 
 | 3518 |  * The user of this function is... | 
 | 3519 |  * RES_LIMIT. | 
 | 3520 |  */ | 
| Paul Menage | 856c13a | 2008-07-25 01:47:04 -0700 | [diff] [blame] | 3521 | static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft, | 
 | 3522 | 			    const char *buffer) | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 3523 | { | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3524 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3525 | 	int type, name; | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3526 | 	unsigned long long val; | 
 | 3527 | 	int ret; | 
 | 3528 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3529 | 	type = MEMFILE_TYPE(cft->private); | 
 | 3530 | 	name = MEMFILE_ATTR(cft->private); | 
 | 3531 | 	switch (name) { | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3532 | 	case RES_LIMIT: | 
| Balbir Singh | 4b3bde4 | 2009-09-23 15:56:32 -0700 | [diff] [blame] | 3533 | 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ | 
 | 3534 | 			ret = -EINVAL; | 
 | 3535 | 			break; | 
 | 3536 | 		} | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3537 | 		/* This function does all necessary parse...reuse it */ | 
 | 3538 | 		ret = res_counter_memparse_write_strategy(buffer, &val); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3539 | 		if (ret) | 
 | 3540 | 			break; | 
 | 3541 | 		if (type == _MEM) | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3542 | 			ret = mem_cgroup_resize_limit(memcg, val); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3543 | 		else | 
 | 3544 | 			ret = mem_cgroup_resize_memsw_limit(memcg, val); | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3545 | 		break; | 
| Balbir Singh | 296c81d | 2009-09-23 15:56:36 -0700 | [diff] [blame] | 3546 | 	case RES_SOFT_LIMIT: | 
 | 3547 | 		ret = res_counter_memparse_write_strategy(buffer, &val); | 
 | 3548 | 		if (ret) | 
 | 3549 | 			break; | 
 | 3550 | 		/* | 
 | 3551 | 		 * For memsw, soft limits are hard to implement in terms | 
 | 3552 | 		 * of semantics. For now, soft limits are supported only | 
 | 3553 | 		 * for memory control without swap. | 
 | 3554 | 		 */ | 
 | 3555 | 		if (type == _MEM) | 
 | 3556 | 			ret = res_counter_set_soft_limit(&memcg->res, val); | 
 | 3557 | 		else | 
 | 3558 | 			ret = -EINVAL; | 
 | 3559 | 		break; | 
| KAMEZAWA Hiroyuki | 628f423 | 2008-07-25 01:47:20 -0700 | [diff] [blame] | 3560 | 	default: | 
 | 3561 | 		ret = -EINVAL; /* should be BUG() ? */ | 
 | 3562 | 		break; | 
 | 3563 | 	} | 
 | 3564 | 	return ret; | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 3565 | } | 
 | 3566 |  | 
| KAMEZAWA Hiroyuki | fee7b54 | 2009-01-07 18:08:26 -0800 | [diff] [blame] | 3567 | static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, | 
 | 3568 | 		unsigned long long *mem_limit, unsigned long long *memsw_limit) | 
 | 3569 | { | 
 | 3570 | 	struct cgroup *cgroup; | 
 | 3571 | 	unsigned long long min_limit, min_memsw_limit, tmp; | 
 | 3572 |  | 
 | 3573 | 	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); | 
 | 3574 | 	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 
 | 3575 | 	cgroup = memcg->css.cgroup; | 
 | 3576 | 	if (!memcg->use_hierarchy) | 
 | 3577 | 		goto out; | 
 | 3578 |  | 
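 |  | 	/* Walk up the hierarchy and report the most restrictive (smallest) limits found. */ | 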
 | 3579 | 	while (cgroup->parent) { | 
 | 3580 | 		cgroup = cgroup->parent; | 
 | 3581 | 		memcg = mem_cgroup_from_cont(cgroup); | 
 | 3582 | 		if (!memcg->use_hierarchy) | 
 | 3583 | 			break; | 
 | 3584 | 		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); | 
 | 3585 | 		min_limit = min(min_limit, tmp); | 
 | 3586 | 		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); | 
 | 3587 | 		min_memsw_limit = min(min_memsw_limit, tmp); | 
 | 3588 | 	} | 
 | 3589 | out: | 
 | 3590 | 	*mem_limit = min_limit; | 
 | 3591 | 	*memsw_limit = min_memsw_limit; | 
 | 3592 | 	return; | 
 | 3593 | } | 
 | 3594 |  | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 3595 | static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) | 
| Pavel Emelyanov | c84872e | 2008-04-29 01:00:17 -0700 | [diff] [blame] | 3596 | { | 
 | 3597 | 	struct mem_cgroup *mem; | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3598 | 	int type, name; | 
| Pavel Emelyanov | c84872e | 2008-04-29 01:00:17 -0700 | [diff] [blame] | 3599 |  | 
 | 3600 | 	mem = mem_cgroup_from_cont(cont); | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3601 | 	type = MEMFILE_TYPE(event); | 
 | 3602 | 	name = MEMFILE_ATTR(event); | 
 | 3603 | 	switch (name) { | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 3604 | 	case RES_MAX_USAGE: | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3605 | 		if (type == _MEM) | 
 | 3606 | 			res_counter_reset_max(&mem->res); | 
 | 3607 | 		else | 
 | 3608 | 			res_counter_reset_max(&mem->memsw); | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 3609 | 		break; | 
 | 3610 | 	case RES_FAILCNT: | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 3611 | 		if (type == _MEM) | 
 | 3612 | 			res_counter_reset_failcnt(&mem->res); | 
 | 3613 | 		else | 
 | 3614 | 			res_counter_reset_failcnt(&mem->memsw); | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 3615 | 		break; | 
 | 3616 | 	} | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 3617 |  | 
| Pavel Emelyanov | 85cc59d | 2008-04-29 01:00:20 -0700 | [diff] [blame] | 3618 | 	return 0; | 
| Pavel Emelyanov | c84872e | 2008-04-29 01:00:17 -0700 | [diff] [blame] | 3619 | } | 
 | 3620 |  | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 3621 | static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, | 
 | 3622 | 					struct cftype *cft) | 
 | 3623 | { | 
 | 3624 | 	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; | 
 | 3625 | } | 
 | 3626 |  | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 3627 | #ifdef CONFIG_MMU | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 3628 | static int mem_cgroup_move_charge_write(struct cgroup *cgrp, | 
 | 3629 | 					struct cftype *cft, u64 val) | 
 | 3630 | { | 
 | 3631 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 
 | 3632 |  | 
 | 3633 | 	if (val >= (1 << NR_MOVE_TYPE)) | 
 | 3634 | 		return -EINVAL; | 
 | 3635 | 	/* | 
 | 3636 | 	 * We check this value several times in both can_attach() and | 
 | 3637 | 	 * attach(), so we need the cgroup lock to prevent this value from | 
 | 3638 | 	 * becoming inconsistent. | 
 | 3639 | 	 */ | 
 | 3640 | 	cgroup_lock(); | 
 | 3641 | 	mem->move_charge_at_immigrate = val; | 
 | 3642 | 	cgroup_unlock(); | 
 | 3643 |  | 
 | 3644 | 	return 0; | 
 | 3645 | } | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 3646 | #else | 
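 |  | /* | 
 |  |  * Charge moving walks the task's page tables, which is not possible | 
 |  |  * without an MMU, so only a stub returning -ENOSYS is provided here. | 
 |  |  */ | 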
 | 3647 | static int mem_cgroup_move_charge_write(struct cgroup *cgrp, | 
 | 3648 | 					struct cftype *cft, u64 val) | 
 | 3649 | { | 
 | 3650 | 	return -ENOSYS; | 
 | 3651 | } | 
 | 3652 | #endif | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 3653 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3654 |  | 
 | 3655 | /* Statistics reported via the "stat" file (mem_control_stat_show()) */ | 
 | 3656 | enum { | 
 | 3657 | 	MCS_CACHE, | 
 | 3658 | 	MCS_RSS, | 
| KAMEZAWA Hiroyuki | d804658 | 2009-12-15 16:47:09 -0800 | [diff] [blame] | 3659 | 	MCS_FILE_MAPPED, | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3660 | 	MCS_PGPGIN, | 
 | 3661 | 	MCS_PGPGOUT, | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3662 | 	MCS_SWAP, | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3663 | 	MCS_INACTIVE_ANON, | 
 | 3664 | 	MCS_ACTIVE_ANON, | 
 | 3665 | 	MCS_INACTIVE_FILE, | 
 | 3666 | 	MCS_ACTIVE_FILE, | 
 | 3667 | 	MCS_UNEVICTABLE, | 
 | 3668 | 	NR_MCS_STAT, | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3669 | }; | 
 | 3670 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3671 | struct mcs_total_stat { | 
 | 3672 | 	s64 stat[NR_MCS_STAT]; | 
 | 3673 | }; | 
 | 3674 |  | 
 | 3675 | struct { | 
 | 3676 | 	char *local_name; | 
 | 3677 | 	char *total_name; | 
 | 3678 | } memcg_stat_strings[NR_MCS_STAT] = { | 
 | 3679 | 	{"cache", "total_cache"}, | 
 | 3680 | 	{"rss", "total_rss"}, | 
| Balbir Singh | d69b042 | 2009-06-17 16:26:34 -0700 | [diff] [blame] | 3681 | 	{"mapped_file", "total_mapped_file"}, | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3682 | 	{"pgpgin", "total_pgpgin"}, | 
 | 3683 | 	{"pgpgout", "total_pgpgout"}, | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3684 | 	{"swap", "total_swap"}, | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3685 | 	{"inactive_anon", "total_inactive_anon"}, | 
 | 3686 | 	{"active_anon", "total_active_anon"}, | 
 | 3687 | 	{"inactive_file", "total_inactive_file"}, | 
 | 3688 | 	{"active_file", "total_active_file"}, | 
 | 3689 | 	{"unevictable", "total_unevictable"} | 
 | 3690 | }; | 
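 |  | /* | 
 |  |  * local_name reports this cgroup's own counters; total_name reports | 
 |  |  * hierarchical totals summed over the whole subtree. | 
 |  |  */ | 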
 | 3691 |  | 
 | 3692 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3693 | static void | 
 | 3694 | mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3695 | { | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3696 | 	s64 val; | 
 | 3697 |  | 
 | 3698 | 	/* per cpu stat */ | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3699 | 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3700 | 	s->stat[MCS_CACHE] += val * PAGE_SIZE; | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3701 | 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3702 | 	s->stat[MCS_RSS] += val * PAGE_SIZE; | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3703 | 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); | 
| KAMEZAWA Hiroyuki | d804658 | 2009-12-15 16:47:09 -0800 | [diff] [blame] | 3704 | 	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3705 | 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3706 | 	s->stat[MCS_PGPGIN] += val; | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3707 | 	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3708 | 	s->stat[MCS_PGPGOUT] += val; | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3709 | 	if (do_swap_account) { | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 3710 | 		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3711 | 		s->stat[MCS_SWAP] += val * PAGE_SIZE; | 
 | 3712 | 	} | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3713 |  | 
 | 3714 | 	/* per zone stat */ | 
 | 3715 | 	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); | 
 | 3716 | 	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE; | 
 | 3717 | 	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON); | 
 | 3718 | 	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE; | 
 | 3719 | 	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE); | 
 | 3720 | 	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE; | 
 | 3721 | 	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE); | 
 | 3722 | 	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; | 
 | 3723 | 	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE); | 
 | 3724 | 	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3725 | } | 
 | 3726 |  | 
 | 3727 | static void | 
 | 3728 | mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | 
 | 3729 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3730 | 	struct mem_cgroup *iter; | 
 | 3731 |  | 
 | 3732 | 	for_each_mem_cgroup_tree(iter, mem) | 
 | 3733 | 		mem_cgroup_get_local_stat(iter, s); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3734 | } | 
 | 3735 |  | 
| Paul Menage | c64745c | 2008-04-29 01:00:02 -0700 | [diff] [blame] | 3736 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, | 
 | 3737 | 				 struct cgroup_map_cb *cb) | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3738 | { | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3739 | 	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3740 | 	struct mcs_total_stat mystat; | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3741 | 	int i; | 
 | 3742 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3743 | 	memset(&mystat, 0, sizeof(mystat)); | 
 | 3744 | 	mem_cgroup_get_local_stat(mem_cont, &mystat); | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3745 |  | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3746 | 	for (i = 0; i < NR_MCS_STAT; i++) { | 
 | 3747 | 		if (i == MCS_SWAP && !do_swap_account) | 
 | 3748 | 			continue; | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3749 | 		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3750 | 	} | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 3751 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3752 | 	/* Hierarchical information */ | 
| KAMEZAWA Hiroyuki | fee7b54 | 2009-01-07 18:08:26 -0800 | [diff] [blame] | 3753 | 	{ | 
 | 3754 | 		unsigned long long limit, memsw_limit; | 
 | 3755 | 		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit); | 
 | 3756 | 		cb->fill(cb, "hierarchical_memory_limit", limit); | 
 | 3757 | 		if (do_swap_account) | 
 | 3758 | 			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); | 
 | 3759 | 	} | 
| KOSAKI Motohiro | 7f016ee | 2009-01-07 18:08:22 -0800 | [diff] [blame] | 3760 |  | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3761 | 	memset(&mystat, 0, sizeof(mystat)); | 
 | 3762 | 	mem_cgroup_get_total_stat(mem_cont, &mystat); | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3763 | 	for (i = 0; i < NR_MCS_STAT; i++) { | 
 | 3764 | 		if (i == MCS_SWAP && !do_swap_account) | 
 | 3765 | 			continue; | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3766 | 		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); | 
| Daisuke Nishimura | 1dd3a27 | 2009-09-23 15:56:43 -0700 | [diff] [blame] | 3767 | 	} | 
| KAMEZAWA Hiroyuki | 14067bb | 2009-04-02 16:57:35 -0700 | [diff] [blame] | 3768 |  | 
| KOSAKI Motohiro | 7f016ee | 2009-01-07 18:08:22 -0800 | [diff] [blame] | 3769 | #ifdef CONFIG_DEBUG_VM | 
| KOSAKI Motohiro | c772be9 | 2009-01-07 18:08:25 -0800 | [diff] [blame] | 3770 | 	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); | 
| KOSAKI Motohiro | 7f016ee | 2009-01-07 18:08:22 -0800 | [diff] [blame] | 3771 |  | 
 | 3772 | 	{ | 
 | 3773 | 		int nid, zid; | 
 | 3774 | 		struct mem_cgroup_per_zone *mz; | 
 | 3775 | 		unsigned long recent_rotated[2] = {0, 0}; | 
 | 3776 | 		unsigned long recent_scanned[2] = {0, 0}; | 
 | 3777 |  | 
 | 3778 | 		for_each_online_node(nid) | 
 | 3779 | 			for (zid = 0; zid < MAX_NR_ZONES; zid++) { | 
 | 3780 | 				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); | 
 | 3781 |  | 
 | 3782 | 				recent_rotated[0] += | 
 | 3783 | 					mz->reclaim_stat.recent_rotated[0]; | 
 | 3784 | 				recent_rotated[1] += | 
 | 3785 | 					mz->reclaim_stat.recent_rotated[1]; | 
 | 3786 | 				recent_scanned[0] += | 
 | 3787 | 					mz->reclaim_stat.recent_scanned[0]; | 
 | 3788 | 				recent_scanned[1] += | 
 | 3789 | 					mz->reclaim_stat.recent_scanned[1]; | 
 | 3790 | 			} | 
 | 3791 | 		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); | 
 | 3792 | 		cb->fill(cb, "recent_rotated_file", recent_rotated[1]); | 
 | 3793 | 		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); | 
 | 3794 | 		cb->fill(cb, "recent_scanned_file", recent_scanned[1]); | 
 | 3795 | 	} | 
 | 3796 | #endif | 
 | 3797 |  | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 3798 | 	return 0; | 
 | 3799 | } | 
 | 3800 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3801 | static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) | 
 | 3802 | { | 
 | 3803 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); | 
 | 3804 |  | 
 | 3805 | 	return get_swappiness(memcg); | 
 | 3806 | } | 
 | 3807 |  | 
 | 3808 | static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, | 
 | 3809 | 				       u64 val) | 
 | 3810 | { | 
 | 3811 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); | 
 | 3812 | 	struct mem_cgroup *parent; | 
| Li Zefan | 068b38c | 2009-01-15 13:51:26 -0800 | [diff] [blame] | 3813 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3814 | 	if (val > 100) | 
 | 3815 | 		return -EINVAL; | 
 | 3816 |  | 
 | 3817 | 	if (cgrp->parent == NULL) | 
 | 3818 | 		return -EINVAL; | 
 | 3819 |  | 
 | 3820 | 	parent = mem_cgroup_from_cont(cgrp->parent); | 
| Li Zefan | 068b38c | 2009-01-15 13:51:26 -0800 | [diff] [blame] | 3821 |  | 
 | 3822 | 	cgroup_lock(); | 
 | 3823 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3824 | 	/* If under hierarchy, only empty-root can set this value */ | 
 | 3825 | 	if ((parent->use_hierarchy) || | 
| Li Zefan | 068b38c | 2009-01-15 13:51:26 -0800 | [diff] [blame] | 3826 | 	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) { | 
 | 3827 | 		cgroup_unlock(); | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3828 | 		return -EINVAL; | 
| Li Zefan | 068b38c | 2009-01-15 13:51:26 -0800 | [diff] [blame] | 3829 | 	} | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3830 |  | 
 | 3831 | 	spin_lock(&memcg->reclaim_param_lock); | 
 | 3832 | 	memcg->swappiness = val; | 
 | 3833 | 	spin_unlock(&memcg->reclaim_param_lock); | 
 | 3834 |  | 
| Li Zefan | 068b38c | 2009-01-15 13:51:26 -0800 | [diff] [blame] | 3835 | 	cgroup_unlock(); | 
 | 3836 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 3837 | 	return 0; | 
 | 3838 | } | 
 | 3839 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3840 | static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) | 
 | 3841 | { | 
 | 3842 | 	struct mem_cgroup_threshold_ary *t; | 
 | 3843 | 	u64 usage; | 
 | 3844 | 	int i; | 
 | 3845 |  | 
 | 3846 | 	rcu_read_lock(); | 
 | 3847 | 	if (!swap) | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3848 | 		t = rcu_dereference(memcg->thresholds.primary); | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3849 | 	else | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3850 | 		t = rcu_dereference(memcg->memsw_thresholds.primary); | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3851 |  | 
 | 3852 | 	if (!t) | 
 | 3853 | 		goto unlock; | 
 | 3854 |  | 
 | 3855 | 	usage = mem_cgroup_usage(memcg, swap); | 
 | 3856 |  | 
 | 3857 | 	/* | 
 | 3858 | 	 * current_threshold points to the threshold just below usage. | 
 | 3859 | 	 * If that is no longer true, a threshold was crossed after the | 
 | 3860 | 	 * last call of __mem_cgroup_threshold(). | 
 | 3861 | 	 */ | 
| Phil Carmody | 5407a56 | 2010-05-26 14:42:42 -0700 | [diff] [blame] | 3862 | 	i = t->current_threshold; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3863 |  | 
 | 3864 | 	/* | 
 | 3865 | 	 * Iterate backward over array of thresholds starting from | 
 | 3866 | 	 * current_threshold and check if a threshold is crossed. | 
 | 3867 | 	 * If none of thresholds below usage is crossed, we read | 
 | 3868 | 	 * only one element of the array here. | 
 | 3869 | 	 */ | 
 | 3870 | 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) | 
 | 3871 | 		eventfd_signal(t->entries[i].eventfd, 1); | 
 | 3872 |  | 
 | 3873 | 	/* i = current_threshold + 1 */ | 
 | 3874 | 	i++; | 
 | 3875 |  | 
 | 3876 | 	/* | 
 | 3877 | 	 * Iterate forward over array of thresholds starting from | 
 | 3878 | 	 * current_threshold+1 and check if a threshold is crossed. | 
 | 3879 | 	 * If none of thresholds above usage is crossed, we read | 
 | 3880 | 	 * only one element of the array here. | 
 | 3881 | 	 */ | 
 | 3882 | 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) | 
 | 3883 | 		eventfd_signal(t->entries[i].eventfd, 1); | 
 | 3884 |  | 
 | 3885 | 	/* Update current_threshold */ | 
| Phil Carmody | 5407a56 | 2010-05-26 14:42:42 -0700 | [diff] [blame] | 3886 | 	t->current_threshold = i - 1; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3887 | unlock: | 
 | 3888 | 	rcu_read_unlock(); | 
 | 3889 | } | 
 | 3890 |  | 
 | 3891 | static void mem_cgroup_threshold(struct mem_cgroup *memcg) | 
 | 3892 | { | 
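 |  | 	/* | 
 |  | 	 * Check thresholds for this memcg and then walk up through its | 
 |  | 	 * hierarchy parents; parent_mem_cgroup() returns NULL once there | 
 |  | 	 * is no res_counter parent, which ends the loop. | 
 |  | 	 */ | 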
| Kirill A. Shutemov | ad4ca5f | 2010-10-07 12:59:27 -0700 | [diff] [blame] | 3893 | 	while (memcg) { | 
 | 3894 | 		__mem_cgroup_threshold(memcg, false); | 
 | 3895 | 		if (do_swap_account) | 
 | 3896 | 			__mem_cgroup_threshold(memcg, true); | 
 | 3897 |  | 
 | 3898 | 		memcg = parent_mem_cgroup(memcg); | 
 | 3899 | 	} | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3900 | } | 
 | 3901 |  | 
 | 3902 | static int compare_thresholds(const void *a, const void *b) | 
 | 3903 | { | 
 | 3904 | 	const struct mem_cgroup_threshold *_a = a; | 
 | 3905 | 	const struct mem_cgroup_threshold *_b = b; | 
 | 3906 |  | 
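 |  | 	/* | 
 |  | 	 * Note: thresholds are u64, so this int difference can truncate and | 
 |  | 	 * mis-order entries that are very far apart. | 
 |  | 	 */ | 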
 | 3907 | 	return _a->threshold - _b->threshold; | 
 | 3908 | } | 
 | 3909 |  | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3910 | static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 3911 | { | 
 | 3912 | 	struct mem_cgroup_eventfd_list *ev; | 
 | 3913 |  | 
 | 3914 | 	list_for_each_entry(ev, &mem->oom_notify, list) | 
 | 3915 | 		eventfd_signal(ev->eventfd, 1); | 
 | 3916 | 	return 0; | 
 | 3917 | } | 
 | 3918 |  | 
 | 3919 | static void mem_cgroup_oom_notify(struct mem_cgroup *mem) | 
 | 3920 | { | 
| KAMEZAWA Hiroyuki | 7d74b06 | 2010-10-27 15:33:41 -0700 | [diff] [blame] | 3921 | 	struct mem_cgroup *iter; | 
 | 3922 |  | 
 | 3923 | 	for_each_mem_cgroup_tree(iter, mem) | 
 | 3924 | 		mem_cgroup_oom_notify_cb(iter); | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 3925 | } | 
 | 3926 |  | 
 | 3927 | static int mem_cgroup_usage_register_event(struct cgroup *cgrp, | 
 | 3928 | 	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3929 | { | 
 | 3930 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3931 | 	struct mem_cgroup_thresholds *thresholds; | 
 | 3932 | 	struct mem_cgroup_threshold_ary *new; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3933 | 	int type = MEMFILE_TYPE(cft->private); | 
 | 3934 | 	u64 threshold, usage; | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3935 | 	int i, size, ret; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3936 |  | 
 | 3937 | 	ret = res_counter_memparse_write_strategy(args, &threshold); | 
 | 3938 | 	if (ret) | 
 | 3939 | 		return ret; | 
 | 3940 |  | 
 | 3941 | 	mutex_lock(&memcg->thresholds_lock); | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3942 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3943 | 	if (type == _MEM) | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3944 | 		thresholds = &memcg->thresholds; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3945 | 	else if (type == _MEMSWAP) | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3946 | 		thresholds = &memcg->memsw_thresholds; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3947 | 	else | 
 | 3948 | 		BUG(); | 
 | 3949 |  | 
 | 3950 | 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP); | 
 | 3951 |  | 
 | 3952 | 	/* Check if a threshold was crossed before adding a new one */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3953 | 	if (thresholds->primary) | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3954 | 		__mem_cgroup_threshold(memcg, type == _MEMSWAP); | 
 | 3955 |  | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3956 | 	size = thresholds->primary ? thresholds->primary->size + 1 : 1; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3957 |  | 
 | 3958 | 	/* Allocate memory for new array of thresholds */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3959 | 	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold), | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3960 | 			GFP_KERNEL); | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3961 | 	if (!new) { | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3962 | 		ret = -ENOMEM; | 
 | 3963 | 		goto unlock; | 
 | 3964 | 	} | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3965 | 	new->size = size; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3966 |  | 
 | 3967 | 	/* Copy thresholds (if any) to new array */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3968 | 	if (thresholds->primary) { | 
 | 3969 | 		memcpy(new->entries, thresholds->primary->entries, (size - 1) * | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3970 | 				sizeof(struct mem_cgroup_threshold)); | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3971 | 	} | 
 | 3972 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3973 | 	/* Add new threshold */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3974 | 	new->entries[size - 1].eventfd = eventfd; | 
 | 3975 | 	new->entries[size - 1].threshold = threshold; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3976 |  | 
 | 3977 | 	/* Sort thresholds. Registering of new threshold isn't time-critical */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3978 | 	sort(new->entries, size, sizeof(struct mem_cgroup_threshold), | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3979 | 			compare_thresholds, NULL); | 
 | 3980 |  | 
 | 3981 | 	/* Find current threshold */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3982 | 	new->current_threshold = -1; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3983 | 	for (i = 0; i < size; i++) { | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3984 | 		if (new->entries[i].threshold < usage) { | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3985 | 			/* | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3986 | 			 * new->current_threshold will not be used until | 
 | 3987 | 			 * rcu_assign_pointer(), so it's safe to increment | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3988 | 			 * it here. | 
 | 3989 | 			 */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3990 | 			++new->current_threshold; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3991 | 		} | 
 | 3992 | 	} | 
 | 3993 |  | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 3994 | 	/* Free old spare buffer and save old primary buffer as spare */ | 
 | 3995 | 	kfree(thresholds->spare); | 
 | 3996 | 	thresholds->spare = thresholds->primary; | 
 | 3997 |  | 
 | 3998 | 	rcu_assign_pointer(thresholds->primary, new); | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 3999 |  | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4000 | 	/* Wait for existing RCU readers of the old thresholds array to finish */ | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4001 | 	synchronize_rcu(); | 
 | 4002 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4003 | unlock: | 
 | 4004 | 	mutex_unlock(&memcg->thresholds_lock); | 
 | 4005 |  | 
 | 4006 | 	return ret; | 
 | 4007 | } | 
 | 4008 |  | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4009 | static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4010 | 	struct cftype *cft, struct eventfd_ctx *eventfd) | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4011 | { | 
 | 4012 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4013 | 	struct mem_cgroup_thresholds *thresholds; | 
 | 4014 | 	struct mem_cgroup_threshold_ary *new; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4015 | 	int type = MEMFILE_TYPE(cft->private); | 
 | 4016 | 	u64 usage; | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4017 | 	int i, j, size; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4018 |  | 
 | 4019 | 	mutex_lock(&memcg->thresholds_lock); | 
 | 4020 | 	if (type == _MEM) | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4021 | 		thresholds = &memcg->thresholds; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4022 | 	else if (type == _MEMSWAP) | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4023 | 		thresholds = &memcg->memsw_thresholds; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4024 | 	else | 
 | 4025 | 		BUG(); | 
 | 4026 |  | 
 | 4027 | 	/* | 
 | 4028 | 	 * Something went wrong if we are trying to unregister a threshold | 
 | 4029 | 	 * while no thresholds are registered. | 
 | 4030 | 	 */ | 
 | 4031 | 	BUG_ON(!thresholds); | 
 | 4032 |  | 
 | 4033 | 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP); | 
 | 4034 |  | 
 | 4035 | 	/* Check if a threshold was crossed before removing */ | 
 | 4036 | 	__mem_cgroup_threshold(memcg, type == _MEMSWAP); | 
 | 4037 |  | 
 | 4038 | 	/* Calculate the new number of thresholds */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4039 | 	size = 0; | 
 | 4040 | 	for (i = 0; i < thresholds->primary->size; i++) { | 
 | 4041 | 		if (thresholds->primary->entries[i].eventfd != eventfd) | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4042 | 			size++; | 
 | 4043 | 	} | 
 | 4044 |  | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4045 | 	new = thresholds->spare; | 
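 |  | 	/* | 
 |  | 	 * Reuse the spare buffer (the previous primary array); at least one | 
 |  | 	 * entry is removed here, so it is large enough. | 
 |  | 	 */ | 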
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4046 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4047 | 	/* Set thresholds array to NULL if we don't have thresholds */ | 
 | 4048 | 	if (!size) { | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4049 | 		kfree(new); | 
 | 4050 | 		new = NULL; | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4051 | 		goto swap_buffers; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4052 | 	} | 
 | 4053 |  | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4054 | 	new->size = size; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4055 |  | 
 | 4056 | 	/* Copy thresholds and find current threshold */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4057 | 	new->current_threshold = -1; | 
 | 4058 | 	for (i = 0, j = 0; i < thresholds->primary->size; i++) { | 
 | 4059 | 		if (thresholds->primary->entries[i].eventfd == eventfd) | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4060 | 			continue; | 
 | 4061 |  | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4062 | 		new->entries[j] = thresholds->primary->entries[i]; | 
 | 4063 | 		if (new->entries[j].threshold < usage) { | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4064 | 			/* | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4065 | 			 * new->current_threshold will not be used | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4066 | 			 * until rcu_assign_pointer(), so it's safe to increment | 
 | 4067 | 			 * it here. | 
 | 4068 | 			 */ | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4069 | 			++new->current_threshold; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4070 | 		} | 
 | 4071 | 		j++; | 
 | 4072 | 	} | 
 | 4073 |  | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4074 | swap_buffers: | 
| Kirill A. Shutemov | 2c488db | 2010-05-26 14:42:47 -0700 | [diff] [blame] | 4075 | 	/* Swap primary and spare array */ | 
 | 4076 | 	thresholds->spare = thresholds->primary; | 
 | 4077 | 	rcu_assign_pointer(thresholds->primary, new); | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4078 |  | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4079 | 	/* Wait for existing RCU readers of the old thresholds array to finish */ | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4080 | 	synchronize_rcu(); | 
 | 4081 |  | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4082 | 	mutex_unlock(&memcg->thresholds_lock); | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4083 | } | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 4084 |  | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4085 | static int mem_cgroup_oom_register_event(struct cgroup *cgrp, | 
 | 4086 | 	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args) | 
 | 4087 | { | 
 | 4088 | 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); | 
 | 4089 | 	struct mem_cgroup_eventfd_list *event; | 
 | 4090 | 	int type = MEMFILE_TYPE(cft->private); | 
 | 4091 |  | 
 | 4092 | 	BUG_ON(type != _OOM_TYPE); | 
 | 4093 | 	event = kmalloc(sizeof(*event),	GFP_KERNEL); | 
 | 4094 | 	if (!event) | 
 | 4095 | 		return -ENOMEM; | 
 | 4096 |  | 
 | 4097 | 	mutex_lock(&memcg_oom_mutex); | 
 | 4098 |  | 
 | 4099 | 	event->eventfd = eventfd; | 
 | 4100 | 	list_add(&event->list, &memcg->oom_notify); | 
 | 4101 |  | 
 | 4102 | 	/* already in OOM ? */ | 
 | 4103 | 	if (atomic_read(&memcg->oom_lock)) | 
 | 4104 | 		eventfd_signal(eventfd, 1); | 
 | 4105 | 	mutex_unlock(&memcg_oom_mutex); | 
 | 4106 |  | 
 | 4107 | 	return 0; | 
 | 4108 | } | 
 | 4109 |  | 
| Kirill A. Shutemov | 907860e | 2010-05-26 14:42:46 -0700 | [diff] [blame] | 4110 | static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4111 | 	struct cftype *cft, struct eventfd_ctx *eventfd) | 
 | 4112 | { | 
 | 4113 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 
 | 4114 | 	struct mem_cgroup_eventfd_list *ev, *tmp; | 
 | 4115 | 	int type = MEMFILE_TYPE(cft->private); | 
 | 4116 |  | 
 | 4117 | 	BUG_ON(type != _OOM_TYPE); | 
 | 4118 |  | 
 | 4119 | 	mutex_lock(&memcg_oom_mutex); | 
 | 4120 |  | 
 | 4121 | 	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) { | 
 | 4122 | 		if (ev->eventfd == eventfd) { | 
 | 4123 | 			list_del(&ev->list); | 
 | 4124 | 			kfree(ev); | 
 | 4125 | 		} | 
 | 4126 | 	} | 
 | 4127 |  | 
 | 4128 | 	mutex_unlock(&memcg_oom_mutex); | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4129 | } | 
 | 4130 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 4131 | static int mem_cgroup_oom_control_read(struct cgroup *cgrp, | 
 | 4132 | 	struct cftype *cft,  struct cgroup_map_cb *cb) | 
 | 4133 | { | 
 | 4134 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 
 | 4135 |  | 
 | 4136 | 	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable); | 
 | 4137 |  | 
 | 4138 | 	if (atomic_read(&mem->oom_lock)) | 
 | 4139 | 		cb->fill(cb, "under_oom", 1); | 
 | 4140 | 	else | 
 | 4141 | 		cb->fill(cb, "under_oom", 0); | 
 | 4142 | 	return 0; | 
 | 4143 | } | 
 | 4144 |  | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 4145 | static int mem_cgroup_oom_control_write(struct cgroup *cgrp, | 
 | 4146 | 	struct cftype *cft, u64 val) | 
 | 4147 | { | 
 | 4148 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); | 
 | 4149 | 	struct mem_cgroup *parent; | 
 | 4150 |  | 
 | 4151 | 	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */ | 
 | 4152 | 	if (!cgrp->parent || !((val == 0) || (val == 1))) | 
 | 4153 | 		return -EINVAL; | 
 | 4154 |  | 
 | 4155 | 	parent = mem_cgroup_from_cont(cgrp->parent); | 
 | 4156 |  | 
 | 4157 | 	cgroup_lock(); | 
 | 4158 | 	/* oom-kill-disable applies to the whole sub-hierarchy; under hierarchy, only an empty root may set it. */ | 
 | 4159 | 	if ((parent->use_hierarchy) || | 
 | 4160 | 	    (mem->use_hierarchy && !list_empty(&cgrp->children))) { | 
 | 4161 | 		cgroup_unlock(); | 
 | 4162 | 		return -EINVAL; | 
 | 4163 | 	} | 
 | 4164 | 	mem->oom_kill_disable = val; | 
| KAMEZAWA Hiroyuki | 4d845eb | 2010-06-29 15:05:18 -0700 | [diff] [blame] | 4165 | 	if (!val) | 
 | 4166 | 		memcg_oom_recover(mem); | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 4167 | 	cgroup_unlock(); | 
 | 4168 | 	return 0; | 
 | 4169 | } | 
 | 4170 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4171 | static struct cftype mem_cgroup_files[] = { | 
 | 4172 | 	{ | 
| Balbir Singh | 0eea103 | 2008-02-07 00:13:57 -0800 | [diff] [blame] | 4173 | 		.name = "usage_in_bytes", | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4174 | 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE), | 
| Paul Menage | 2c3daa7 | 2008-04-29 00:59:58 -0700 | [diff] [blame] | 4175 | 		.read_u64 = mem_cgroup_read, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4176 | 		.register_event = mem_cgroup_usage_register_event, | 
 | 4177 | 		.unregister_event = mem_cgroup_usage_unregister_event, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4178 | 	}, | 
 | 4179 | 	{ | 
| Pavel Emelyanov | c84872e | 2008-04-29 01:00:17 -0700 | [diff] [blame] | 4180 | 		.name = "max_usage_in_bytes", | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4181 | 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE), | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 4182 | 		.trigger = mem_cgroup_reset, | 
| Pavel Emelyanov | c84872e | 2008-04-29 01:00:17 -0700 | [diff] [blame] | 4183 | 		.read_u64 = mem_cgroup_read, | 
 | 4184 | 	}, | 
 | 4185 | 	{ | 
| Balbir Singh | 0eea103 | 2008-02-07 00:13:57 -0800 | [diff] [blame] | 4186 | 		.name = "limit_in_bytes", | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4187 | 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT), | 
| Paul Menage | 856c13a | 2008-07-25 01:47:04 -0700 | [diff] [blame] | 4188 | 		.write_string = mem_cgroup_write, | 
| Paul Menage | 2c3daa7 | 2008-04-29 00:59:58 -0700 | [diff] [blame] | 4189 | 		.read_u64 = mem_cgroup_read, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4190 | 	}, | 
 | 4191 | 	{ | 
| Balbir Singh | 296c81d | 2009-09-23 15:56:36 -0700 | [diff] [blame] | 4192 | 		.name = "soft_limit_in_bytes", | 
 | 4193 | 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT), | 
 | 4194 | 		.write_string = mem_cgroup_write, | 
 | 4195 | 		.read_u64 = mem_cgroup_read, | 
 | 4196 | 	}, | 
 | 4197 | 	{ | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4198 | 		.name = "failcnt", | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4199 | 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT), | 
| Pavel Emelyanov | 29f2a4d | 2008-04-29 01:00:21 -0700 | [diff] [blame] | 4200 | 		.trigger = mem_cgroup_reset, | 
| Paul Menage | 2c3daa7 | 2008-04-29 00:59:58 -0700 | [diff] [blame] | 4201 | 		.read_u64 = mem_cgroup_read, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4202 | 	}, | 
| Balbir Singh | 8697d33 | 2008-02-07 00:13:59 -0800 | [diff] [blame] | 4203 | 	{ | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 4204 | 		.name = "stat", | 
| Paul Menage | c64745c | 2008-04-29 01:00:02 -0700 | [diff] [blame] | 4205 | 		.read_map = mem_control_stat_show, | 
| KAMEZAWA Hiroyuki | d2ceb9b | 2008-02-07 00:14:25 -0800 | [diff] [blame] | 4206 | 	}, | 
| KAMEZAWA Hiroyuki | c1e862c | 2009-01-07 18:07:55 -0800 | [diff] [blame] | 4207 | 	{ | 
 | 4208 | 		.name = "force_empty", | 
 | 4209 | 		.trigger = mem_cgroup_force_empty_write, | 
 | 4210 | 	}, | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4211 | 	{ | 
 | 4212 | 		.name = "use_hierarchy", | 
 | 4213 | 		.write_u64 = mem_cgroup_hierarchy_write, | 
 | 4214 | 		.read_u64 = mem_cgroup_hierarchy_read, | 
 | 4215 | 	}, | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 4216 | 	{ | 
 | 4217 | 		.name = "swappiness", | 
 | 4218 | 		.read_u64 = mem_cgroup_swappiness_read, | 
 | 4219 | 		.write_u64 = mem_cgroup_swappiness_write, | 
 | 4220 | 	}, | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4221 | 	{ | 
 | 4222 | 		.name = "move_charge_at_immigrate", | 
 | 4223 | 		.read_u64 = mem_cgroup_move_charge_read, | 
 | 4224 | 		.write_u64 = mem_cgroup_move_charge_write, | 
 | 4225 | 	}, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4226 | 	{ | 
 | 4227 | 		.name = "oom_control", | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 4228 | 		.read_map = mem_cgroup_oom_control_read, | 
 | 4229 | 		.write_u64 = mem_cgroup_oom_control_write, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4230 | 		.register_event = mem_cgroup_oom_register_event, | 
 | 4231 | 		.unregister_event = mem_cgroup_oom_unregister_event, | 
 | 4232 | 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), | 
 | 4233 | 	}, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4234 | }; | 
 | 4235 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4236 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 
 | 4237 | static struct cftype memsw_cgroup_files[] = { | 
 | 4238 | 	{ | 
 | 4239 | 		.name = "memsw.usage_in_bytes", | 
 | 4240 | 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), | 
 | 4241 | 		.read_u64 = mem_cgroup_read, | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4242 | 		.register_event = mem_cgroup_usage_register_event, | 
 | 4243 | 		.unregister_event = mem_cgroup_usage_unregister_event, | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4244 | 	}, | 
 | 4245 | 	{ | 
 | 4246 | 		.name = "memsw.max_usage_in_bytes", | 
 | 4247 | 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE), | 
 | 4248 | 		.trigger = mem_cgroup_reset, | 
 | 4249 | 		.read_u64 = mem_cgroup_read, | 
 | 4250 | 	}, | 
 | 4251 | 	{ | 
 | 4252 | 		.name = "memsw.limit_in_bytes", | 
 | 4253 | 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), | 
 | 4254 | 		.write_string = mem_cgroup_write, | 
 | 4255 | 		.read_u64 = mem_cgroup_read, | 
 | 4256 | 	}, | 
 | 4257 | 	{ | 
 | 4258 | 		.name = "memsw.failcnt", | 
 | 4259 | 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT), | 
 | 4260 | 		.trigger = mem_cgroup_reset, | 
 | 4261 | 		.read_u64 = mem_cgroup_read, | 
 | 4262 | 	}, | 
 | 4263 | }; | 
 | 4264 |  | 
 | 4265 | static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) | 
 | 4266 | { | 
 | 4267 | 	if (!do_swap_account) | 
 | 4268 | 		return 0; | 
 | 4269 | 	return cgroup_add_files(cont, ss, memsw_cgroup_files, | 
 | 4270 | 				ARRAY_SIZE(memsw_cgroup_files)); | 
 | 4271 | } | 
 | 4272 | #else | 
 | 4273 | static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss) | 
 | 4274 | { | 
 | 4275 | 	return 0; | 
 | 4276 | } | 
 | 4277 | #endif | 
 | 4278 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4279 | static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 
 | 4280 | { | 
 | 4281 | 	struct mem_cgroup_per_node *pn; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4282 | 	struct mem_cgroup_per_zone *mz; | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 4283 | 	enum lru_list l; | 
| KAMEZAWA Hiroyuki | 41e3355 | 2008-04-08 17:41:54 -0700 | [diff] [blame] | 4284 | 	int zone, tmp = node; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4285 | 	/* | 
 | 4286 | 	 * This routine is called for every possible node. | 
 | 4287 | 	 * But it is a bug to call kmalloc() against an offline node. | 
 | 4288 | 	 * | 
 | 4289 | 	 * TODO: this routine can waste a lot of memory on nodes which will | 
 | 4290 | 	 *       never be onlined. It would be better to use a memory hotplug | 
 | 4291 | 	 *       callback function. | 
 | 4292 | 	 */ | 
| KAMEZAWA Hiroyuki | 41e3355 | 2008-04-08 17:41:54 -0700 | [diff] [blame] | 4293 | 	if (!node_state(node, N_NORMAL_MEMORY)) | 
 | 4294 | 		tmp = -1; | 
| Jesper Juhl | 17295c8 | 2011-01-13 15:47:42 -0800 | [diff] [blame] | 4295 | 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp); | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4296 | 	if (!pn) | 
 | 4297 | 		return 1; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4298 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4299 | 	mem->info.nodeinfo[node] = pn; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4300 | 	for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 
 | 4301 | 		mz = &pn->zoneinfo[zone]; | 
| Christoph Lameter | b69408e | 2008-10-18 20:26:14 -0700 | [diff] [blame] | 4302 | 		for_each_lru(l) | 
 | 4303 | 			INIT_LIST_HEAD(&mz->lists[l]); | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 4304 | 		mz->usage_in_excess = 0; | 
| Balbir Singh | 4e41695 | 2009-09-23 15:56:39 -0700 | [diff] [blame] | 4305 | 		mz->on_tree = false; | 
 | 4306 | 		mz->mem = mem; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4307 | 	} | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4308 | 	return 0; | 
 | 4309 | } | 
 | 4310 |  | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 4311 | static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | 
 | 4312 | { | 
 | 4313 | 	kfree(mem->info.nodeinfo[node]); | 
 | 4314 | } | 
 | 4315 |  | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4316 | static struct mem_cgroup *mem_cgroup_alloc(void) | 
 | 4317 | { | 
 | 4318 | 	struct mem_cgroup *mem; | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 4319 | 	int size = sizeof(struct mem_cgroup); | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4320 |  | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 4321 | 	/* Can be very big if MAX_NUMNODES is very big */ | 
| Jan Blunck | c8dad2b | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 4322 | 	if (size < PAGE_SIZE) | 
| Jesper Juhl | 17295c8 | 2011-01-13 15:47:42 -0800 | [diff] [blame] | 4323 | 		mem = kzalloc(size, GFP_KERNEL); | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4324 | 	else | 
| Jesper Juhl | 17295c8 | 2011-01-13 15:47:42 -0800 | [diff] [blame] | 4325 | 		mem = vzalloc(size); | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4326 |  | 
| Dan Carpenter | e7bbcdf | 2010-03-23 13:35:12 -0700 | [diff] [blame] | 4327 | 	if (!mem) | 
 | 4328 | 		return NULL; | 
 | 4329 |  | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 4330 | 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | 
| Dan Carpenter | d2e61b8 | 2010-11-11 14:05:12 -0800 | [diff] [blame] | 4331 | 	if (!mem->stat) | 
 | 4332 | 		goto out_free; | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 4333 | 	spin_lock_init(&mem->pcp_counter_lock); | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4334 | 	return mem; | 
| Dan Carpenter | d2e61b8 | 2010-11-11 14:05:12 -0800 | [diff] [blame] | 4335 |  | 
 | 4336 | out_free: | 
 | 4337 | 	if (size < PAGE_SIZE) | 
 | 4338 | 		kfree(mem); | 
 | 4339 | 	else | 
 | 4340 | 		vfree(mem); | 
 | 4341 | 	return NULL; | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4342 | } | 
 | 4343 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4344 | /* | 
 | 4345 |  * When a mem_cgroup is destroyed, references from swap_cgroup can remain. | 
 | 4346 |  * (Scanning all of them at force_empty would be too costly...) | 
 | 4347 |  * | 
 | 4348 |  * Instead of clearing all references at force_empty, we remember | 
 | 4349 |  * the number of references from swap_cgroup and free the mem_cgroup when | 
 | 4350 |  * it goes down to 0. | 
 | 4351 |  * | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4352 |  * Removal of cgroup itself succeeds regardless of refs from swap. | 
 | 4353 |  */ | 
 | 4354 |  | 
| KAMEZAWA Hiroyuki | a7ba0ee | 2009-01-07 18:08:32 -0800 | [diff] [blame] | 4355 | static void __mem_cgroup_free(struct mem_cgroup *mem) | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4356 | { | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 4357 | 	int node; | 
 | 4358 |  | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 4359 | 	mem_cgroup_remove_from_trees(mem); | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 4360 | 	free_css_id(&mem_cgroup_subsys, &mem->css); | 
 | 4361 |  | 
| KAMEZAWA Hiroyuki | 08e552c | 2009-01-07 18:08:01 -0800 | [diff] [blame] | 4362 | 	for_each_node_state(node, N_POSSIBLE) | 
 | 4363 | 		free_mem_cgroup_per_zone_info(mem, node); | 
 | 4364 |  | 
| KAMEZAWA Hiroyuki | c62b1a3 | 2010-03-10 15:22:29 -0800 | [diff] [blame] | 4365 | 	free_percpu(mem->stat); | 
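 |  | 	/* This must mirror the kzalloc()/vzalloc() size check in mem_cgroup_alloc(). */ | 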
 | 4366 | 	if (sizeof(struct mem_cgroup) < PAGE_SIZE) | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4367 | 		kfree(mem); | 
 | 4368 | 	else | 
 | 4369 | 		vfree(mem); | 
 | 4370 | } | 
 | 4371 |  | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4372 | static void mem_cgroup_get(struct mem_cgroup *mem) | 
 | 4373 | { | 
 | 4374 | 	atomic_inc(&mem->refcnt); | 
 | 4375 | } | 
 | 4376 |  | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4377 | static void __mem_cgroup_put(struct mem_cgroup *mem, int count) | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4378 | { | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4379 | 	if (atomic_sub_and_test(count, &mem->refcnt)) { | 
| Daisuke Nishimura | 7bcc1bb | 2009-01-29 14:25:11 -0800 | [diff] [blame] | 4380 | 		struct mem_cgroup *parent = parent_mem_cgroup(mem); | 
| KAMEZAWA Hiroyuki | a7ba0ee | 2009-01-07 18:08:32 -0800 | [diff] [blame] | 4381 | 		__mem_cgroup_free(mem); | 
| Daisuke Nishimura | 7bcc1bb | 2009-01-29 14:25:11 -0800 | [diff] [blame] | 4382 | 		if (parent) | 
 | 4383 | 			mem_cgroup_put(parent); | 
 | 4384 | 	} | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4385 | } | 
 | 4386 |  | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4387 | static void mem_cgroup_put(struct mem_cgroup *mem) | 
 | 4388 | { | 
 | 4389 | 	__mem_cgroup_put(mem, 1); | 
 | 4390 | } | 
 | 4391 |  | 
| Daisuke Nishimura | 7bcc1bb | 2009-01-29 14:25:11 -0800 | [diff] [blame] | 4392 | /* | 
 | 4393 |  * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled. | 
 | 4394 |  */ | 
 | 4395 | static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem) | 
 | 4396 | { | 
 | 4397 | 	if (!mem->res.parent) | 
 | 4398 | 		return NULL; | 
 | 4399 | 	return mem_cgroup_from_res_counter(mem->res.parent, res); | 
 | 4400 | } | 
| KAMEZAWA Hiroyuki | 3332794 | 2008-04-29 01:00:24 -0700 | [diff] [blame] | 4401 |  | 
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 4402 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 
 | 4403 | static void __init enable_swap_cgroup(void) | 
 | 4404 | { | 
| Hirokazu Takahashi | f8d6654 | 2009-01-07 18:08:02 -0800 | [diff] [blame] | 4405 | 	if (!mem_cgroup_disabled() && really_do_swap_account) | 
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 4406 | 		do_swap_account = 1; | 
 | 4407 | } | 
 | 4408 | #else | 
 | 4409 | static void __init enable_swap_cgroup(void) | 
 | 4410 | { | 
 | 4411 | } | 
 | 4412 | #endif | 
 | 4413 |  | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 4414 | static int mem_cgroup_soft_limit_tree_init(void) | 
 | 4415 | { | 
 | 4416 | 	struct mem_cgroup_tree_per_node *rtpn; | 
 | 4417 | 	struct mem_cgroup_tree_per_zone *rtpz; | 
 | 4418 | 	int tmp, node, zone; | 
 | 4419 |  | 
 | 4420 | 	for_each_node_state(node, N_POSSIBLE) { | 
 | 4421 | 		tmp = node; | 
 | 4422 | 		if (!node_state(node, N_NORMAL_MEMORY)) | 
 | 4423 | 			tmp = -1; | 
 | 4424 | 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp); | 
 | 4425 | 		if (!rtpn) | 
 | 4426 | 			return 1; | 
 | 4427 |  | 
 | 4428 | 		soft_limit_tree.rb_tree_per_node[node] = rtpn; | 
 | 4429 |  | 
 | 4430 | 		for (zone = 0; zone < MAX_NR_ZONES; zone++) { | 
 | 4431 | 			rtpz = &rtpn->rb_tree_per_zone[zone]; | 
 | 4432 | 			rtpz->rb_root = RB_ROOT; | 
 | 4433 | 			spin_lock_init(&rtpz->lock); | 
 | 4434 | 		} | 
 | 4435 | 	} | 
 | 4436 | 	return 0; | 
 | 4437 | } | 
 | 4438 |  | 
| Li Zefan | 0eb253e | 2009-01-15 13:51:25 -0800 | [diff] [blame] | 4439 | static struct cgroup_subsys_state * __ref | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4440 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 
 | 4441 | { | 
| Balbir Singh | 28dbc4b | 2009-01-07 18:08:05 -0800 | [diff] [blame] | 4442 | 	struct mem_cgroup *mem, *parent; | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 4443 | 	long error = -ENOMEM; | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4444 | 	int node; | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4445 |  | 
| Jan Blunck | c8dad2b | 2009-01-07 18:07:53 -0800 | [diff] [blame] | 4446 | 	mem = mem_cgroup_alloc(); | 
 | 4447 | 	if (!mem) | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 4448 | 		return ERR_PTR(error); | 
| Pavel Emelianov | 78fb746 | 2008-02-07 00:13:51 -0800 | [diff] [blame] | 4449 |  | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4450 | 	for_each_node_state(node, N_POSSIBLE) | 
 | 4451 | 		if (alloc_mem_cgroup_per_zone_info(mem, node)) | 
 | 4452 | 			goto free_out; | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 4453 |  | 
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 4454 | 	/* root ? */ | 
| Balbir Singh | 28dbc4b | 2009-01-07 18:08:05 -0800 | [diff] [blame] | 4455 | 	if (cont->parent == NULL) { | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 4456 | 		int cpu; | 
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 4457 | 		enable_swap_cgroup(); | 
| Balbir Singh | 28dbc4b | 2009-01-07 18:08:05 -0800 | [diff] [blame] | 4458 | 		parent = NULL; | 
| Balbir Singh | 4b3bde4 | 2009-09-23 15:56:32 -0700 | [diff] [blame] | 4459 | 		root_mem_cgroup = mem; | 
| Balbir Singh | f64c3f5 | 2009-09-23 15:56:37 -0700 | [diff] [blame] | 4460 | 		if (mem_cgroup_soft_limit_tree_init()) | 
 | 4461 | 			goto free_out; | 
| KAMEZAWA Hiroyuki | cdec2e4 | 2009-12-15 16:47:08 -0800 | [diff] [blame] | 4462 | 		for_each_possible_cpu(cpu) { | 
 | 4463 | 			struct memcg_stock_pcp *stock = | 
 | 4464 | 						&per_cpu(memcg_stock, cpu); | 
 | 4465 | 			INIT_WORK(&stock->work, drain_local_stock); | 
 | 4466 | 		} | 
| KAMEZAWA Hiroyuki | 711d3d2 | 2010-10-27 15:33:42 -0700 | [diff] [blame] | 4467 | 		hotcpu_notifier(memcg_cpu_hotplug_callback, 0); | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4468 | 	} else { | 
| Balbir Singh | 28dbc4b | 2009-01-07 18:08:05 -0800 | [diff] [blame] | 4469 | 		parent = mem_cgroup_from_cont(cont->parent); | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4470 | 		mem->use_hierarchy = parent->use_hierarchy; | 
| KAMEZAWA Hiroyuki | 3c11ecf | 2010-05-26 14:42:37 -0700 | [diff] [blame] | 4471 | 		mem->oom_kill_disable = parent->oom_kill_disable; | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4472 | 	} | 
| Balbir Singh | 28dbc4b | 2009-01-07 18:08:05 -0800 | [diff] [blame] | 4473 |  | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4474 | 	if (parent && parent->use_hierarchy) { | 
 | 4475 | 		res_counter_init(&mem->res, &parent->res); | 
 | 4476 | 		res_counter_init(&mem->memsw, &parent->memsw); | 
| Daisuke Nishimura | 7bcc1bb | 2009-01-29 14:25:11 -0800 | [diff] [blame] | 4477 | 		/* | 
 | 4478 | 		 * We increment the refcount of the parent to ensure that we can | 
 | 4479 | 		 * safely access it on res_counter_charge/uncharge. | 
 | 4480 | 		 * This refcount will be decremented when freeing this | 
 | 4481 | 		 * mem_cgroup (see mem_cgroup_put). | 
 | 4482 | 		 */ | 
 | 4483 | 		mem_cgroup_get(parent); | 
| Balbir Singh | 18f59ea | 2009-01-07 18:08:07 -0800 | [diff] [blame] | 4484 | 	} else { | 
 | 4485 | 		res_counter_init(&mem->res, NULL); | 
 | 4486 | 		res_counter_init(&mem->memsw, NULL); | 
 | 4487 | 	} | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 4488 | 	mem->last_scanned_child = 0; | 
| KOSAKI Motohiro | 2733c06 | 2009-01-07 18:08:23 -0800 | [diff] [blame] | 4489 | 	spin_lock_init(&mem->reclaim_param_lock); | 
| KAMEZAWA Hiroyuki | 9490ff2 | 2010-05-26 14:42:36 -0700 | [diff] [blame] | 4490 | 	INIT_LIST_HEAD(&mem->oom_notify); | 
| Balbir Singh | 6d61ef4 | 2009-01-07 18:08:06 -0800 | [diff] [blame] | 4491 |  | 
| KOSAKI Motohiro | a7885eb | 2009-01-07 18:08:24 -0800 | [diff] [blame] | 4492 | 	if (parent) | 
 | 4493 | 		mem->swappiness = get_swappiness(parent); | 
| KAMEZAWA Hiroyuki | a7ba0ee | 2009-01-07 18:08:32 -0800 | [diff] [blame] | 4494 | 	atomic_set(&mem->refcnt, 1); | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4495 | 	mem->move_charge_at_immigrate = 0; | 
| Kirill A. Shutemov | 2e72b63 | 2010-03-10 15:22:24 -0800 | [diff] [blame] | 4496 | 	mutex_init(&mem->thresholds_lock); | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4497 | 	return &mem->css; | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 4498 | free_out: | 
| KAMEZAWA Hiroyuki | a7ba0ee | 2009-01-07 18:08:32 -0800 | [diff] [blame] | 4499 | 	__mem_cgroup_free(mem); | 
| Balbir Singh | 4b3bde4 | 2009-09-23 15:56:32 -0700 | [diff] [blame] | 4500 | 	root_mem_cgroup = NULL; | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 4501 | 	return ERR_PTR(error); | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4502 | } | 
 | 4503 |  | 
| KAMEZAWA Hiroyuki | ec64f51 | 2009-04-02 16:57:26 -0700 | [diff] [blame] | 4504 | static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss, | 
| KAMEZAWA Hiroyuki | df878fb | 2008-02-07 00:14:28 -0800 | [diff] [blame] | 4505 | 					struct cgroup *cont) | 
 | 4506 | { | 
 | 4507 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
| KAMEZAWA Hiroyuki | ec64f51 | 2009-04-02 16:57:26 -0700 | [diff] [blame] | 4508 |  | 
 | 4509 | 	return mem_cgroup_force_empty(mem, false); | 
| KAMEZAWA Hiroyuki | df878fb | 2008-02-07 00:14:28 -0800 | [diff] [blame] | 4510 | } | 
 | 4511 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4512 | static void mem_cgroup_destroy(struct cgroup_subsys *ss, | 
 | 4513 | 				struct cgroup *cont) | 
 | 4514 | { | 
| Daisuke Nishimura | c268e99 | 2009-01-15 13:51:13 -0800 | [diff] [blame] | 4515 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 
| Daisuke Nishimura | c268e99 | 2009-01-15 13:51:13 -0800 | [diff] [blame] | 4516 |  | 
| Daisuke Nishimura | c268e99 | 2009-01-15 13:51:13 -0800 | [diff] [blame] | 4517 | 	mem_cgroup_put(mem); | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4518 | } | 
 | 4519 |  | 
 | 4520 | static int mem_cgroup_populate(struct cgroup_subsys *ss, | 
 | 4521 | 				struct cgroup *cont) | 
 | 4522 | { | 
| KAMEZAWA Hiroyuki | 8c7c6e3 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 4523 | 	int ret; | 
 | 4524 |  | 
 | 4525 | 	ret = cgroup_add_files(cont, ss, mem_cgroup_files, | 
 | 4526 | 				ARRAY_SIZE(mem_cgroup_files)); | 
 | 4527 |  | 
 | 4528 | 	if (!ret) | 
 | 4529 | 		ret = register_memsw_files(cont, ss); | 
 | 4530 | 	return ret; | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 4531 | } | 
 | 4532 |  | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4533 | #ifdef CONFIG_MMU | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4534 | /* Handlers for move charge at task migration. */ | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4535 | #define PRECHARGE_COUNT_AT_ONCE	256 | 
 | 4536 | static int mem_cgroup_do_precharge(unsigned long count) | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4537 | { | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4538 | 	int ret = 0; | 
 | 4539 | 	int batch_count = PRECHARGE_COUNT_AT_ONCE; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4540 | 	struct mem_cgroup *mem = mc.to; | 
 | 4541 |  | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4542 | 	if (mem_cgroup_is_root(mem)) { | 
 | 4543 | 		mc.precharge += count; | 
 | 4544 | 		/* we don't need css_get for root */ | 
 | 4545 | 		return ret; | 
 | 4546 | 	} | 
 | 4547 | 	/* try to charge at once */ | 
 | 4548 | 	if (count > 1) { | 
 | 4549 | 		struct res_counter *dummy; | 
 | 4550 | 		/* | 
 | 4551 | 		 * "mem" cannot be under rmdir() because we've already checked, | 
 | 4552 | 		 * via cgroup_lock_live_cgroup(), that it has not been removed and | 
 | 4553 | 		 * we are still under the same cgroup_mutex. So we can postpone | 
 | 4554 | 		 * css_get(). | 
 | 4555 | 		 */ | 
 | 4556 | 		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) | 
 | 4557 | 			goto one_by_one; | 
 | 4558 | 		if (do_swap_account && res_counter_charge(&mem->memsw, | 
 | 4559 | 						PAGE_SIZE * count, &dummy)) { | 
 | 4560 | 			res_counter_uncharge(&mem->res, PAGE_SIZE * count); | 
 | 4561 | 			goto one_by_one; | 
 | 4562 | 		} | 
 | 4563 | 		mc.precharge += count; | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4564 | 		return ret; | 
 | 4565 | 	} | 
 | 4566 | one_by_one: | 
 | 4567 | 	/* fall back to one by one charge */ | 
 | 4568 | 	while (count--) { | 
 | 4569 | 		if (signal_pending(current)) { | 
 | 4570 | 			ret = -EINTR; | 
 | 4571 | 			break; | 
 | 4572 | 		} | 
 | 4573 | 		if (!batch_count--) { | 
 | 4574 | 			batch_count = PRECHARGE_COUNT_AT_ONCE; | 
 | 4575 | 			cond_resched(); | 
 | 4576 | 		} | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 4577 | 		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, | 
 | 4578 | 					      PAGE_SIZE); | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4579 | 		if (ret || !mem) | 
 | 4580 | 			/* mem_cgroup_clear_mc() will do uncharge later */ | 
 | 4581 | 			return -ENOMEM; | 
 | 4582 | 		mc.precharge++; | 
 | 4583 | 	} | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4584 | 	return ret; | 
 | 4585 | } | 
 | 4586 |  | 
 | 4587 | /** | 
 | 4588 |  * is_target_pte_for_mc - check a pte whether it is valid for move charge | 
 | 4589 |  * @vma: the vma the pte to be checked belongs to | 
 | 4590 |  * @addr: the address corresponding to the pte to be checked | 
 | 4591 |  * @ptent: the pte to be checked | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4592 |  * @target: the pointer where the target page or swap entry will be stored (can be NULL) | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4593 |  * | 
 | 4594 |  * Returns | 
 | 4595 |  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge. | 
 | 4596 |  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for | 
 | 4597 |  *     move charge. If @target is not NULL, the page is stored in target->page | 
 | 4598 |  *     with an extra refcount taken (callers should handle it). | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4599 |  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a | 
 | 4600 |  *     target for charge migration. If @target is not NULL, the entry is stored | 
 | 4601 |  *     in target->ent. | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4602 |  * | 
 | 4603 |  * Called with pte lock held. | 
 | 4604 |  */ | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4605 | union mc_target { | 
 | 4606 | 	struct page	*page; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4607 | 	swp_entry_t	ent; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4608 | }; | 
 | 4609 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4610 | enum mc_target_type { | 
 | 4611 | 	MC_TARGET_NONE,	/* not used */ | 
 | 4612 | 	MC_TARGET_PAGE, | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4613 | 	MC_TARGET_SWAP, | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4614 | }; | 
 | 4615 |  | 
| Daisuke Nishimura | 90254a6 | 2010-05-26 14:42:38 -0700 | [diff] [blame] | 4616 | static struct page *mc_handle_present_pte(struct vm_area_struct *vma, | 
 | 4617 | 						unsigned long addr, pte_t ptent) | 
 | 4618 | { | 
 | 4619 | 	struct page *page = vm_normal_page(vma, addr, ptent); | 
 | 4620 |  | 
 | 4621 | 	if (!page || !page_mapped(page)) | 
 | 4622 | 		return NULL; | 
 | 4623 | 	if (PageAnon(page)) { | 
 | 4624 | 		/* we don't move shared anon */ | 
 | 4625 | 		if (!move_anon() || page_mapcount(page) > 2) | 
 | 4626 | 			return NULL; | 
| Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 4627 | 	} else if (!move_file()) | 
 | 4628 | 		/* we ignore mapcount for file pages */ | 
| Daisuke Nishimura | 90254a6 | 2010-05-26 14:42:38 -0700 | [diff] [blame] | 4629 | 		return NULL; | 
 | 4630 | 	if (!get_page_unless_zero(page)) | 
 | 4631 | 		return NULL; | 
 | 4632 |  | 
 | 4633 | 	return page; | 
 | 4634 | } | 
 | 4635 |  | 
 | 4636 | static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, | 
 | 4637 | 			unsigned long addr, pte_t ptent, swp_entry_t *entry) | 
 | 4638 | { | 
 | 4639 | 	int usage_count; | 
 | 4640 | 	struct page *page = NULL; | 
 | 4641 | 	swp_entry_t ent = pte_to_swp_entry(ptent); | 
 | 4642 |  | 
 | 4643 | 	if (!move_anon() || non_swap_entry(ent)) | 
 | 4644 | 		return NULL; | 
 | 4645 | 	usage_count = mem_cgroup_count_swap_user(ent, &page); | 
 | 4646 | 	if (usage_count > 1) { /* we don't move shared anon */ | 
 | 4647 | 		if (page) | 
 | 4648 | 			put_page(page); | 
 | 4649 | 		return NULL; | 
 | 4650 | 	} | 
 | 4651 | 	if (do_swap_account) | 
 | 4652 | 		entry->val = ent.val; | 
 | 4653 |  | 
 | 4654 | 	return page; | 
 | 4655 | } | 
 | 4656 |  | 
| Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 4657 | static struct page *mc_handle_file_pte(struct vm_area_struct *vma, | 
 | 4658 | 			unsigned long addr, pte_t ptent, swp_entry_t *entry) | 
 | 4659 | { | 
 | 4660 | 	struct page *page = NULL; | 
 | 4661 | 	struct inode *inode; | 
 | 4662 | 	struct address_space *mapping; | 
 | 4663 | 	pgoff_t pgoff; | 
 | 4664 |  | 
 | 4665 | 	if (!vma->vm_file) /* anonymous vma */ | 
 | 4666 | 		return NULL; | 
 | 4667 | 	if (!move_file()) | 
 | 4668 | 		return NULL; | 
 | 4669 |  | 
 | 4670 | 	inode = vma->vm_file->f_path.dentry->d_inode; | 
 | 4671 | 	mapping = vma->vm_file->f_mapping; | 
 | 4672 | 	if (pte_none(ptent)) | 
 | 4673 | 		pgoff = linear_page_index(vma, addr); | 
 | 4674 | 	else /* pte_file(ptent) is true */ | 
 | 4675 | 		pgoff = pte_to_pgoff(ptent); | 
 | 4676 |  | 
 | 4677 | 	/* a page is moved even if it's not RSS of this task (i.e. not yet faulted in). */ | 
 | 4678 | 	if (!mapping_cap_swap_backed(mapping)) { /* normal file */ | 
 | 4679 | 		page = find_get_page(mapping, pgoff); | 
 | 4680 | 	} else { /* shmem/tmpfs file. we should take account of swap too. */ | 
 | 4681 | 		swp_entry_t ent; | 
 | 4682 | 		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); | 
 | 4683 | 		if (do_swap_account) | 
 | 4684 | 			entry->val = ent.val; | 
 | 4685 | 	} | 
 | 4686 |  | 
 | 4687 | 	return page; | 
 | 4688 | } | 
 | 4689 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4690 | static int is_target_pte_for_mc(struct vm_area_struct *vma, | 
 | 4691 | 		unsigned long addr, pte_t ptent, union mc_target *target) | 
 | 4692 | { | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4693 | 	struct page *page = NULL; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4694 | 	struct page_cgroup *pc; | 
 | 4695 | 	int ret = 0; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4696 | 	swp_entry_t ent = { .val = 0 }; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4697 |  | 
| Daisuke Nishimura | 90254a6 | 2010-05-26 14:42:38 -0700 | [diff] [blame] | 4698 | 	if (pte_present(ptent)) | 
 | 4699 | 		page = mc_handle_present_pte(vma, addr, ptent); | 
 | 4700 | 	else if (is_swap_pte(ptent)) | 
 | 4701 | 		page = mc_handle_swap_pte(vma, addr, ptent, &ent); | 
| Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 4702 | 	else if (pte_none(ptent) || pte_file(ptent)) | 
 | 4703 | 		page = mc_handle_file_pte(vma, addr, ptent, &ent); | 
| Daisuke Nishimura | 90254a6 | 2010-05-26 14:42:38 -0700 | [diff] [blame] | 4704 |  | 
 | 4705 | 	if (!page && !ent.val) | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4706 | 		return 0; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4707 | 	if (page) { | 
 | 4708 | 		pc = lookup_page_cgroup(page); | 
 | 4709 | 		/* | 
 | 4710 | 		 * Do only a loose check without taking the page_cgroup lock; | 
 | 4711 | 		 * mem_cgroup_move_account() checks whether the pc is valid under | 
 | 4712 | 		 * the lock. | 
 | 4713 | 		 */ | 
 | 4714 | 		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { | 
 | 4715 | 			ret = MC_TARGET_PAGE; | 
 | 4716 | 			if (target) | 
 | 4717 | 				target->page = page; | 
 | 4718 | 		} | 
 | 4719 | 		if (!ret || !target) | 
 | 4720 | 			put_page(page); | 
 | 4721 | 	} | 
| Daisuke Nishimura | 90254a6 | 2010-05-26 14:42:38 -0700 | [diff] [blame] | 4722 | 	/* There is a swap entry and the page doesn't exist or isn't charged */ | 
 | 4723 | 	if (ent.val && !ret && | 
| KAMEZAWA Hiroyuki | 7f0f154 | 2010-05-11 14:06:58 -0700 | [diff] [blame] | 4724 | 			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { | 
 | 4725 | 		ret = MC_TARGET_SWAP; | 
 | 4726 | 		if (target) | 
 | 4727 | 			target->ent = ent; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4728 | 	} | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4729 | 	return ret; | 
 | 4730 | } | 
 | 4731 |  | 
 | 4732 | static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, | 
 | 4733 | 					unsigned long addr, unsigned long end, | 
 | 4734 | 					struct mm_walk *walk) | 
 | 4735 | { | 
 | 4736 | 	struct vm_area_struct *vma = walk->private; | 
 | 4737 | 	pte_t *pte; | 
 | 4738 | 	spinlock_t *ptl; | 
 | 4739 |  | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 4740 | 	VM_BUG_ON(pmd_trans_huge(*pmd)); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4741 | 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 
 | 4742 | 	for (; addr != end; pte++, addr += PAGE_SIZE) | 
 | 4743 | 		if (is_target_pte_for_mc(vma, addr, *pte, NULL)) | 
 | 4744 | 			mc.precharge++;	/* increment precharge temporarily */ | 
 | 4745 | 	pte_unmap_unlock(pte - 1, ptl); | 
 | 4746 | 	cond_resched(); | 
 | 4747 |  | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4748 | 	return 0; | 
 | 4749 | } | 
 | 4750 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4751 | static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | 
 | 4752 | { | 
 | 4753 | 	unsigned long precharge; | 
 | 4754 | 	struct vm_area_struct *vma; | 
 | 4755 |  | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4756 | 	down_read(&mm->mmap_sem); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4757 | 	for (vma = mm->mmap; vma; vma = vma->vm_next) { | 
 | 4758 | 		struct mm_walk mem_cgroup_count_precharge_walk = { | 
 | 4759 | 			.pmd_entry = mem_cgroup_count_precharge_pte_range, | 
 | 4760 | 			.mm = mm, | 
 | 4761 | 			.private = vma, | 
 | 4762 | 		}; | 
 | 4763 | 		if (is_vm_hugetlb_page(vma)) | 
 | 4764 | 			continue; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4765 | 		walk_page_range(vma->vm_start, vma->vm_end, | 
 | 4766 | 					&mem_cgroup_count_precharge_walk); | 
 | 4767 | 	} | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4768 | 	up_read(&mm->mmap_sem); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4769 |  | 
 | 4770 | 	precharge = mc.precharge; | 
 | 4771 | 	mc.precharge = 0; | 
 | 4772 |  | 
 | 4773 | 	return precharge; | 
 | 4774 | } | 
 | 4775 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4776 | static int mem_cgroup_precharge_mc(struct mm_struct *mm) | 
 | 4777 | { | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4778 | 	unsigned long precharge = mem_cgroup_count_precharge(mm); | 
 | 4779 |  | 
 | 4780 | 	VM_BUG_ON(mc.moving_task); | 
 | 4781 | 	mc.moving_task = current; | 
 | 4782 | 	return mem_cgroup_do_precharge(precharge); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4783 | } | 
 | 4784 |  | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4785 | /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */ | 
 | 4786 | static void __mem_cgroup_clear_mc(void) | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4787 | { | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 4788 | 	struct mem_cgroup *from = mc.from; | 
 | 4789 | 	struct mem_cgroup *to = mc.to; | 
 | 4790 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4791 | 	/* we must uncharge all the leftover precharges from mc.to */ | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4792 | 	if (mc.precharge) { | 
 | 4793 | 		__mem_cgroup_cancel_charge(mc.to, mc.precharge); | 
 | 4794 | 		mc.precharge = 0; | 
 | 4795 | 	} | 
 | 4796 | 	/* | 
 | 4797 | 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so | 
 | 4798 | 	 * we must uncharge here. | 
 | 4799 | 	 */ | 
 | 4800 | 	if (mc.moved_charge) { | 
 | 4801 | 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge); | 
 | 4802 | 		mc.moved_charge = 0; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4803 | 	} | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4804 | 	/* we must fixup refcnts and charges */ | 
 | 4805 | 	if (mc.moved_swap) { | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4806 | 		/* uncharge swap account from the old cgroup */ | 
 | 4807 | 		if (!mem_cgroup_is_root(mc.from)) | 
 | 4808 | 			res_counter_uncharge(&mc.from->memsw, | 
 | 4809 | 						PAGE_SIZE * mc.moved_swap); | 
 | 4810 | 		__mem_cgroup_put(mc.from, mc.moved_swap); | 
 | 4811 |  | 
 | 4812 | 		if (!mem_cgroup_is_root(mc.to)) { | 
 | 4813 | 			/* | 
 | 4814 | 			 * we charged both to->res and to->memsw, so we should | 
 | 4815 | 			 * uncharge to->res. | 
 | 4816 | 			 */ | 
 | 4817 | 			res_counter_uncharge(&mc.to->res, | 
 | 4818 | 						PAGE_SIZE * mc.moved_swap); | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4819 | 		} | 
 | 4820 | 		/* we've already done mem_cgroup_get(mc.to) */ | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4821 | 		mc.moved_swap = 0; | 
 | 4822 | 	} | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4823 | 	memcg_oom_recover(from); | 
 | 4824 | 	memcg_oom_recover(to); | 
 | 4825 | 	wake_up_all(&mc.waitq); | 
 | 4826 | } | 
 | 4827 |  | 
 | 4828 | static void mem_cgroup_clear_mc(void) | 
 | 4829 | { | 
 | 4830 | 	struct mem_cgroup *from = mc.from; | 
 | 4831 |  | 
 | 4832 | 	/* | 
 | 4833 | 	 * we must clear moving_task before waking up waiters at the end of | 
 | 4834 | 	 * task migration. | 
 | 4835 | 	 */ | 
 | 4836 | 	mc.moving_task = NULL; | 
 | 4837 | 	__mem_cgroup_clear_mc(); | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 4838 | 	spin_lock(&mc.lock); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4839 | 	mc.from = NULL; | 
 | 4840 | 	mc.to = NULL; | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 4841 | 	spin_unlock(&mc.lock); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 4842 | 	mem_cgroup_end_move(from); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4843 | } | 
 | 4844 |  | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4845 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | 
 | 4846 | 				struct cgroup *cgroup, | 
 | 4847 | 				struct task_struct *p, | 
 | 4848 | 				bool threadgroup) | 
 | 4849 | { | 
 | 4850 | 	int ret = 0; | 
 | 4851 | 	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); | 
 | 4852 |  | 
 | 4853 | 	if (mem->move_charge_at_immigrate) { | 
 | 4854 | 		struct mm_struct *mm; | 
 | 4855 | 		struct mem_cgroup *from = mem_cgroup_from_task(p); | 
 | 4856 |  | 
 | 4857 | 		VM_BUG_ON(from == mem); | 
 | 4858 |  | 
 | 4859 | 		mm = get_task_mm(p); | 
 | 4860 | 		if (!mm) | 
 | 4861 | 			return 0; | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4862 | 		/* We move charges only when we move the owner of the mm */ | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4863 | 		if (mm->owner == p) { | 
 | 4864 | 			VM_BUG_ON(mc.from); | 
 | 4865 | 			VM_BUG_ON(mc.to); | 
 | 4866 | 			VM_BUG_ON(mc.precharge); | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4867 | 			VM_BUG_ON(mc.moved_charge); | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4868 | 			VM_BUG_ON(mc.moved_swap); | 
| KAMEZAWA Hiroyuki | 32047e2 | 2010-10-27 15:33:40 -0700 | [diff] [blame] | 4869 | 			mem_cgroup_start_move(from); | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 4870 | 			spin_lock(&mc.lock); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4871 | 			mc.from = from; | 
 | 4872 | 			mc.to = mem; | 
| KAMEZAWA Hiroyuki | 2bd9bb2 | 2010-08-10 18:02:58 -0700 | [diff] [blame] | 4873 | 			spin_unlock(&mc.lock); | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4874 | 			/* We set mc.moving_task later */ | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4875 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4876 | 			ret = mem_cgroup_precharge_mc(mm); | 
 | 4877 | 			if (ret) | 
 | 4878 | 				mem_cgroup_clear_mc(); | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4879 | 		} | 
 | 4880 | 		mmput(mm); | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4881 | 	} | 
 | 4882 | 	return ret; | 
 | 4883 | } | 
 | 4884 |  | 
 | 4885 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | 
 | 4886 | 				struct cgroup *cgroup, | 
 | 4887 | 				struct task_struct *p, | 
 | 4888 | 				bool threadgroup) | 
 | 4889 | { | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4890 | 	mem_cgroup_clear_mc(); | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4891 | } | 
 | 4892 |  | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4893 | static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, | 
 | 4894 | 				unsigned long addr, unsigned long end, | 
 | 4895 | 				struct mm_walk *walk) | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 4896 | { | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4897 | 	int ret = 0; | 
 | 4898 | 	struct vm_area_struct *vma = walk->private; | 
 | 4899 | 	pte_t *pte; | 
 | 4900 | 	spinlock_t *ptl; | 
 | 4901 |  | 
 | 4902 | retry: | 
| Andrea Arcangeli | ec16851 | 2011-01-13 15:46:56 -0800 | [diff] [blame] | 4903 | 	VM_BUG_ON(pmd_trans_huge(*pmd)); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4904 | 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 
 | 4905 | 	for (; addr != end; addr += PAGE_SIZE) { | 
 | 4906 | 		pte_t ptent = *(pte++); | 
 | 4907 | 		union mc_target target; | 
 | 4908 | 		int type; | 
 | 4909 | 		struct page *page; | 
 | 4910 | 		struct page_cgroup *pc; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4911 | 		swp_entry_t ent; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4912 |  | 
 | 4913 | 		if (!mc.precharge) | 
 | 4914 | 			break; | 
 | 4915 |  | 
 | 4916 | 		type = is_target_pte_for_mc(vma, addr, ptent, &target); | 
 | 4917 | 		switch (type) { | 
 | 4918 | 		case MC_TARGET_PAGE: | 
 | 4919 | 			page = target.page; | 
 | 4920 | 			if (isolate_lru_page(page)) | 
 | 4921 | 				goto put; | 
 | 4922 | 			pc = lookup_page_cgroup(page); | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4923 | 			if (!mem_cgroup_move_account(pc, | 
| KAMEZAWA Hiroyuki | 987eba6 | 2011-01-20 14:44:25 -0800 | [diff] [blame] | 4924 | 					mc.from, mc.to, false, PAGE_SIZE)) { | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4925 | 				mc.precharge--; | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4926 | 				/* we uncharge from mc.from later. */ | 
 | 4927 | 				mc.moved_charge++; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4928 | 			} | 
 | 4929 | 			putback_lru_page(page); | 
 | 4930 | put:			/* is_target_pte_for_mc() gets the page */ | 
 | 4931 | 			put_page(page); | 
 | 4932 | 			break; | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4933 | 		case MC_TARGET_SWAP: | 
 | 4934 | 			ent = target.ent; | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4935 | 			if (!mem_cgroup_move_swap_account(ent, | 
 | 4936 | 						mc.from, mc.to, false)) { | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4937 | 				mc.precharge--; | 
| Daisuke Nishimura | 483c30b | 2010-03-10 15:22:18 -0800 | [diff] [blame] | 4938 | 				/* we fixup refcnts and charges later. */ | 
 | 4939 | 				mc.moved_swap++; | 
 | 4940 | 			} | 
| Daisuke Nishimura | 0249144 | 2010-03-10 15:22:17 -0800 | [diff] [blame] | 4941 | 			break; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4942 | 		default: | 
 | 4943 | 			break; | 
 | 4944 | 		} | 
 | 4945 | 	} | 
 | 4946 | 	pte_unmap_unlock(pte - 1, ptl); | 
 | 4947 | 	cond_resched(); | 
 | 4948 |  | 
 | 4949 | 	if (addr != end) { | 
 | 4950 | 		/* | 
 | 4951 | 		 * We have consumed all precharges we got in can_attach(). | 
 | 4952 | 		 * We try to charge one by one, but don't do any additional | 
 | 4953 | 		 * charges to mc.to if we have already failed to charge once in | 
 | 4954 | 		 * the attach() phase. | 
 | 4955 | 		 */ | 
| Daisuke Nishimura | 854ffa8 | 2010-03-10 15:22:15 -0800 | [diff] [blame] | 4956 | 		ret = mem_cgroup_do_precharge(1); | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4957 | 		if (!ret) | 
 | 4958 | 			goto retry; | 
 | 4959 | 	} | 
 | 4960 |  | 
 | 4961 | 	return ret; | 
 | 4962 | } | 
 | 4963 |  | 
 | 4964 | static void mem_cgroup_move_charge(struct mm_struct *mm) | 
 | 4965 | { | 
 | 4966 | 	struct vm_area_struct *vma; | 
 | 4967 |  | 
 | 4968 | 	lru_add_drain_all(); | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 4969 | retry: | 
 | 4970 | 	if (unlikely(!down_read_trylock(&mm->mmap_sem))) { | 
 | 4971 | 		/* | 
 | 4972 | 		 * Someone who is holding the mmap_sem might be waiting in | 
 | 4973 | 		 * waitq. So we cancel all extra charges, wake up all waiters, | 
 | 4974 | 		 * and retry. Because we cancel precharges, we might not be able | 
 | 4975 | 		 * to move enough charges, but moving charge is a best-effort | 
 | 4976 | 		 * feature anyway, so it wouldn't be a big problem. | 
 | 4977 | 		 */ | 
 | 4978 | 		__mem_cgroup_clear_mc(); | 
 | 4979 | 		cond_resched(); | 
 | 4980 | 		goto retry; | 
 | 4981 | 	} | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4982 | 	for (vma = mm->mmap; vma; vma = vma->vm_next) { | 
 | 4983 | 		int ret; | 
 | 4984 | 		struct mm_walk mem_cgroup_move_charge_walk = { | 
 | 4985 | 			.pmd_entry = mem_cgroup_move_charge_pte_range, | 
 | 4986 | 			.mm = mm, | 
 | 4987 | 			.private = vma, | 
 | 4988 | 		}; | 
 | 4989 | 		if (is_vm_hugetlb_page(vma)) | 
 | 4990 | 			continue; | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 4991 | 		ret = walk_page_range(vma->vm_start, vma->vm_end, | 
 | 4992 | 						&mem_cgroup_move_charge_walk); | 
 | 4993 | 		if (ret) | 
 | 4994 | 			/* | 
 | 4995 | 			 * means we have consumed all precharges and failed in | 
 | 4996 | 			 * doing additional charge. Just abandon here. | 
 | 4997 | 			 */ | 
 | 4998 | 			break; | 
 | 4999 | 	} | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 5000 | 	up_read(&mm->mmap_sem); | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 5001 | } | 
 | 5002 |  | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 5003 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 
 | 5004 | 				struct cgroup *cont, | 
 | 5005 | 				struct cgroup *old_cont, | 
| Ben Blum | be367d0 | 2009-09-23 15:56:31 -0700 | [diff] [blame] | 5006 | 				struct task_struct *p, | 
 | 5007 | 				bool threadgroup) | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 5008 | { | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 5009 | 	struct mm_struct *mm; | 
 | 5010 |  | 
 | 5011 | 	if (!mc.to) | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 5012 | 		/* no need to move charge */ | 
 | 5013 | 		return; | 
 | 5014 |  | 
| Daisuke Nishimura | dfe076b | 2011-01-13 15:47:41 -0800 | [diff] [blame] | 5015 | 	mm = get_task_mm(p); | 
 | 5016 | 	if (mm) { | 
 | 5017 | 		mem_cgroup_move_charge(mm); | 
 | 5018 | 		mmput(mm); | 
 | 5019 | 	} | 
| Daisuke Nishimura | 4ffef5f | 2010-03-10 15:22:14 -0800 | [diff] [blame] | 5020 | 	mem_cgroup_clear_mc(); | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 5021 | } | 
| Daisuke Nishimura | 5cfb80a | 2010-03-23 13:35:11 -0700 | [diff] [blame] | 5022 | #else	/* !CONFIG_MMU */ | 
 | 5023 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | 
 | 5024 | 				struct cgroup *cgroup, | 
 | 5025 | 				struct task_struct *p, | 
 | 5026 | 				bool threadgroup) | 
 | 5027 | { | 
 | 5028 | 	return 0; | 
 | 5029 | } | 
 | 5030 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | 
 | 5031 | 				struct cgroup *cgroup, | 
 | 5032 | 				struct task_struct *p, | 
 | 5033 | 				bool threadgroup) | 
 | 5034 | { | 
 | 5035 | } | 
 | 5036 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 
 | 5037 | 				struct cgroup *cont, | 
 | 5038 | 				struct cgroup *old_cont, | 
 | 5039 | 				struct task_struct *p, | 
 | 5040 | 				bool threadgroup) | 
 | 5041 | { | 
 | 5042 | } | 
 | 5043 | #endif | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 5044 |  | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 5045 | struct cgroup_subsys mem_cgroup_subsys = { | 
 | 5046 | 	.name = "memory", | 
 | 5047 | 	.subsys_id = mem_cgroup_subsys_id, | 
 | 5048 | 	.create = mem_cgroup_create, | 
| KAMEZAWA Hiroyuki | df878fb | 2008-02-07 00:14:28 -0800 | [diff] [blame] | 5049 | 	.pre_destroy = mem_cgroup_pre_destroy, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 5050 | 	.destroy = mem_cgroup_destroy, | 
 | 5051 | 	.populate = mem_cgroup_populate, | 
| Daisuke Nishimura | 7dc74be | 2010-03-10 15:22:13 -0800 | [diff] [blame] | 5052 | 	.can_attach = mem_cgroup_can_attach, | 
 | 5053 | 	.cancel_attach = mem_cgroup_cancel_attach, | 
| Balbir Singh | 67e465a | 2008-02-07 00:13:54 -0800 | [diff] [blame] | 5054 | 	.attach = mem_cgroup_move_task, | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 5055 | 	.early_init = 0, | 
| KAMEZAWA Hiroyuki | 04046e1 | 2009-04-02 16:57:33 -0700 | [diff] [blame] | 5056 | 	.use_id = 1, | 
| Balbir Singh | 8cdea7c | 2008-02-07 00:13:50 -0800 | [diff] [blame] | 5057 | }; | 
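 |  | /* | 
 |  |  * Editor's note -- an illustrative usage sketch, not part of the original | 
 |  |  * source: with the cgroup v1 interface of this era, the "memory" subsystem | 
 |  |  * declared above is typically mounted and the charge-moving path | 
 |  |  * (mem_cgroup_can_attach()/mem_cgroup_move_task()) is exercised roughly as | 
 |  |  * follows; the mount point and control-file names below are assumptions | 
 |  |  * based on the memcg documentation of this period: | 
 |  |  * | 
 |  |  *   # mount -t cgroup -o memory none /cgroup/memory | 
 |  |  *   # mkdir /cgroup/memory/A | 
 |  |  *   # echo 1 > /cgroup/memory/A/memory.move_charge_at_immigrate | 
 |  |  *   # echo <pid> > /cgroup/memory/A/tasks | 
 |  |  * | 
 |  |  * Writing <pid> to "tasks" migrates the task and, with bit 0 of | 
 |  |  * move_charge_at_immigrate set, its anonymous page charges are moved into | 
 |  |  * cgroup A by the can_attach/attach handlers registered above. | 
 |  |  */ | 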
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 5058 |  | 
 | 5059 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 
| Michal Hocko | a42c390 | 2010-11-24 12:57:08 -0800 | [diff] [blame] | 5060 | static int __init enable_swap_account(char *s) | 
 | 5061 | { | 
 | 5062 | 	/* consider enabled if no parameter or 1 is given */ | 
| Michal Hocko | fceda1b | 2011-02-01 15:52:30 -0800 | [diff] [blame] | 5063 | 	if (!(*s) || !strcmp(s, "=1")) | 
| Michal Hocko | a42c390 | 2010-11-24 12:57:08 -0800 | [diff] [blame] | 5064 | 		really_do_swap_account = 1; | 
| Michal Hocko | fceda1b | 2011-02-01 15:52:30 -0800 | [diff] [blame] | 5065 | 	else if (!strcmp(s, "=0")) | 
| Michal Hocko | a42c390 | 2010-11-24 12:57:08 -0800 | [diff] [blame] | 5066 | 		really_do_swap_account = 0; | 
 | 5067 | 	return 1; | 
 | 5068 | } | 
 | 5069 | __setup("swapaccount", enable_swap_account); | 
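 |  | /* | 
 |  |  * Editor's note -- illustrative, not in the original source: given the | 
 |  |  * parser above, booting with "swapaccount" or "swapaccount=1" on the kernel | 
 |  |  * command line enables swap accounting and "swapaccount=0" disables it; | 
 |  |  * __setup() hands enable_swap_account() the text following "swapaccount", | 
 |  |  * i.e. "", "=1" or "=0". This only applies when | 
 |  |  * CONFIG_CGROUP_MEM_RES_CTLR_SWAP is enabled. | 
 |  |  */ | 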
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 5070 |  | 
 | 5071 | static int __init disable_swap_account(char *s) | 
 | 5072 | { | 
| Michal Hocko | 552b372 | 2011-02-01 15:52:31 -0800 | [diff] [blame] | 5073 | 	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n"); | 
| Michal Hocko | fceda1b | 2011-02-01 15:52:30 -0800 | [diff] [blame] | 5074 | 	enable_swap_account("=0"); | 
| KAMEZAWA Hiroyuki | c077719 | 2009-01-07 18:07:57 -0800 | [diff] [blame] | 5075 | 	return 1; | 
 | 5076 | } | 
 | 5077 | __setup("noswapaccount", disable_swap_account); | 
 | 5078 | #endif |