/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES      5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* Remember the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account         (0)
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_SWAPOUT,        /* # of pages, swapped out */
        MEM_CGROUP_STAT_DATA,           /* end of data requires synchronization */
        MEM_CGROUP_ON_MOVE,             /* someone is moving account between groups */
        MEM_CGROUP_STAT_NSTATS,
};

enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
        MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
        MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
        MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
};
/*
 * The per-memcg event counter is incremented at every pagein/pageout. With THP,
 * it is incremented by the number of pages. This counter is used to trigger
 * some periodic events, which is straightforward and better than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_TARGET_NUMAINFO,
        MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET (128)
#define SOFTLIMIT_EVENTS_TARGET (1024)
#define NUMAINFO_EVENTS_TARGET  (1024)
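
/*
 * Roughly speaking: every THRESHOLDS_EVENTS_TARGET (128) charge/uncharge
 * events the usage thresholds are re-checked, every SOFTLIMIT_EVENTS_TARGET
 * (1024) events the soft-limit tree is updated, and every
 * NUMAINFO_EVENTS_TARGET (1024) events the per-node scan information is
 * marked stale. See memcg_check_events() below for how the targets are used.
 */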

struct mem_cgroup_stat_cpu {
        long count[MEM_CGROUP_STAT_NSTATS];
        unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
        /* css_id of the last scanned hierarchy member */
        int position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
        struct lruvec           lruvec;
        unsigned long           count[NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

        struct zone_reclaim_stat reclaim_stat;
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long long      usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded */
        bool                    on_tree;
        struct mem_cgroup       *mem;           /* Back pointer, we cannot */
                                                /* use container_of        */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
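
/*
 * MEM_CGROUP_ZSTAT(mz, idx) simply indexes the per-zone LRU page counters,
 * e.g. MEM_CGROUP_ZSTAT(mz, LRU_INACTIVE_ANON) is the number of inactive
 * anonymous pages accounted to this memcg in this zone (see
 * mem_cgroup_zone_nr_lru_pages() below for a user).
 */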

struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
        struct rb_root rb_root;
        spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
        struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * the counter to account for mem+swap usage.
         */
        struct res_counter memsw;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;
        int last_scanned_node;
#if MAX_NUMNODES > 1
        nodemask_t      scan_nodes;
        atomic_t        numainfo_events;
        atomic_t        numainfo_updating;
#endif
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;

        bool            oom_lock;
        atomic_t        under_oom;

        atomic_t        refcnt;

        int     swappiness;
        /* OOM-Killer disable */
        int             oom_kill_disable;

        /* set when res.limit == memsw.limit */
        bool            memsw_is_minimum;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
         */
        unsigned long   move_charge_at_immigrate;
        /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
        /*
         * used when a cpu is offlined or other synchronizations
         * See mem_cgroup_read_stat().
         */
        struct mem_cgroup_stat_cpu nocpu_base;
        spinlock_t pcp_counter_lock;

#ifdef CONFIG_INET
        struct tcp_memcontrol tcp_mem;
#endif
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
        MOVE_CHARGE_TYPE_FILE,  /* file page(including tmpfs) and swap of it */
        NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t        lock; /* for from, to */
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
        return test_bit(MOVE_CHARGE_TYPE_ANON,
                                        &mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
        return test_bit(MOVE_CHARGE_TYPE_FILE,
                                        &mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
#define _MEM                    (0)
#define _MEMSWAP                (1)
#define _OOM_TYPE               (2)
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL             (0)
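
/*
 * cft->private packs a counter type in the upper 16 bits and a res_counter
 * member (or OOM_CONTROL) in the lower 16 bits; for example,
 * MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) identifies the mem+swap limit file,
 * and MEMFILE_TYPE()/MEMFILE_ATTR() take the packed value apart again.
 */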
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -0800362
Balbir Singh75822b42009-09-23 15:56:38 -0700363/*
364 * Reclaim flags for mem_cgroup_hierarchical_reclaim
365 */
366#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0
367#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
368#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
369#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
370
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700371static void mem_cgroup_get(struct mem_cgroup *memcg);
372static void mem_cgroup_put(struct mem_cgroup *memcg);
Glauber Costae1aab162011-12-11 21:47:03 +0000373
374/* Writing them here to avoid exposing memcg's inner layout */
375#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
376#ifdef CONFIG_INET
377#include <net/sock.h>
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000378#include <net/ip.h>
Glauber Costae1aab162011-12-11 21:47:03 +0000379
380static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
381void sock_update_memcg(struct sock *sk)
382{
Glauber Costae1aab162011-12-11 21:47:03 +0000383 if (static_branch(&memcg_socket_limit_enabled)) {
384 struct mem_cgroup *memcg;
385
386 BUG_ON(!sk->sk_prot->proto_cgroup);
387
Glauber Costaf3f511e2012-01-05 20:16:39 +0000388 /* Socket cloning can throw us here with sk_cgrp already
389 * filled. It won't however, necessarily happen from
390 * process context. So the test for root memcg given
391 * the current task's memcg won't help us in this case.
392 *
393 * Respecting the original socket's memcg is a better
394 * decision in this case.
395 */
396 if (sk->sk_cgrp) {
397 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
398 mem_cgroup_get(sk->sk_cgrp->memcg);
399 return;
400 }
401
Glauber Costae1aab162011-12-11 21:47:03 +0000402 rcu_read_lock();
403 memcg = mem_cgroup_from_task(current);
404 if (!mem_cgroup_is_root(memcg)) {
405 mem_cgroup_get(memcg);
406 sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
407 }
408 rcu_read_unlock();
409 }
410}
411EXPORT_SYMBOL(sock_update_memcg);
412
413void sock_release_memcg(struct sock *sk)
414{
415 if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
416 struct mem_cgroup *memcg;
417 WARN_ON(!sk->sk_cgrp->memcg);
418 memcg = sk->sk_cgrp->memcg;
419 mem_cgroup_put(memcg);
420 }
421}
Glauber Costad1a4c0b2011-12-11 21:47:04 +0000422
423struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
424{
425 if (!memcg || mem_cgroup_is_root(memcg))
426 return NULL;
427
428 return &memcg->tcp_mem.cg_proto;
429}
430EXPORT_SYMBOL(tcp_proto_cgroup);
Glauber Costae1aab162011-12-11 21:47:03 +0000431#endif /* CONFIG_INET */
432#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
433
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700434static void drain_all_stock_async(struct mem_cgroup *memcg);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -0800435
Balbir Singhf64c3f52009-09-23 15:56:37 -0700436static struct mem_cgroup_per_zone *
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700437mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
Balbir Singhf64c3f52009-09-23 15:56:37 -0700438{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700439 return &memcg->info.nodeinfo[nid]->zoneinfo[zid];
Balbir Singhf64c3f52009-09-23 15:56:37 -0700440}
441
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700442struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
Wu Fengguangd3242362009-12-16 12:19:59 +0100443{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700444 return &memcg->css;
Wu Fengguangd3242362009-12-16 12:19:59 +0100445}
446
Balbir Singhf64c3f52009-09-23 15:56:37 -0700447static struct mem_cgroup_per_zone *
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700448page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
Balbir Singhf64c3f52009-09-23 15:56:37 -0700449{
Johannes Weiner97a6c372011-03-23 16:42:27 -0700450 int nid = page_to_nid(page);
451 int zid = page_zonenum(page);
Balbir Singhf64c3f52009-09-23 15:56:37 -0700452
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700453 return mem_cgroup_zoneinfo(memcg, nid, zid);
Balbir Singhf64c3f52009-09-23 15:56:37 -0700454}
455
456static struct mem_cgroup_tree_per_zone *
457soft_limit_tree_node_zone(int nid, int zid)
458{
459 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
460}
461
462static struct mem_cgroup_tree_per_zone *
463soft_limit_tree_from_page(struct page *page)
464{
465 int nid = page_to_nid(page);
466 int zid = page_zonenum(page);
467
468 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
469}
470
471static void
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -0700472__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
Balbir Singhf64c3f52009-09-23 15:56:37 -0700473 struct mem_cgroup_per_zone *mz,
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -0700474 struct mem_cgroup_tree_per_zone *mctz,
475 unsigned long long new_usage_in_excess)
Balbir Singhf64c3f52009-09-23 15:56:37 -0700476{
477 struct rb_node **p = &mctz->rb_root.rb_node;
478 struct rb_node *parent = NULL;
479 struct mem_cgroup_per_zone *mz_node;
480
481 if (mz->on_tree)
482 return;
483
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -0700484 mz->usage_in_excess = new_usage_in_excess;
485 if (!mz->usage_in_excess)
486 return;
Balbir Singhf64c3f52009-09-23 15:56:37 -0700487 while (*p) {
488 parent = *p;
489 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
490 tree_node);
491 if (mz->usage_in_excess < mz_node->usage_in_excess)
492 p = &(*p)->rb_left;
493 /*
494 * We can't avoid mem cgroups that are over their soft
495 * limit by the same amount
496 */
497 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
498 p = &(*p)->rb_right;
499 }
500 rb_link_node(&mz->tree_node, parent, p);
501 rb_insert_color(&mz->tree_node, &mctz->rb_root);
502 mz->on_tree = true;
Balbir Singh4e416952009-09-23 15:56:39 -0700503}
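
/*
 * The soft-limit tree is ordered by usage_in_excess, so the rightmost node
 * is always the memcg/zone that exceeds its soft limit by the largest
 * amount; __mem_cgroup_largest_soft_limit_node() below picks it with
 * rb_last().
 */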

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        if (!mz->on_tree)
                return;
        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        spin_lock(&mctz->lock);
        __mem_cgroup_remove_exceeded(memcg, mz, mctz);
        spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
        unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);

        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counter is not touched.
         */
        for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                mz = mem_cgroup_zoneinfo(memcg, nid, zid);
                excess = res_counter_soft_limit_excess(&memcg->res);
                /*
                 * We have to update the tree if mz is on RB-tree or
                 * mem is over its softlimit.
                 */
                if (excess || mz->on_tree) {
                        spin_lock(&mctz->lock);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(memcg, mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
                        spin_unlock(&mctz->lock);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
        int node, zone;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;

        for_each_node_state(node, N_POSSIBLE) {
                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        mz = mem_cgroup_zoneinfo(memcg, node, zone);
                        mctz = soft_limit_tree_node_zone(node, zone);
                        mem_cgroup_remove_exceeded(memcg, mz, mctz);
                }
        }
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct rb_node *rightmost = NULL;
        struct mem_cgroup_per_zone *mz;

retry:
        mz = NULL;
        rightmost = rb_last(&mctz->rb_root);
        if (!rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
        /*
         * Remove the node now but someone else can add it back,
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
        if (!res_counter_soft_limit_excess(&mz->mem->res) ||
                !css_tryget(&mz->mem->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct mem_cgroup_per_zone *mz;

        spin_lock(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock(&mctz->lock);
        return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use a threshold and periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * the reading cost and the precision of the value, so we may eventually
 * implement periodic synchronization of the counters in memcg as well.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory per memory cgroup and _always_ requires an exact value.
 * Even if we provided a quick-and-fuzzy read, we would still have to visit
 * all online cpus and compute the sum, so for now unnecessary
 * synchronization is not implemented (it is only implemented for cpu
 * hotplug).
 *
 * If kernel-internal users appear that can make use of an inexact value,
 * and reading all cpu values becomes a performance bottleneck in some
 * common workload, thresholding and synchronization as in vmstat[] should
 * be implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
                                 enum mem_cgroup_stat_index idx)
{
        long val = 0;
        int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&memcg->pcp_counter_lock);
        val += memcg->nocpu_base.count[idx];
        spin_unlock(&memcg->pcp_counter_lock);
#endif
        put_online_cpus();
        return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
                                         bool charge)
{
        int val = (charge) ? 1 : -1;
        this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

void mem_cgroup_pgfault(struct mem_cgroup *memcg, int val)
{
        this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
}

void mem_cgroup_pgmajfault(struct mem_cgroup *memcg, int val)
{
        this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
                                            enum mem_cgroup_events_index idx)
{
        unsigned long val = 0;
        int cpu;

        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&memcg->pcp_counter_lock);
        val += memcg->nocpu_base.events[idx];
        spin_unlock(&memcg->pcp_counter_lock);
#endif
        return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         bool file, int nr_pages)
{
        preempt_disable();

        if (file)
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
                                nr_pages);
        else
                __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
                                nr_pages);

        /* pagein of a big page is an event. So, ignore page size */
        if (nr_pages > 0)
                __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
        else {
                __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }

        __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);

        preempt_enable();
}

unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
                        unsigned int lru_mask)
{
        struct mem_cgroup_per_zone *mz;
        enum lru_list l;
        unsigned long ret = 0;

        mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        for_each_lru(l) {
                if (BIT(l) & lru_mask)
                        ret += MEM_CGROUP_ZSTAT(mz, l);
        }
        return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                        int nid, unsigned int lru_mask)
{
        u64 total = 0;
        int zid;

        for (zid = 0; zid < MAX_NR_ZONES; zid++)
                total += mem_cgroup_zone_nr_lru_pages(memcg,
                                                nid, zid, lru_mask);

        return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
                        unsigned int lru_mask)
{
        int nid;
        u64 total = 0;

        for_each_node_state(nid, N_HIGH_MEMORY)
                total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
        return total;
}

static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
        next = __this_cpu_read(memcg->stat->targets[target]);
        /* from time_after() in jiffies.h */
        return ((long)next - (long)val < 0);
}
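
/*
 * Like time_after(): the signed subtraction makes the comparison safe when
 * the event counter wraps around, as long as the target is never set more
 * than LONG_MAX events into the future.
 */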

static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);

        switch (target) {
        case MEM_CGROUP_TARGET_THRESH:
                next = val + THRESHOLDS_EVENTS_TARGET;
                break;
        case MEM_CGROUP_TARGET_SOFTLIMIT:
                next = val + SOFTLIMIT_EVENTS_TARGET;
                break;
        case MEM_CGROUP_TARGET_NUMAINFO:
                next = val + NUMAINFO_EVENTS_TARGET;
                break;
        default:
                return;
        }

        __this_cpu_write(memcg->stat->targets[target], next);
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
        preempt_disable();
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
                mem_cgroup_threshold(memcg);
                __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
                if (unlikely(__memcg_event_check(memcg,
                             MEM_CGROUP_TARGET_SOFTLIMIT))) {
                        mem_cgroup_update_tree(memcg, page);
                        __mem_cgroup_target_update(memcg,
                                                   MEM_CGROUP_TARGET_SOFTLIMIT);
                }
#if MAX_NUMNODES > 1
                if (unlikely(__memcg_event_check(memcg,
                        MEM_CGROUP_TARGET_NUMAINFO))) {
                        atomic_inc(&memcg->numainfo_events);
                        __mem_cgroup_target_update(memcg,
                                MEM_CGROUP_TARGET_NUMAINFO);
                }
#endif
        }
        preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg = NULL;

        if (!mm)
                return NULL;
        /*
         * Because we have no locks, mm->owner's may be being moved to other
         * cgroup. We use css_tryget() here even if this looks
         * pessimistic (rather than adding locks here).
         */
        rcu_read_lock();
        do {
                memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!memcg))
                        break;
        } while (!css_tryget(&memcg->css));
        rcu_read_unlock();
        return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                                   struct mem_cgroup *prev,
                                   struct mem_cgroup_reclaim_cookie *reclaim)
{
        struct mem_cgroup *memcg = NULL;
        int id = 0;

        if (mem_cgroup_disabled())
                return NULL;

        if (!root)
                root = root_mem_cgroup;

        if (prev && !reclaim)
                id = css_id(&prev->css);

        if (prev && prev != root)
                css_put(&prev->css);

        if (!root->use_hierarchy && root != root_mem_cgroup) {
                if (prev)
                        return NULL;
                return root;
        }

        while (!memcg) {
                struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
                struct cgroup_subsys_state *css;

                if (reclaim) {
                        int nid = zone_to_nid(reclaim->zone);
                        int zid = zone_idx(reclaim->zone);
                        struct mem_cgroup_per_zone *mz;

                        mz = mem_cgroup_zoneinfo(root, nid, zid);
                        iter = &mz->reclaim_iter[reclaim->priority];
                        if (prev && reclaim->generation != iter->generation)
                                return NULL;
                        id = iter->position;
                }

                rcu_read_lock();
                css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
                if (css) {
                        if (css == &root->css || css_tryget(css))
                                memcg = container_of(css,
                                                     struct mem_cgroup, css);
                } else
                        id = 0;
                rcu_read_unlock();

                if (reclaim) {
                        iter->position = id;
                        if (!css)
                                iter->generation++;
                        else if (!prev && memcg)
                                reclaim->generation = iter->generation;
                }

                if (prev && !css)
                        return NULL;
        }
        return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
                           struct mem_cgroup *prev)
{
        if (!root)
                root = root_mem_cgroup;
        if (prev && prev != root)
                css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)            \
        for (iter = mem_cgroup_iter(root, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)                       \
        for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
             iter != NULL;                              \
             iter = mem_cgroup_iter(NULL, iter, NULL))
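
/*
 * Illustrative use only (wants_to_stop() is a placeholder, not a real
 * helper): a partial walk must hand the last iterator back to
 * mem_cgroup_iter_break() so that its css reference is dropped.
 *
 *      struct mem_cgroup *iter;
 *
 *      for_each_mem_cgroup_tree(iter, root) {
 *              if (wants_to_stop(iter)) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 */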

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (!mm)
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (unlikely(!memcg))
                goto out;

        switch (idx) {
        case PGMAJFAULT:
                mem_cgroup_pgmajfault(memcg, 1);
                break;
        case PGFAULT:
                mem_cgroup_pgfault(memcg, 1);
                break;
        default:
                BUG();
        }
out:
        rcu_read_unlock();
}
EXPORT_SYMBOL(mem_cgroup_count_vm_event);

/*
 * The following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by the global LRU routines independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charging
 * 2. moving an account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving an account, the page is not on the LRU; it is
 * isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
        if (!TestClearPageCgroupAcctLRU(pc))
                return;
        VM_BUG_ON(!pc->mem_cgroup);
        /*
         * We don't check PCG_USED bit. It's cleared when the "page" is finally
         * removed from global LRU.
         */
        mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
        /* huge page split is done under lru_lock. so, we have no races. */
        MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
        VM_BUG_ON(list_empty(&pc->lru));
        list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
        mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;
        enum lru_list lru = page_lru(page);

        if (mem_cgroup_disabled())
                return;

        pc = lookup_page_cgroup(page);
        /* unused page is not rotated. */
        if (!PageCgroupUsed(pc))
                return;
        /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
        smp_rmb();
        mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
        list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;

        if (mem_cgroup_disabled())
                return;

        pc = lookup_page_cgroup(page);
        /* unused page is not rotated. */
        if (!PageCgroupUsed(pc))
                return;
        /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
        smp_rmb();
        mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
        list_move(&pc->lru, &mz->lruvec.lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        VM_BUG_ON(PageCgroupAcctLRU(pc));
        /*
         * putback:                             charge:
         * SetPageLRU                           SetPageCgroupUsed
         * smp_mb                               smp_mb
         * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
         *
         * Ensure that one of the two sides adds the page to the memcg
         * LRU during a race.
         */
        smp_mb();
        if (!PageCgroupUsed(pc))
                return;
        /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
        smp_rmb();
        mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
        /* huge page split is done under lru_lock. so, we have no races. */
        MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
        SetPageCgroupAcctLRU(pc);
        list_add(&pc->lru, &mz->lruvec.lists[lru]);
}

KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001112/*
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07001113 * At handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
1114 * while it's linked to lru because the page may be reused after it's fully
1115 * uncharged. To handle that, unlink page_cgroup from LRU when charge it again.
1116 * It's done under lock_page and expected that zone->lru_lock isnever held.
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001117 */
static void mem_cgroup_lru_del_before_commit(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);

        /*
         * Doing this check without taking ->lru_lock seems wrong but it
         * is safe: if the page_cgroup's USED bit is unset, the page will
         * not be added to any memcg's LRU; if the USED bit is set, the
         * commit after this will fail anyway. All of this charge/uncharge
         * is done under some mutual exclusion, so we do not need to take
         * care of changes in the USED bit.
         */
        if (likely(!PageLRU(page)))
                return;

        spin_lock_irqsave(&zone->lru_lock, flags);
        /*
         * Forget old LRU when this page_cgroup is *not* used. This Used bit
         * is guarded by lock_page() because the page is SwapCache.
         */
        if (!PageCgroupUsed(pc))
                mem_cgroup_del_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);
        /*
         * putback:                             charge:
         * SetPageLRU                           SetPageCgroupUsed
         * smp_mb                               smp_mb
         * PageCgroupUsed && add to memcg LRU   PageLRU && add to memcg LRU
         *
         * Ensure that one of the two sides adds the page to the memcg
         * LRU during a race.
         */
        smp_mb();
        /* take care that the page is added to the LRU while we commit it */
1161 if (likely(!PageLRU(page)))
1162 return;
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001163 spin_lock_irqsave(&zone->lru_lock, flags);
1164 /* link when the page is linked to LRU but page_cgroup isn't */
Balbir Singh4b3bde42009-09-23 15:56:32 -07001165 if (PageLRU(page) && !PageCgroupAcctLRU(pc))
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001166 mem_cgroup_add_lru_list(page, page_lru(page));
1167 spin_unlock_irqrestore(&zone->lru_lock, flags);
1168}
1169
1170
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001171void mem_cgroup_move_lists(struct page *page,
1172 enum lru_list from, enum lru_list to)
1173{
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08001174 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001175 return;
1176 mem_cgroup_del_lru_list(page, from);
1177 mem_cgroup_add_lru_list(page, to);
1178}
1179
Michal Hocko3e920412011-07-26 16:08:29 -07001180/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001181 * Checks whether the given memcg is the same as root_memcg or lies within
Michal Hocko3e920412011-07-26 16:08:29 -07001182 * root_memcg's hierarchy subtree
1183 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001184static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1185 struct mem_cgroup *memcg)
Michal Hocko3e920412011-07-26 16:08:29 -07001186{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001187 if (root_memcg != memcg) {
1188 return (root_memcg->use_hierarchy &&
1189 css_is_ancestor(&memcg->css, &root_memcg->css));
Michal Hocko3e920412011-07-26 16:08:29 -07001190 }
1191
1192 return true;
1193}
1194
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001195int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
David Rientjes4c4a2212008-02-07 00:14:06 -08001196{
1197 int ret;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001198 struct mem_cgroup *curr = NULL;
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001199 struct task_struct *p;
David Rientjes4c4a2212008-02-07 00:14:06 -08001200
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001201 p = find_lock_task_mm(task);
1202 if (!p)
1203 return 0;
1204 curr = try_get_mem_cgroup_from_mm(p->mm);
1205 task_unlock(p);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001206 if (!curr)
1207 return 0;
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001208 /*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001209 * We should check use_hierarchy of "memcg", not "curr", because checking
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001210 * use_hierarchy of "curr" here would make this function return true if hierarchy is
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001211 * enabled in "curr" and "curr" is a child of "memcg" in the *cgroup*
 1212 * hierarchy (even if use_hierarchy is disabled in "memcg").
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001213 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001214 ret = mem_cgroup_same_or_subtree(memcg, curr);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001215 css_put(&curr->css);
David Rientjes4c4a2212008-02-07 00:14:06 -08001216 return ret;
1217}
1218
Johannes Weiner9b272972011-11-02 13:38:23 -07001219int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001220{
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001221 unsigned long inactive_ratio;
Johannes Weiner9b272972011-11-02 13:38:23 -07001222 int nid = zone_to_nid(zone);
1223 int zid = zone_idx(zone);
1224 unsigned long inactive;
1225 unsigned long active;
1226 unsigned long gb;
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001227
Johannes Weiner9b272972011-11-02 13:38:23 -07001228 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1229 BIT(LRU_INACTIVE_ANON));
1230 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1231 BIT(LRU_ACTIVE_ANON));
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001232
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001233 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1234 if (gb)
1235 inactive_ratio = int_sqrt(10 * gb);
1236 else
1237 inactive_ratio = 1;
1238
Johannes Weiner9b272972011-11-02 13:38:23 -07001239 return inactive * inactive_ratio < active;
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001240}
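/*
 * Worked example (illustrative; assumes 4KB pages, i.e. PAGE_SHIFT == 12):
 * one GB is 262144 pages, so a memcg holding roughly 4GB of anon memory
 * gets gb = 4 and inactive_ratio = int_sqrt(10 * 4) = 6. Its inactive anon
 * list is then considered "low" once it holds less than 1/6 of the active
 * list. Below 1GB the ratio degenerates to 1 and the check is simply
 * inactive < active.
 */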
1241
Johannes Weiner9b272972011-11-02 13:38:23 -07001242int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001243{
1244 unsigned long active;
1245 unsigned long inactive;
Johannes Weiner9b272972011-11-02 13:38:23 -07001246 int zid = zone_idx(zone);
1247 int nid = zone_to_nid(zone);
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001248
Johannes Weiner9b272972011-11-02 13:38:23 -07001249 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1250 BIT(LRU_INACTIVE_FILE));
1251 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1252 BIT(LRU_ACTIVE_FILE));
Rik van Riel56e49d22009-06-16 15:32:28 -07001253
1254 return (active > inactive);
1255}
1256
KOSAKI Motohiro3e2f41f2009-01-07 18:08:20 -08001257struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1258 struct zone *zone)
1259{
KOSAKI Motohiro13d7e3a2010-08-10 18:03:06 -07001260 int nid = zone_to_nid(zone);
KOSAKI Motohiro3e2f41f2009-01-07 18:08:20 -08001261 int zid = zone_idx(zone);
1262 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1263
1264 return &mz->reclaim_stat;
1265}
1266
1267struct zone_reclaim_stat *
1268mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1269{
1270 struct page_cgroup *pc;
1271 struct mem_cgroup_per_zone *mz;
1272
1273 if (mem_cgroup_disabled())
1274 return NULL;
1275
1276 pc = lookup_page_cgroup(page);
Daisuke Nishimurabd112db2009-01-15 13:51:11 -08001277 if (!PageCgroupUsed(pc))
1278 return NULL;
Johannes Weiner713735b2011-01-20 14:44:31 -08001279 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1280 smp_rmb();
Johannes Weiner97a6c372011-03-23 16:42:27 -07001281 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
KOSAKI Motohiro3e2f41f2009-01-07 18:08:20 -08001282 return &mz->reclaim_stat;
1283}
1284
Balbir Singh66e17072008-02-07 00:13:56 -08001285unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1286 struct list_head *dst,
1287 unsigned long *scanned, int order,
Minchan Kim4356f212011-10-31 17:06:47 -07001288 isolate_mode_t mode,
1289 struct zone *z,
Balbir Singh66e17072008-02-07 00:13:56 -08001290 struct mem_cgroup *mem_cont,
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001291 int active, int file)
Balbir Singh66e17072008-02-07 00:13:56 -08001292{
1293 unsigned long nr_taken = 0;
1294 struct page *page;
1295 unsigned long scan;
1296 LIST_HEAD(pc_list);
1297 struct list_head *src;
KAMEZAWA Hiroyukiff7283f2008-02-07 00:14:11 -08001298 struct page_cgroup *pc, *tmp;
KOSAKI Motohiro13d7e3a2010-08-10 18:03:06 -07001299 int nid = zone_to_nid(z);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001300 int zid = zone_idx(z);
1301 struct mem_cgroup_per_zone *mz;
Johannes Weinerb7c46d12009-09-21 17:02:56 -07001302 int lru = LRU_FILE * file + active;
KAMEZAWA Hiroyuki2ffebca2009-06-17 16:27:21 -07001303 int ret;
Balbir Singh66e17072008-02-07 00:13:56 -08001304
Balbir Singhcf475ad2008-04-29 01:00:16 -07001305 BUG_ON(!mem_cont);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08001306 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
Johannes Weiner6290df52012-01-12 17:18:10 -08001307 src = &mz->lruvec.lists[lru];
Balbir Singh66e17072008-02-07 00:13:56 -08001308
KAMEZAWA Hiroyukiff7283f2008-02-07 00:14:11 -08001309 scan = 0;
1310 list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
Hugh Dickins436c65412008-02-07 00:14:12 -08001311 if (scan >= nr_to_scan)
KAMEZAWA Hiroyukiff7283f2008-02-07 00:14:11 -08001312 break;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001313
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07001314 if (unlikely(!PageCgroupUsed(pc)))
1315 continue;
Johannes Weiner5564e882011-03-23 16:42:29 -07001316
Johannes Weiner6b3ae582011-03-23 16:42:30 -07001317 page = lookup_cgroup_page(pc);
Johannes Weiner5564e882011-03-23 16:42:29 -07001318
Hugh Dickins436c65412008-02-07 00:14:12 -08001319 if (unlikely(!PageLRU(page)))
KAMEZAWA Hiroyukiff7283f2008-02-07 00:14:11 -08001320 continue;
KAMEZAWA Hiroyukiff7283f2008-02-07 00:14:11 -08001321
Hugh Dickins436c65412008-02-07 00:14:12 -08001322 scan++;
KAMEZAWA Hiroyuki2ffebca2009-06-17 16:27:21 -07001323 ret = __isolate_lru_page(page, mode, file);
1324 switch (ret) {
1325 case 0:
Balbir Singh66e17072008-02-07 00:13:56 -08001326 list_move(&page->lru, dst);
KAMEZAWA Hiroyuki2ffebca2009-06-17 16:27:21 -07001327 mem_cgroup_del_lru(page);
Rik van Riel2c888cf2011-01-13 15:47:13 -08001328 nr_taken += hpage_nr_pages(page);
KAMEZAWA Hiroyuki2ffebca2009-06-17 16:27:21 -07001329 break;
1330 case -EBUSY:
1331 /* we don't affect global LRU but rotate in our LRU */
1332 mem_cgroup_rotate_lru_list(page, page_lru(page));
1333 break;
1334 default:
1335 break;
Balbir Singh66e17072008-02-07 00:13:56 -08001336 }
1337 }
1338
Balbir Singh66e17072008-02-07 00:13:56 -08001339 *scanned = scan;
KOSAKI Motohirocc8e9702010-08-09 17:19:57 -07001340
1341 trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1342 0, 0, 0, mode);
1343
Balbir Singh66e17072008-02-07 00:13:56 -08001344 return nr_taken;
1345}
1346
Balbir Singh6d61ef42009-01-07 18:08:06 -08001347#define mem_cgroup_from_res_counter(counter, member) \
1348 container_of(counter, struct mem_cgroup, member)
1349
Johannes Weiner19942822011-02-01 15:52:43 -08001350/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001351 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 1352 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001353 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001354 * Returns the maximum amount of memory @memcg can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001355 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001356 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001357static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001358{
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001359 unsigned long long margin;
1360
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001361 margin = res_counter_margin(&memcg->res);
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001362 if (do_swap_account)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001363 margin = min(margin, res_counter_margin(&memcg->memsw));
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001364 return margin >> PAGE_SHIFT;
Johannes Weiner19942822011-02-01 15:52:43 -08001365}
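/*
 * Worked example with hypothetical numbers: if memory.limit_in_bytes is
 * 512MB with 500MB in use, and memsw.limit_in_bytes is 1GB with 700MB in
 * use, the two margins are 12MB and 324MB. With do_swap_account enabled
 * the smaller one wins, so mem_cgroup_margin() reports 12MB >> PAGE_SHIFT,
 * i.e. 3072 pages on a 4KB-page system.
 */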
1366
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001367int mem_cgroup_swappiness(struct mem_cgroup *memcg)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001368{
1369 struct cgroup *cgrp = memcg->css.cgroup;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001370
1371 /* root ? */
1372 if (cgrp->parent == NULL)
1373 return vm_swappiness;
1374
Johannes Weinerbf1ff262011-03-23 16:42:32 -07001375 return memcg->swappiness;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001376}
1377
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001378static void mem_cgroup_start_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001379{
1380 int cpu;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001381
1382 get_online_cpus();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001383 spin_lock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001384 for_each_online_cpu(cpu)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001385 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1386 memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1387 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001388 put_online_cpus();
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001389
1390 synchronize_rcu();
1391}
1392
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001393static void mem_cgroup_end_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001394{
1395 int cpu;
1396
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001397 if (!memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001398 return;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001399 get_online_cpus();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001400 spin_lock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001401 for_each_online_cpu(cpu)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001402 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1403 memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1404 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001405 put_online_cpus();
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001406}
1407/*
 1408 * Two routines for checking whether "mem" is under move_account().
 1409 *
 1410 * mem_cgroup_stealed() - checks whether a cgroup is mc.from. This is used
 1411 * for avoiding races in accounting. If true,
 1412 * pc->mem_cgroup may be overwritten.
 1413 *
 1414 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to, or
 1415 * under the hierarchy of moving cgroups. This is for
 1416 * waiting at high memory pressure caused by "move".
1417 */
1418
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001419static bool mem_cgroup_stealed(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001420{
1421 VM_BUG_ON(!rcu_read_lock_held());
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001422 return this_cpu_read(memcg->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001423}
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001424
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001425static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001426{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001427 struct mem_cgroup *from;
1428 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001429 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001430 /*
1431 * Unlike task_move routines, we access mc.to, mc.from not under
1432 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1433 */
1434 spin_lock(&mc.lock);
1435 from = mc.from;
1436 to = mc.to;
1437 if (!from)
1438 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001439
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001440 ret = mem_cgroup_same_or_subtree(memcg, from)
1441 || mem_cgroup_same_or_subtree(memcg, to);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001442unlock:
1443 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001444 return ret;
1445}
1446
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001447static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001448{
1449 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001450 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001451 DEFINE_WAIT(wait);
1452 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1453 /* moving charge context might have finished. */
1454 if (mc.moving_task)
1455 schedule();
1456 finish_wait(&mc.waitq, &wait);
1457 return true;
1458 }
1459 }
1460 return false;
1461}
1462
Balbir Singhe2224322009-04-02 16:57:39 -07001463/**
Kirill A. Shutemov6a6135b2010-03-10 15:22:25 -08001464 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
Balbir Singhe2224322009-04-02 16:57:39 -07001465 * @memcg: The memory cgroup that went over limit
1466 * @p: Task that is going to be killed
1467 *
1468 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1469 * enabled
1470 */
1471void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1472{
1473 struct cgroup *task_cgrp;
1474 struct cgroup *mem_cgrp;
1475 /*
1476 * Need a buffer in BSS, can't rely on allocations. The code relies
1477 * on the assumption that OOM is serialized for memory controller.
1478 * If this assumption is broken, revisit this code.
1479 */
1480 static char memcg_name[PATH_MAX];
1481 int ret;
1482
Daisuke Nishimurad31f56d2009-12-15 16:47:12 -08001483 if (!memcg || !p)
Balbir Singhe2224322009-04-02 16:57:39 -07001484 return;
1485
1486
1487 rcu_read_lock();
1488
1489 mem_cgrp = memcg->css.cgroup;
1490 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1491
1492 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1493 if (ret < 0) {
1494 /*
 1495 * Unfortunately, we are unable to convert to a useful name,
 1496 * but we'll still print out the usage information.
1497 */
1498 rcu_read_unlock();
1499 goto done;
1500 }
1501 rcu_read_unlock();
1502
1503 printk(KERN_INFO "Task in %s killed", memcg_name);
1504
1505 rcu_read_lock();
1506 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1507 if (ret < 0) {
1508 rcu_read_unlock();
1509 goto done;
1510 }
1511 rcu_read_unlock();
1512
1513 /*
 1514 * Continues from above, so we don't need a KERN_ level
1515 */
1516 printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1517done:
1518
1519 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1520 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1521 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1522 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1523 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1524 "failcnt %llu\n",
1525 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1526 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1527 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1528}
1529
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001530/*
 1531 * This function returns the number of memcgs in the hierarchy tree. Returns
 1532 * 1 (self count) if there are no children.
1533 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001534static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001535{
1536 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001537 struct mem_cgroup *iter;
1538
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001539 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001540 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001541 return num;
1542}
1543
Balbir Singh6d61ef42009-01-07 18:08:06 -08001544/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001545 * Return the memory (and swap, if configured) limit for a memcg.
1546 */
1547u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1548{
1549 u64 limit;
1550 u64 memsw;
1551
Johannes Weinerf3e8eb72011-01-13 15:47:39 -08001552 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1553 limit += total_swap_pages << PAGE_SHIFT;
1554
David Rientjesa63d83f2010-08-09 17:19:46 -07001555 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1556 /*
1557 * If memsw is finite and limits the amount of swap space available
1558 * to this memcg, return that limit.
1559 */
1560 return min(limit, memsw);
1561}
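/*
 * Worked example with hypothetical numbers: a 1GB memory limit on a
 * machine with 2GB of swap yields limit = 3GB. If memsw.limit_in_bytes is
 * set to 1.5GB, min(3GB, 1.5GB) = 1.5GB is returned, i.e. the tighter
 * memory+swap limit caps how much the OOM killer may assume is usable.
 */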
1562
Johannes Weiner56600482012-01-12 17:17:59 -08001563static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1564 gfp_t gfp_mask,
1565 unsigned long flags)
1566{
1567 unsigned long total = 0;
1568 bool noswap = false;
1569 int loop;
1570
1571 if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1572 noswap = true;
1573 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1574 noswap = true;
1575
1576 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1577 if (loop)
1578 drain_all_stock_async(memcg);
1579 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1580 /*
1581 * Allow limit shrinkers, which are triggered directly
1582 * by userspace, to catch signals and stop reclaim
1583 * after minimal progress, regardless of the margin.
1584 */
1585 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1586 break;
1587 if (mem_cgroup_margin(memcg))
1588 break;
1589 /*
1590 * If nothing was reclaimed after two attempts, there
1591 * may be no reclaimable pages in this hierarchy.
1592 */
1593 if (loop && !total)
1594 break;
1595 }
1596 return total;
1597}
1598
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001599/**
1600 * test_mem_cgroup_node_reclaimable
 1601 * @memcg: the target memcg
 1602 * @nid: the node ID to be checked.
 1603 * @noswap: specify true here if the user wants file-only information.
1604 *
1605 * This function returns whether the specified memcg contains any
1606 * reclaimable pages on a node. Returns true if there are any reclaimable
1607 * pages in the node.
1608 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001609static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001610 int nid, bool noswap)
1611{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001612 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001613 return true;
1614 if (noswap || !total_swap_pages)
1615 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001616 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001617 return true;
1618 return false;
1619
1620}
Ying Han889976d2011-05-26 16:25:33 -07001621#if MAX_NUMNODES > 1
1622
1623/*
1624 * Always updating the nodemask is not very good - even if we have an empty
1625 * list or the wrong list here, we can start from some node and traverse all
1626 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1627 *
1628 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001629static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001630{
1631 int nid;
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001632 /*
1633 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1634 * pagein/pageout changes since the last update.
1635 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001636 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001637 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001638 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001639 return;
1640
Ying Han889976d2011-05-26 16:25:33 -07001641 /* make a nodemask where this memcg uses memory from */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001642 memcg->scan_nodes = node_states[N_HIGH_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001643
1644 for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
1645
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001646 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1647 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001648 }
KAMEZAWA Hiroyuki453a9bf2011-07-08 15:39:43 -07001649
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001650 atomic_set(&memcg->numainfo_events, 0);
1651 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001652}
1653
1654/*
 1655 * Select a node to start reclaim from. Because all we need is to reduce the
 1656 * usage counter, starting from anywhere is OK. Reclaiming from the
 1657 * current node has both pros and cons.
 1658 *
 1659 * Freeing memory from the current node means freeing memory from a node which
 1660 * we'll use or have used, so it may disturb that node's LRU. And if several threads
 1661 * hit their limits, they will contend on one node. But freeing from a remote
 1662 * node costs more for memory reclaim because of memory latency.
 1663 *
 1664 * For now, we use round-robin. A better algorithm is welcome.
1665 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001666int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001667{
1668 int node;
1669
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001670 mem_cgroup_may_update_nodemask(memcg);
1671 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001672
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001673 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001674 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001675 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001676 /*
 1677 * We call this when we hit the limit, not when pages are added to the LRU.
 1678 * No LRU may hold pages, because all pages are UNEVICTABLE or
 1679 * the memcg is too small and no pages are on the LRU. In that case,
 1680 * we use the current node.
1681 */
1682 if (unlikely(node == MAX_NUMNODES))
1683 node = numa_node_id();
1684
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001685 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001686 return node;
1687}
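/*
 * Example of the round-robin above (hypothetical nodemask): if scan_nodes
 * is {0, 2, 3} and last_scanned_node is 2, next_node() selects 3; the next
 * call runs past MAX_NUMNODES and wraps to first_node(), i.e. node 0. If
 * scan_nodes is empty, both lookups return MAX_NUMNODES and we fall back
 * to numa_node_id().
 */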
1688
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001689/*
 1690 * Check whether any node contains reclaimable pages.
 1691 * For a quick scan, we make use of scan_nodes. This allows us to skip
 1692 * unused nodes. But scan_nodes is lazily updated and may not contain
 1693 * fresh enough information, so we need to double-check.
1694 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001695bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001696{
1697 int nid;
1698
1699 /*
 1700 * quick check...making use of scan_nodes.
1701 * We can skip unused nodes.
1702 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001703 if (!nodes_empty(memcg->scan_nodes)) {
1704 for (nid = first_node(memcg->scan_nodes);
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001705 nid < MAX_NUMNODES;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001706 nid = next_node(nid, memcg->scan_nodes)) {
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001707
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001708 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001709 return true;
1710 }
1711 }
1712 /*
1713 * Check rest of nodes.
1714 */
1715 for_each_node_state(nid, N_HIGH_MEMORY) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001716 if (node_isset(nid, memcg->scan_nodes))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001717 continue;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001718 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001719 return true;
1720 }
1721 return false;
1722}
1723
Ying Han889976d2011-05-26 16:25:33 -07001724#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001725int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001726{
1727 return 0;
1728}
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001729
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001730bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001731{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001732 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001733}
Ying Han889976d2011-05-26 16:25:33 -07001734#endif
1735
Johannes Weiner56600482012-01-12 17:17:59 -08001736static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1737 struct zone *zone,
1738 gfp_t gfp_mask,
1739 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001740{
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001741 struct mem_cgroup *victim = NULL;
Johannes Weiner56600482012-01-12 17:17:59 -08001742 int total = 0;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001743 int loop = 0;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001744 unsigned long excess;
Johannes Weiner185efc02011-09-14 16:21:58 -07001745 unsigned long nr_scanned;
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001746 struct mem_cgroup_reclaim_cookie reclaim = {
1747 .zone = zone,
1748 .priority = 0,
1749 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001750
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001751 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001752
Balbir Singh4e416952009-09-23 15:56:39 -07001753 while (1) {
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001754 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001755 if (!victim) {
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001756 loop++;
Balbir Singh4e416952009-09-23 15:56:39 -07001757 if (loop >= 2) {
1758 /*
1759 * If we have not been able to reclaim
 1760 * anything, it might be because there are
1761 * no reclaimable pages under this hierarchy
1762 */
Johannes Weiner56600482012-01-12 17:17:59 -08001763 if (!total)
Balbir Singh4e416952009-09-23 15:56:39 -07001764 break;
Balbir Singh4e416952009-09-23 15:56:39 -07001765 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001766 * We want to do more targeted reclaim.
Balbir Singh4e416952009-09-23 15:56:39 -07001767 * excess >> 2 is not so large that we reclaim
 1768 * too much, nor so small that we keep
 1769 * coming back to reclaim from this cgroup
1770 */
1771 if (total >= (excess >> 2) ||
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001772 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
Balbir Singh4e416952009-09-23 15:56:39 -07001773 break;
Balbir Singh4e416952009-09-23 15:56:39 -07001774 }
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001775 continue;
Balbir Singh4e416952009-09-23 15:56:39 -07001776 }
Johannes Weiner56600482012-01-12 17:17:59 -08001777 if (!mem_cgroup_reclaimable(victim, false))
Balbir Singh6d61ef42009-01-07 18:08:06 -08001778 continue;
Johannes Weiner56600482012-01-12 17:17:59 -08001779 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1780 zone, &nr_scanned);
1781 *total_scanned += nr_scanned;
1782 if (!res_counter_soft_limit_excess(&root_memcg->res))
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001783 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001784 }
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001785 mem_cgroup_iter_break(root_memcg, victim);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001786 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001787}
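/*
 * Rough example of the reclaim target above (assuming 4KB pages): a
 * hierarchy that is 16MB (4096 pages) over its soft limit has
 * excess = 4096, so after the first full round the loop keeps iterating
 * over the children until at least excess >> 2 = 1024 pages have been
 * reclaimed, MEM_CGROUP_MAX_RECLAIM_LOOPS is exceeded, or the root's
 * soft-limit excess drops to zero.
 */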
1788
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001789/*
1790 * Check OOM-Killer is already running under our hierarchy.
1791 * If someone is running, return false.
Michal Hocko1af8efe2011-07-26 16:08:24 -07001792 * Has to be called with memcg_oom_lock
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001793 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001794static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001795{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001796 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001797
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001798 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001799 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001800 /*
1801 * this subtree of our hierarchy is already locked
 1802 * so we cannot grant the lock.
1803 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001804 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001805 mem_cgroup_iter_break(memcg, iter);
1806 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001807 } else
1808 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001809 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001810
Michal Hocko79dfdac2011-07-26 16:08:23 -07001811 if (!failed)
Johannes Weiner23751be2011-08-25 15:59:16 -07001812 return true;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001813
1814 /*
1815 * OK, we failed to lock the whole subtree so we have to clean up
1816 * what we set up to the failing subtree
1817 */
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001818 for_each_mem_cgroup_tree(iter, memcg) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001819 if (iter == failed) {
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001820 mem_cgroup_iter_break(memcg, iter);
1821 break;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001822 }
1823 iter->oom_lock = false;
1824 }
Johannes Weiner23751be2011-08-25 15:59:16 -07001825 return false;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001826}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001827
Michal Hocko79dfdac2011-07-26 16:08:23 -07001828/*
Michal Hocko1af8efe2011-07-26 16:08:24 -07001829 * Has to be called with memcg_oom_lock
Michal Hocko79dfdac2011-07-26 16:08:23 -07001830 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001831static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001832{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001833 struct mem_cgroup *iter;
1834
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001835 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001836 iter->oom_lock = false;
1837 return 0;
1838}
1839
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001840static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001841{
1842 struct mem_cgroup *iter;
1843
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001844 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001845 atomic_inc(&iter->under_oom);
1846}
1847
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001848static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001849{
1850 struct mem_cgroup *iter;
1851
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001852 /*
1853 * When a new child is created while the hierarchy is under oom,
1854 * mem_cgroup_oom_lock() may not be called. We have to use
1855 * atomic_add_unless() here.
1856 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001857 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001858 atomic_add_unless(&iter->under_oom, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001859}
1860
Michal Hocko1af8efe2011-07-26 16:08:24 -07001861static DEFINE_SPINLOCK(memcg_oom_lock);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001862static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1863
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001864struct oom_wait_info {
1865 struct mem_cgroup *mem;
1866 wait_queue_t wait;
1867};
1868
1869static int memcg_oom_wake_function(wait_queue_t *wait,
1870 unsigned mode, int sync, void *arg)
1871{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001872 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
1873 *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001874 struct oom_wait_info *oom_wait_info;
1875
1876 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001877 oom_wait_memcg = oom_wait_info->mem;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001878
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001879 /*
 1880 * Both oom_wait_info->mem and wake_memcg are stable under us,
 1881 * so we can use css_is_ancestor() without taking care of RCU.
1882 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001883 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
1884 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001885 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001886 return autoremove_wake_function(wait, mode, sync, arg);
1887}
1888
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001889static void memcg_wakeup_oom(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001890{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001891 /* for filtering, pass "memcg" as argument. */
1892 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001893}
1894
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001895static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001896{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001897 if (memcg && atomic_read(&memcg->under_oom))
1898 memcg_wakeup_oom(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001899}
1900
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001901/*
 1902 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1903 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001904bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001905{
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001906 struct oom_wait_info owait;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001907 bool locked, need_to_kill;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001908
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001909 owait.mem = memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001910 owait.wait.flags = 0;
1911 owait.wait.func = memcg_oom_wake_function;
1912 owait.wait.private = current;
1913 INIT_LIST_HEAD(&owait.wait.task_list);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001914 need_to_kill = true;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001915 mem_cgroup_mark_under_oom(memcg);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001916
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001917 /* At first, try to OOM-lock the hierarchy under memcg. */
Michal Hocko1af8efe2011-07-26 16:08:24 -07001918 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001919 locked = mem_cgroup_oom_lock(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001920 /*
1921 * Even if signal_pending(), we can't quit charge() loop without
1922 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
 1923 * under OOM is always welcome, so use TASK_KILLABLE here.
1924 */
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001925 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001926 if (!locked || memcg->oom_kill_disable)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001927 need_to_kill = false;
1928 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001929 mem_cgroup_oom_notify(memcg);
Michal Hocko1af8efe2011-07-26 16:08:24 -07001930 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001931
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001932 if (need_to_kill) {
1933 finish_wait(&memcg_oom_waitq, &owait.wait);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001934 mem_cgroup_out_of_memory(memcg, mask);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001935 } else {
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001936 schedule();
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001937 finish_wait(&memcg_oom_waitq, &owait.wait);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001938 }
Michal Hocko1af8efe2011-07-26 16:08:24 -07001939 spin_lock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001940 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001941 mem_cgroup_oom_unlock(memcg);
1942 memcg_wakeup_oom(memcg);
Michal Hocko1af8efe2011-07-26 16:08:24 -07001943 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001944
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001945 mem_cgroup_unmark_under_oom(memcg);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001946
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001947 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1948 return false;
 1949 /* Give the dying process a chance */
KAMEZAWA Hiroyuki715a5ee2011-11-02 13:38:18 -07001950 schedule_timeout_uninterruptible(1);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001951 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001952}
1953
Balbir Singhd69b0422009-06-17 16:26:34 -07001954/*
1955 * Currently used to update mapped file statistics, but the routine can be
1956 * generalized to update other statistics as well.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001957 *
1958 * Notes: Race condition
1959 *
1960 * We usually use page_cgroup_lock() for accessing page_cgroup member but
 1961 * it tends to be costly. But considering some conditions, we don't need
 1962 * to do so _always_.
 1963 *
 1964 * Considering "charge", lock_page_cgroup() is not required because all
 1965 * file-stat operations happen after a page is attached to the radix-tree. There
 1966 * is no race with "charge".
 1967 *
 1968 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 1969 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 1970 * if there is a race with "uncharge". The statistics themselves are properly
 1971 * handled by flags.
 1972 *
 1973 * Considering "move", this is the only case where we see a race. To keep the race
 1974 * window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect whether
 1975 * there is any possibility of a race. If there is, we take a lock.
Balbir Singhd69b0422009-06-17 16:26:34 -07001976 */
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001977
Greg Thelen2a7106f2011-01-13 15:47:37 -08001978void mem_cgroup_update_page_stat(struct page *page,
1979 enum mem_cgroup_page_stat_item idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07001980{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001981 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001982 struct page_cgroup *pc = lookup_page_cgroup(page);
1983 bool need_unlock = false;
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08001984 unsigned long uninitialized_var(flags);
Balbir Singhd69b0422009-06-17 16:26:34 -07001985
Balbir Singhd69b0422009-06-17 16:26:34 -07001986 if (unlikely(!pc))
1987 return;
1988
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001989 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001990 memcg = pc->mem_cgroup;
1991 if (unlikely(!memcg || !PageCgroupUsed(pc)))
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001992 goto out;
1993 /* pc->mem_cgroup is unstable ? */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001994 if (unlikely(mem_cgroup_stealed(memcg)) || PageTransHuge(page)) {
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001995 /* take a lock to access pc->mem_cgroup */
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08001996 move_lock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001997 need_unlock = true;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001998 memcg = pc->mem_cgroup;
1999 if (!memcg || !PageCgroupUsed(pc))
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002000 goto out;
2001 }
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002002
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002003 switch (idx) {
Greg Thelen2a7106f2011-01-13 15:47:37 -08002004 case MEMCG_NR_FILE_MAPPED:
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002005 if (val > 0)
2006 SetPageCgroupFileMapped(pc);
2007 else if (!page_mapped(page))
KAMEZAWA Hiroyuki0c270f82010-10-27 15:33:39 -07002008 ClearPageCgroupFileMapped(pc);
Greg Thelen2a7106f2011-01-13 15:47:37 -08002009 idx = MEM_CGROUP_STAT_FILE_MAPPED;
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002010 break;
2011 default:
2012 BUG();
KAMEZAWA Hiroyuki8725d542010-04-06 14:35:05 -07002013 }
Balbir Singhd69b0422009-06-17 16:26:34 -07002014
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002015 this_cpu_add(memcg->stat->count[idx], val);
Greg Thelen2a7106f2011-01-13 15:47:37 -08002016
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002017out:
2018 if (unlikely(need_unlock))
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08002019 move_unlock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07002020 rcu_read_unlock();
2021 return;
Balbir Singhd69b0422009-06-17 16:26:34 -07002022}
Greg Thelen2a7106f2011-01-13 15:47:37 -08002023EXPORT_SYMBOL(mem_cgroup_update_page_stat);
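/*
 * Typical use (sketch, from the rmap side): page_add_file_rmap() and
 * page_remove_rmap() bump MEMCG_NR_FILE_MAPPED up or down through the
 * mem_cgroup_inc_page_stat()/mem_cgroup_dec_page_stat() wrappers, which
 * end up here with val = +1 or -1.
 */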
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002024
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002025/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002026 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 2027 * TODO: it may be necessary to use bigger numbers on big iron.
2028 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002029#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002030struct memcg_stock_pcp {
2031 struct mem_cgroup *cached; /* this never be root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002032 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002033 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002034 unsigned long flags;
2035#define FLUSHING_CACHED_CHARGE (0)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002036};
2037static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002038static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002039
2040/*
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002041 * Try to consume stocked charge on this cpu. On success, one page is consumed
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002042 * from the local stock and true is returned. If the stock is 0 or the charges are
 2043 * from a cgroup other than the current target, false is returned. This stock will
 2044 * be refilled.
2045 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002046static bool consume_stock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002047{
2048 struct memcg_stock_pcp *stock;
2049 bool ret = true;
2050
2051 stock = &get_cpu_var(memcg_stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002052 if (memcg == stock->cached && stock->nr_pages)
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002053 stock->nr_pages--;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002054 else /* need to call res_counter_charge */
2055 ret = false;
2056 put_cpu_var(memcg_stock);
2057 return ret;
2058}
2059
2060/*
 2061 * Returns the charges cached in the percpu stock to the res_counter and resets the cached information.
2062 */
2063static void drain_stock(struct memcg_stock_pcp *stock)
2064{
2065 struct mem_cgroup *old = stock->cached;
2066
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002067 if (stock->nr_pages) {
2068 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2069
2070 res_counter_uncharge(&old->res, bytes);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002071 if (do_swap_account)
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002072 res_counter_uncharge(&old->memsw, bytes);
2073 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002074 }
2075 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002076}
2077
2078/*
2079 * This must be called under preempt disabled or must be called by
2080 * a thread which is pinned to local cpu.
2081 */
2082static void drain_local_stock(struct work_struct *dummy)
2083{
2084 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2085 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002086 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002087}
2088
2089/*
 2090 * Cache charges (nr_pages) taken from the res_counter into the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002091 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002092 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002093static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002094{
2095 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2096
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002097 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002098 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002099 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002100 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002101 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002102 put_cpu_var(memcg_stock);
2103}
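/*
 * Typical flow, as a rough sketch: __mem_cgroup_try_charge() first tries
 * consume_stock(); on a miss it charges a whole CHARGE_BATCH (32 pages)
 * against the res_counter in one go, hands one page to the caller and
 * stashes the remaining 31 pages here via refill_stock(), so the next
 * single-page charges on this cpu can skip the res_counter entirely.
 */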
2104
2105/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002106 * Drains all per-CPU charge caches for the given root_memcg and for the subtree
Michal Hockod38144b2011-07-26 16:08:28 -07002107 * of the hierarchy under it. The sync flag says whether we should block
 2108 * until the work is done.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002109 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002110static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002111{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002112 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002113
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002114 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002115 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07002116 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002117 for_each_online_cpu(cpu) {
2118 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002119 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002120
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002121 memcg = stock->cached;
2122 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002123 continue;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002124 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07002125 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07002126 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2127 if (cpu == curcpu)
2128 drain_local_stock(&stock->work);
2129 else
2130 schedule_work_on(cpu, &stock->work);
2131 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002132 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002133 put_cpu();
Michal Hockod38144b2011-07-26 16:08:28 -07002134
2135 if (!sync)
2136 goto out;
2137
2138 for_each_online_cpu(cpu) {
2139 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002140 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
Michal Hockod38144b2011-07-26 16:08:28 -07002141 flush_work(&stock->work);
2142 }
2143out:
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002144 put_online_cpus();
Michal Hockod38144b2011-07-26 16:08:28 -07002145}
2146
2147/*
 2148 * Tries to drain stocked charges on other cpus. This function is asynchronous
 2149 * and just schedules a work item per cpu to drain locally on each cpu. The caller
 2150 * can expect that some charges will go back to the res_counter later, but cannot
 2151 * wait for that.
2152 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002153static void drain_all_stock_async(struct mem_cgroup *root_memcg)
Michal Hockod38144b2011-07-26 16:08:28 -07002154{
Michal Hocko9f50fad2011-08-09 11:56:26 +02002155 /*
2156 * If someone calls draining, avoid adding more kworker runs.
2157 */
2158 if (!mutex_trylock(&percpu_charge_mutex))
2159 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002160 drain_all_stock(root_memcg, false);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002161 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002162}
2163
2164/* This is a synchronous drain interface. */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002165static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002166{
2167 /* called when force_empty is called */
Michal Hocko9f50fad2011-08-09 11:56:26 +02002168 mutex_lock(&percpu_charge_mutex);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002169 drain_all_stock(root_memcg, true);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002170 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002171}
2172
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002173/*
 2174 * This function drains the percpu counter values from a DEAD cpu and
 2175 * moves them to the local cpu. Note that this function can be preempted.
2176 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002177static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002178{
2179 int i;
2180
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002181 spin_lock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002182 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002183 long x = per_cpu(memcg->stat->count[i], cpu);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002184
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002185 per_cpu(memcg->stat->count[i], cpu) = 0;
2186 memcg->nocpu_base.count[i] += x;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002187 }
Johannes Weinere9f89742011-03-23 16:42:37 -07002188 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002189 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
Johannes Weinere9f89742011-03-23 16:42:37 -07002190
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002191 per_cpu(memcg->stat->events[i], cpu) = 0;
2192 memcg->nocpu_base.events[i] += x;
Johannes Weinere9f89742011-03-23 16:42:37 -07002193 }
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002194 /* need to clear ON_MOVE value, works as a kind of lock. */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002195 per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
2196 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002197}
2198
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002199static void synchronize_mem_cgroup_on_move(struct mem_cgroup *memcg, int cpu)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002200{
2201 int idx = MEM_CGROUP_ON_MOVE;
2202
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002203 spin_lock(&memcg->pcp_counter_lock);
2204 per_cpu(memcg->stat->count[idx], cpu) = memcg->nocpu_base.count[idx];
2205 spin_unlock(&memcg->pcp_counter_lock);
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002206}
2207
2208static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002209 unsigned long action,
2210 void *hcpu)
2211{
2212 int cpu = (unsigned long)hcpu;
2213 struct memcg_stock_pcp *stock;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002214 struct mem_cgroup *iter;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002215
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002216 if ((action == CPU_ONLINE)) {
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002217 for_each_mem_cgroup(iter)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002218 synchronize_mem_cgroup_on_move(iter, cpu);
2219 return NOTIFY_OK;
2220 }
2221
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002222 if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN))
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002223 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002224
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08002225 for_each_mem_cgroup(iter)
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002226 mem_cgroup_drain_pcp_counter(iter, cpu);
2227
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002228 stock = &per_cpu(memcg_stock, cpu);
2229 drain_stock(stock);
2230 return NOTIFY_OK;
2231}
2232
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002233
2234/* See __mem_cgroup_try_charge() for details */
2235enum {
2236 CHARGE_OK, /* success */
2237 CHARGE_RETRY, /* need to retry but retry is not bad */
2238 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
 2239 CHARGE_WOULDBLOCK, /* __GFP_WAIT wasn't set and not enough res. */
2240 CHARGE_OOM_DIE, /* the current is killed because of OOM */
2241};
2242
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002243static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002244 unsigned int nr_pages, bool oom_check)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002245{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002246 unsigned long csize = nr_pages * PAGE_SIZE;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002247 struct mem_cgroup *mem_over_limit;
2248 struct res_counter *fail_res;
2249 unsigned long flags = 0;
2250 int ret;
2251
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002252 ret = res_counter_charge(&memcg->res, csize, &fail_res);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002253
2254 if (likely(!ret)) {
2255 if (!do_swap_account)
2256 return CHARGE_OK;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002257 ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002258 if (likely(!ret))
2259 return CHARGE_OK;
2260
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002261 res_counter_uncharge(&memcg->res, csize);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002262 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2263 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2264 } else
2265 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
Johannes Weiner9221edb2011-02-01 15:52:42 -08002266 /*
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002267 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
2268 * of regular pages (CHARGE_BATCH), or a single regular page (1).
Johannes Weiner9221edb2011-02-01 15:52:42 -08002269 *
2270 * Never reclaim on behalf of optional batching, retry with a
2271 * single page instead.
2272 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002273 if (nr_pages == CHARGE_BATCH)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002274 return CHARGE_RETRY;
2275
2276 if (!(gfp_mask & __GFP_WAIT))
2277 return CHARGE_WOULDBLOCK;
2278
Johannes Weiner56600482012-01-12 17:17:59 -08002279 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002280 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner19942822011-02-01 15:52:43 -08002281 return CHARGE_RETRY;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002282 /*
Johannes Weiner19942822011-02-01 15:52:43 -08002283 * Even though the limit is exceeded at this point, reclaim
2284 * may have been able to free some pages. Retry the charge
2285 * before killing the task.
2286 *
2287 * Only for regular pages, though: huge pages are rather
2288 * unlikely to succeed so close to the limit, and we fall back
2289 * to regular pages anyway in case of failure.
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002290 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002291 if (nr_pages == 1 && ret)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002292 return CHARGE_RETRY;
2293
2294 /*
2295 * At task move, charge accounts can be doubly counted. So, it's
2296 * better to wait until the end of task_move if something is going on.
2297 */
2298 if (mem_cgroup_wait_acct_move(mem_over_limit))
2299 return CHARGE_RETRY;
2300
2301	/* If we don't need to invoke the oom-killer at all, return immediately */
2302 if (!oom_check)
2303 return CHARGE_NOMEM;
2304 /* check OOM */
2305 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
2306 return CHARGE_OOM_DIE;
2307
2308 return CHARGE_RETRY;
2309}
2310
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002311/*
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002312 * Unlike exported interface, "oom" parameter is added. if oom==true,
2313 * oom-killer can be invoked.
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002314 */
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002315static int __mem_cgroup_try_charge(struct mm_struct *mm,
Andrea Arcangeliec168512011-01-13 15:46:56 -08002316 gfp_t gfp_mask,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002317 unsigned int nr_pages,
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002318 struct mem_cgroup **ptr,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002319 bool oom)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002320{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002321 unsigned int batch = max(CHARGE_BATCH, nr_pages);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002322 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002323 struct mem_cgroup *memcg = NULL;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002324 int ret;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002325
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002326 /*
2327	 * Unlike the global VM's OOM-kill, we are not in a system-wide memory
2328	 * shortage here. So, allow a task that is already dying to go ahead,
2329	 * in addition to MEMDIE tasks.
2330 */
2331 if (unlikely(test_thread_flag(TIF_MEMDIE)
2332 || fatal_signal_pending(current)))
2333 goto bypass;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002334
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002335 /*
Hugh Dickins3be91272008-02-07 00:14:19 -08002336 * We always charge the cgroup the mm_struct belongs to.
2337 * The mm_struct's mem_cgroup changes on task migration if the
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002338 * thread group leader migrates. It's possible that mm is not
2339 * set, if so charge the init_mm (happens for pagecache usage).
2340 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002341 if (!*ptr && !mm)
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002342 goto bypass;
2343again:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002344 if (*ptr) { /* css should be a valid one */
2345 memcg = *ptr;
2346 VM_BUG_ON(css_is_removed(&memcg->css));
2347 if (mem_cgroup_is_root(memcg))
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002348 goto done;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002349 if (nr_pages == 1 && consume_stock(memcg))
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002350 goto done;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002351 css_get(&memcg->css);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002352 } else {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002353 struct task_struct *p;
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002354
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002355 rcu_read_lock();
2356 p = rcu_dereference(mm->owner);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002357 /*
KAMEZAWA Hiroyukiebb76ce2010-12-29 14:07:11 -08002358 * Because we don't have task_lock(), "p" can exit.
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002359 * In that case, "memcg" can point to root or p can be NULL with
KAMEZAWA Hiroyukiebb76ce2010-12-29 14:07:11 -08002360		 * race with swapoff. Then, we have a small risk of mis-accounting.
2361		 * But that kind of mis-accounting by race always happens because
2362		 * we don't hold cgroup_mutex(). Taking it would be overkill, so we
2363		 * allow that small race here.
2364		 * (*) swapoff et al. will charge against the mm_struct, not against
2365		 * the task_struct. So, mm->owner can be NULL.
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002366 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002367 memcg = mem_cgroup_from_task(p);
2368 if (!memcg || mem_cgroup_is_root(memcg)) {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002369 rcu_read_unlock();
2370 goto done;
2371 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002372 if (nr_pages == 1 && consume_stock(memcg)) {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002373 /*
2374			 * It seems dangerous to access memcg without css_get().
2375			 * But considering how consume_stock works, it's not
2376			 * necessary. If consume_stock succeeds, some charges
2377 * from this memcg are cached on this cpu. So, we
2378 * don't need to call css_get()/css_tryget() before
2379 * calling consume_stock().
2380 */
2381 rcu_read_unlock();
2382 goto done;
2383 }
2384 /* after here, we may be blocked. we need to get refcnt */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002385 if (!css_tryget(&memcg->css)) {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002386 rcu_read_unlock();
2387 goto again;
2388 }
2389 rcu_read_unlock();
2390 }
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002391
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002392 do {
2393 bool oom_check;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002394
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002395 /* If killed, bypass charge */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002396 if (fatal_signal_pending(current)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002397 css_put(&memcg->css);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002398 goto bypass;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002399 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002400
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002401 oom_check = false;
2402 if (oom && !nr_oom_retries) {
2403 oom_check = true;
2404 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2405 }
Balbir Singh6d61ef42009-01-07 18:08:06 -08002406
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002407 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002408 switch (ret) {
2409 case CHARGE_OK:
2410 break;
2411 case CHARGE_RETRY: /* not in OOM situation but retry */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002412 batch = nr_pages;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002413 css_put(&memcg->css);
2414 memcg = NULL;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002415 goto again;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002416 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002417 css_put(&memcg->css);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002418 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002419 case CHARGE_NOMEM: /* OOM routine works */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002420 if (!oom) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002421 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002422 goto nomem;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002423 }
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002424 /* If oom, we never return -ENOMEM */
2425 nr_oom_retries--;
2426 break;
2427 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002428 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002429 goto bypass;
Balbir Singh66e17072008-02-07 00:13:56 -08002430 }
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002431 } while (ret != CHARGE_OK);
2432
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002433 if (batch > nr_pages)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002434 refill_stock(memcg, batch - nr_pages);
2435 css_put(&memcg->css);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002436done:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002437 *ptr = memcg;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002438 return 0;
2439nomem:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002440 *ptr = NULL;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002441 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002442bypass:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002443 *ptr = NULL;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002444 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002445}
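/*
 * A minimal sketch, not compiled (guarded by #if 0): the charge protocol as
 * used by callers later in this file. try_charge first, then either commit
 * the charge to the page's page_cgroup or cancel it if the page can no
 * longer be used. This mirrors mem_cgroup_charge_common() below.
 */
#if 0
static int example_charge_one_page(struct mm_struct *mm, struct page *page,
				   gfp_t gfp_mask)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	struct mem_cgroup *memcg = NULL;
	int ret;

	ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
	if (ret || !memcg)
		return ret;	/* failed, or bypassed (root memcg, dying task) */

	/* bind the charge to the page and update per-memcg statistics */
	__mem_cgroup_commit_charge(memcg, page, 1, pc,
				   MEM_CGROUP_CHARGE_TYPE_MAPPED);
	return 0;
}
#endif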
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002446
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002447/*
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002448 * Sometimes we have to undo a charge we got by try_charge().
2449 * This function is for that: it uncharges the res_counters that were
2450 * charged by try_charge().
2451 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002452static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
Johannes Weinere7018b82011-03-23 16:42:33 -07002453 unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002454{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002455 if (!mem_cgroup_is_root(memcg)) {
Johannes Weinere7018b82011-03-23 16:42:33 -07002456 unsigned long bytes = nr_pages * PAGE_SIZE;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002457
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002458 res_counter_uncharge(&memcg->res, bytes);
Johannes Weinere7018b82011-03-23 16:42:33 -07002459 if (do_swap_account)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002460 res_counter_uncharge(&memcg->memsw, bytes);
Johannes Weinere7018b82011-03-23 16:42:33 -07002461 }
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002462}
2463
2464/*
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002465 * A helper function to get a mem_cgroup from its ID. Must be called under
2466 * rcu_read_lock(). The caller must check css_is_removed() or similar if it
2467 * is a concern. (Dropping a refcnt from swap can be called against a removed
2468 * memcg.)
2469 */
2470static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2471{
2472 struct cgroup_subsys_state *css;
2473
2474 /* ID 0 is unused ID */
2475 if (!id)
2476 return NULL;
2477 css = css_lookup(&mem_cgroup_subsys, id);
2478 if (!css)
2479 return NULL;
2480 return container_of(css, struct mem_cgroup, css);
2481}
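/*
 * A minimal sketch, not compiled (guarded by #if 0): the usual pattern around
 * mem_cgroup_lookup(). Resolve a swap_cgroup id under rcu_read_lock() and
 * only keep the memcg if css_tryget() still succeeds, as
 * try_get_mem_cgroup_from_page() below does for swap cache pages.
 */
#if 0
static struct mem_cgroup *example_memcg_from_swap_id(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
	if (memcg && !css_tryget(&memcg->css))
		memcg = NULL;	/* the cgroup is being removed */
	rcu_read_unlock();
	return memcg;
}
#endif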
2482
Wu Fengguange42d9d52009-12-16 12:19:59 +01002483struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002484{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002485 struct mem_cgroup *memcg = NULL;
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002486 struct page_cgroup *pc;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002487 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002488 swp_entry_t ent;
2489
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002490 VM_BUG_ON(!PageLocked(page));
2491
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002492 pc = lookup_page_cgroup(page);
Daisuke Nishimurac0bd3f62009-04-30 15:08:11 -07002493 lock_page_cgroup(pc);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002494 if (PageCgroupUsed(pc)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002495 memcg = pc->mem_cgroup;
2496 if (memcg && !css_tryget(&memcg->css))
2497 memcg = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002498 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002499 ent.val = page_private(page);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002500 id = lookup_swap_cgroup(ent);
2501 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002502 memcg = mem_cgroup_lookup(id);
2503 if (memcg && !css_tryget(&memcg->css))
2504 memcg = NULL;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002505 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002506 }
Daisuke Nishimurac0bd3f62009-04-30 15:08:11 -07002507 unlock_page_cgroup(pc);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002508 return memcg;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002509}
2510
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002511static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
Johannes Weiner5564e882011-03-23 16:42:29 -07002512 struct page *page,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002513 unsigned int nr_pages,
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002514 struct page_cgroup *pc,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002515 enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002516{
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002517 lock_page_cgroup(pc);
2518 if (unlikely(PageCgroupUsed(pc))) {
2519 unlock_page_cgroup(pc);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002520 __mem_cgroup_cancel_charge(memcg, nr_pages);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002521 return;
2522 }
2523 /*
2524	 * we don't need page_cgroup_lock for tail pages, because they are not
2525 * accessed by any other context at this point.
2526 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002527 pc->mem_cgroup = memcg;
KAMEZAWA Hiroyuki261fb612009-09-23 15:56:33 -07002528 /*
2529 * We access a page_cgroup asynchronously without lock_page_cgroup().
2530 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2531 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2532 * before USED bit, we need memory barrier here.
2533 * See mem_cgroup_add_lru_list(), etc.
2534 */
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002535 smp_wmb();
Balbir Singh4b3bde42009-09-23 15:56:32 -07002536 switch (ctype) {
2537 case MEM_CGROUP_CHARGE_TYPE_CACHE:
2538 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2539 SetPageCgroupCache(pc);
2540 SetPageCgroupUsed(pc);
2541 break;
2542 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2543 ClearPageCgroupCache(pc);
2544 SetPageCgroupUsed(pc);
2545 break;
2546 default:
2547 break;
2548 }
Hugh Dickins3be91272008-02-07 00:14:19 -08002549
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002550 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002551 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki430e48632010-03-10 15:22:30 -08002552 /*
2553	 * "charge_statistics" updated the event counter. Then, check it and
2554	 * insert the ancestor (and the ancestor's ancestors) into the softlimit
2555	 * RB-tree if they exceed their softlimit.
2556 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002557 memcg_check_events(memcg, page);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002558}
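/*
 * A minimal sketch, not compiled (guarded by #if 0): the ordering that the
 * smp_wmb() above establishes. A lock-free reader that tests the USED bit
 * first needs a pairing read barrier before dereferencing pc->mem_cgroup,
 * roughly as sketched here.
 */
#if 0
static struct mem_cgroup *example_read_pc_memcg(struct page_cgroup *pc)
{
	if (!PageCgroupUsed(pc))
		return NULL;
	smp_rmb();		/* pairs with smp_wmb() in __mem_cgroup_commit_charge() */
	return pc->mem_cgroup;
}
#endif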
2559
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002560#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2561
2562#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2563 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2564/*
2565 * Because tail pages are not marked as "used", set the bit here. We're under
2566 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2567 */
2568void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2569{
2570 struct page_cgroup *head_pc = lookup_page_cgroup(head);
2571 struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2572 unsigned long flags;
2573
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002574 if (mem_cgroup_disabled())
2575 return;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002576 /*
KAMEZAWA Hiroyukiece35ca2011-01-20 14:44:24 -08002577 * We have no races with charge/uncharge but will have races with
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002578 * page state accounting.
2579 */
2580 move_lock_page_cgroup(head_pc, &flags);
2581
2582 tail_pc->mem_cgroup = head_pc->mem_cgroup;
2583 smp_wmb(); /* see __commit_charge() */
KAMEZAWA Hiroyukiece35ca2011-01-20 14:44:24 -08002584 if (PageCgroupAcctLRU(head_pc)) {
2585 enum lru_list lru;
2586 struct mem_cgroup_per_zone *mz;
2587
2588 /*
2589		 * LRU flags cannot be copied because we need to add the tail
2590		 * page to the LRU by a generic call, and our hook will be called.
2591		 * We hold lru_lock, so reduce the counter directly.
2592 */
2593 lru = page_lru(head);
Johannes Weiner97a6c372011-03-23 16:42:27 -07002594 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
KAMEZAWA Hiroyukiece35ca2011-01-20 14:44:24 -08002595 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2596 }
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002597 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2598 move_unlock_page_cgroup(head_pc, &flags);
2599}
2600#endif
2601
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002602/**
Johannes Weinerde3638d2011-03-23 16:42:28 -07002603 * mem_cgroup_move_account - move account of the page
Johannes Weiner5564e882011-03-23 16:42:29 -07002604 * @page: the page
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002605 * @nr_pages: number of regular pages (>1 for huge pages)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002606 * @pc: page_cgroup of the page.
2607 * @from: mem_cgroup which the page is moved from.
2608 * @to: mem_cgroup which the page is moved to. @from != @to.
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002609 * @uncharge: whether we should call uncharge and css_put against @from.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002610 *
2611 * The caller must confirm the following.
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002612 * - page is not on LRU (isolate_page() is useful.)
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002613 * - compound_lock is held when nr_pages > 1
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002614 *
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002615 * This function doesn't do "charge" nor css_get to the new cgroup. That should
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002616 * be done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002617 * is true, this function does "uncharge" from the old cgroup, but it doesn't if
2618 * @uncharge is false, so the caller should do the "uncharge" itself.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002619 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002620static int mem_cgroup_move_account(struct page *page,
2621 unsigned int nr_pages,
2622 struct page_cgroup *pc,
2623 struct mem_cgroup *from,
2624 struct mem_cgroup *to,
2625 bool uncharge)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002626{
Johannes Weinerde3638d2011-03-23 16:42:28 -07002627 unsigned long flags;
2628 int ret;
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002629
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002630 VM_BUG_ON(from == to);
Johannes Weiner5564e882011-03-23 16:42:29 -07002631 VM_BUG_ON(PageLRU(page));
Johannes Weinerde3638d2011-03-23 16:42:28 -07002632 /*
2633 * The page is isolated from LRU. So, collapse function
2634 * will not handle this page. But page splitting can happen.
2635 * Do this check under compound_page_lock(). The caller should
2636 * hold it.
2637 */
2638 ret = -EBUSY;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002639 if (nr_pages > 1 && !PageTransHuge(page))
Johannes Weinerde3638d2011-03-23 16:42:28 -07002640 goto out;
2641
2642 lock_page_cgroup(pc);
2643
2644 ret = -EINVAL;
2645 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
2646 goto unlock;
2647
2648 move_lock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002649
KAMEZAWA Hiroyuki8725d542010-04-06 14:35:05 -07002650 if (PageCgroupFileMapped(pc)) {
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08002651 /* Update mapped_file data for mem_cgroup */
2652 preempt_disable();
2653 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2654 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2655 preempt_enable();
Balbir Singhd69b0422009-06-17 16:26:34 -07002656 }
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002657 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002658 if (uncharge)
2659 /* This is not "cancel", but cancel_charge does all we need. */
Johannes Weinere7018b82011-03-23 16:42:33 -07002660 __mem_cgroup_cancel_charge(from, nr_pages);
Balbir Singhd69b0422009-06-17 16:26:34 -07002661
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002662 /* caller should have done css_get */
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002663 pc->mem_cgroup = to;
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002664 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002665 /*
2666	 * We charge against "to", which may not have any tasks. Then, "to"
2667	 * can be under rmdir(). But in the current implementation, the callers of
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08002668	 * this function are just force_empty() and move charge, so it's
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002669 * guaranteed that "to" is never removed. So, we don't check rmdir
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08002670 * status here.
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002671 */
Johannes Weinerde3638d2011-03-23 16:42:28 -07002672 move_unlock_page_cgroup(pc, &flags);
2673 ret = 0;
2674unlock:
Daisuke Nishimura57f9fd72009-12-15 16:47:11 -08002675 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukid2265e62010-03-10 15:22:31 -08002676 /*
2677 * check events
2678 */
Johannes Weiner5564e882011-03-23 16:42:29 -07002679 memcg_check_events(to, page);
2680 memcg_check_events(from, page);
Johannes Weinerde3638d2011-03-23 16:42:28 -07002681out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002682 return ret;
2683}
2684
2685/*
2686 * move charges to its parent.
2687 */
2688
Johannes Weiner5564e882011-03-23 16:42:29 -07002689static int mem_cgroup_move_parent(struct page *page,
2690 struct page_cgroup *pc,
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002691 struct mem_cgroup *child,
2692 gfp_t gfp_mask)
2693{
2694 struct cgroup *cg = child->css.cgroup;
2695 struct cgroup *pcg = cg->parent;
2696 struct mem_cgroup *parent;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002697 unsigned int nr_pages;
Andrew Morton4be44892011-03-23 16:42:39 -07002698 unsigned long uninitialized_var(flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002699 int ret;
2700
2701 /* Is ROOT ? */
2702 if (!pcg)
2703 return -EINVAL;
2704
Daisuke Nishimura57f9fd72009-12-15 16:47:11 -08002705 ret = -EBUSY;
2706 if (!get_page_unless_zero(page))
2707 goto out;
2708 if (isolate_lru_page(page))
2709 goto put;
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002710
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002711 nr_pages = hpage_nr_pages(page);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002712
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002713 parent = mem_cgroup_from_cont(pcg);
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002714 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002715 if (ret || !parent)
Daisuke Nishimura57f9fd72009-12-15 16:47:11 -08002716 goto put_back;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002717
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002718 if (nr_pages > 1)
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002719 flags = compound_lock_irqsave(page);
2720
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002721 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002722 if (ret)
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002723 __mem_cgroup_cancel_charge(parent, nr_pages);
Jesper Juhl8dba4742011-01-25 15:07:24 -08002724
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002725 if (nr_pages > 1)
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002726 compound_unlock_irqrestore(page, flags);
Jesper Juhl8dba4742011-01-25 15:07:24 -08002727put_back:
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002728 putback_lru_page(page);
Daisuke Nishimura57f9fd72009-12-15 16:47:11 -08002729put:
Daisuke Nishimura40d58132009-01-15 13:51:12 -08002730 put_page(page);
Daisuke Nishimura57f9fd72009-12-15 16:47:11 -08002731out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002732 return ret;
2733}
2734
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002735/*
2736 * Charge the memory controller for page usage.
2737 * Return
2738 * 0 if the charge was successful
2739 * < 0 if the cgroup is over its limit
2740 */
2741static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002742 gfp_t gfp_mask, enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002743{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002744 struct mem_cgroup *memcg = NULL;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002745 unsigned int nr_pages = 1;
Johannes Weiner8493ae42011-02-01 15:52:44 -08002746 struct page_cgroup *pc;
2747 bool oom = true;
2748 int ret;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002749
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002750 if (PageTransHuge(page)) {
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002751 nr_pages <<= compound_order(page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002752 VM_BUG_ON(!PageTransHuge(page));
Johannes Weiner8493ae42011-02-01 15:52:44 -08002753 /*
2754 * Never OOM-kill a process for a huge page. The
2755 * fault handler will fall back to regular pages.
2756 */
2757 oom = false;
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002758 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002759
2760 pc = lookup_page_cgroup(page);
Johannes Weineraf4a6622011-03-23 16:42:24 -07002761 BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002762
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002763 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
2764 if (ret || !memcg)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002765 return ret;
2766
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002767 __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002768 return 0;
2769}
2770
2771int mem_cgroup_newpage_charge(struct page *page,
2772 struct mm_struct *mm, gfp_t gfp_mask)
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002773{
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08002774 if (mem_cgroup_disabled())
Li Zefancede86a2008-07-25 01:47:18 -07002775 return 0;
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002776 /*
2777 * If already mapped, we don't have to account.
2778 * If page cache, page->mapping has address_space.
2779	 * But page->mapping may hold a stale anon_vma pointer;
2780	 * detect that with a PageAnon() check. A newly-mapped anon page's
2781	 * page->mapping is NULL.
2782 */
2783 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2784 return 0;
2785 if (unlikely(!mm))
2786 mm = &init_mm;
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002787 return mem_cgroup_charge_common(page, mm, gfp_mask,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002788 MEM_CGROUP_CHARGE_TYPE_MAPPED);
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002789}
2790
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002791static void
2792__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2793 enum charge_type ctype);
2794
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002795static void
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002796__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002797 enum charge_type ctype)
2798{
2799 struct page_cgroup *pc = lookup_page_cgroup(page);
2800 /*
2801	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2802	 * is already on the LRU. It means the page may be on some other
2803	 * page_cgroup's LRU. Take care of it.
2804 */
2805 mem_cgroup_lru_del_before_commit(page);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002806 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002807 mem_cgroup_lru_add_after_commit(page);
2808 return;
2809}
2810
Balbir Singhe1a1cd52008-02-07 00:14:02 -08002811int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2812 gfp_t gfp_mask)
Balbir Singh8697d332008-02-07 00:13:59 -08002813{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002814 struct mem_cgroup *memcg = NULL;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002815 int ret;
2816
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08002817 if (mem_cgroup_disabled())
Li Zefancede86a2008-07-25 01:47:18 -07002818 return 0;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002819 if (PageCompound(page))
2820 return 0;
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002821
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002822 if (unlikely(!mm))
Balbir Singh8697d332008-02-07 00:13:59 -08002823 mm = &init_mm;
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002824
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002825 if (page_is_file_cache(page)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002826 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &memcg, true);
2827 if (ret || !memcg)
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002828 return ret;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002829
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002830 /*
2831 * FUSE reuses pages without going through the final
2832 * put that would remove them from the LRU list, make
2833 * sure that they get relinked properly.
2834 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002835 __mem_cgroup_commit_charge_lrucare(page, memcg,
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002836 MEM_CGROUP_CHARGE_TYPE_CACHE);
2837 return ret;
2838 }
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002839 /* shmem */
2840 if (PageSwapCache(page)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002841 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002842 if (!ret)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002843 __mem_cgroup_commit_charge_swapin(page, memcg,
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002844 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2845 } else
2846 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002847 MEM_CGROUP_CHARGE_TYPE_SHMEM);
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002848
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002849 return ret;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002850}
2851
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002852/*
2853 * During swap-in (try_charge -> commit or cancel), the page is locked.
2854 * When try_charge() returns successfully, one refcnt to the memcg without a
Uwe Kleine-König21ae2952009-10-07 15:21:09 +02002855 * struct page_cgroup is acquired. This refcnt will be consumed by
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002856 * "commit()" or removed by "cancel()"
2857 */
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002858int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2859 struct page *page,
2860 gfp_t mask, struct mem_cgroup **ptr)
2861{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002862 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002863 int ret;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002864
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07002865 *ptr = NULL;
2866
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08002867 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002868 return 0;
2869
2870 if (!do_swap_account)
2871 goto charge_cur_mm;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002872 /*
2873 * A racing thread's fault, or swapoff, may have already updated
Hugh Dickins407f9c82009-12-14 17:59:30 -08002874 * the pte, and even removed page from swap cache: in those cases
2875 * do_swap_page()'s pte_same() test will fail; but there's also a
2876 * KSM case which does need to charge the page.
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002877 */
2878 if (!PageSwapCache(page))
Hugh Dickins407f9c82009-12-14 17:59:30 -08002879 goto charge_cur_mm;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002880 memcg = try_get_mem_cgroup_from_page(page);
2881 if (!memcg)
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002882 goto charge_cur_mm;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002883 *ptr = memcg;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002884 ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002885 css_put(&memcg->css);
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002886 return ret;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002887charge_cur_mm:
2888 if (unlikely(!mm))
2889 mm = &init_mm;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002890 return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002891}
2892
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002893static void
2894__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2895 enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002896{
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08002897 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002898 return;
2899 if (!ptr)
2900 return;
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002901 cgroup_exclude_rmdir(&ptr->css);
KAMEZAWA Hiroyuki5a6475a2011-03-23 16:42:42 -07002902
2903 __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002904 /*
2905	 * Now the swap is in memory. This means this page may be
2906	 * counted both as mem and swap: a double count.
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08002907	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2908	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
2909	 * may call delete_from_swap_cache() before we reach here.
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002910 */
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08002911 if (do_swap_account && PageSwapCache(page)) {
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002912 swp_entry_t ent = {.val = page_private(page)};
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002913 unsigned short id;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002914 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002915
2916 id = swap_cgroup_record(ent, 0);
2917 rcu_read_lock();
2918 memcg = mem_cgroup_lookup(id);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002919 if (memcg) {
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002920 /*
2921			 * This recorded memcg can be an obsolete one. So, avoid
2922 * calling css_tryget
2923 */
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002924 if (!mem_cgroup_is_root(memcg))
KAMEZAWA Hiroyuki4e649152009-10-01 15:44:11 -07002925 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002926 mem_cgroup_swap_statistics(memcg, false);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002927 mem_cgroup_put(memcg);
2928 }
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002929 rcu_read_unlock();
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08002930 }
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002931 /*
2932	 * At swapin, we may charge against a cgroup which has no tasks.
2933	 * So, rmdir()->pre_destroy() can be called while we do this charge.
2934	 * In that case, we need to call pre_destroy() again. Check it here.
2935 */
2936 cgroup_release_and_wakeup_rmdir(&ptr->css);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002937}
2938
Daisuke Nishimura83aae4c2009-04-02 16:57:48 -07002939void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2940{
2941 __mem_cgroup_commit_charge_swapin(page, ptr,
2942 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2943}
2944
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002945void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002946{
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08002947 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002948 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002949 if (!memcg)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002950 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002951 __mem_cgroup_cancel_charge(memcg, 1);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002952}
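/*
 * A minimal sketch, not compiled (guarded by #if 0): how the swap-in triple
 * above is meant to be used by a fault handler, roughly following
 * do_swap_page(). example_map_page() is a hypothetical helper standing in
 * for the real mapping step.
 */
#if 0
static int example_swapin_fault(struct mm_struct *mm, struct page *page,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg))
		return -ENOMEM;

	if (example_map_page(page)) {		/* hypothetical helper */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EFAULT;
	}

	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}
#endif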
2953
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002954static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002955 unsigned int nr_pages,
2956 const enum charge_type ctype)
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002957{
2958 struct memcg_batch_info *batch = NULL;
2959 bool uncharge_memsw = true;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002960
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002961 /* If swapout, usage of swap doesn't decrease */
2962 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2963 uncharge_memsw = false;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002964
2965 batch = &current->memcg_batch;
2966 /*
2967	 * Usually, we do css_get() when we remember a memcg pointer.
2968	 * But in this case, we keep res->usage until the end of a series of
2969	 * uncharges. Then, it's ok to ignore the memcg's refcnt.
2970 */
2971 if (!batch->memcg)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002972 batch->memcg = memcg;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002973 /*
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002974 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002975 * In those cases, all pages freed continuously can be expected to be in
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002976	 * the same cgroup, and we have a chance to coalesce uncharges.
2977	 * But we uncharge one by one if this task is killed by OOM (TIF_MEMDIE)
2978	 * because we want to do the uncharge as soon as possible.
2979 */
2980
2981 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2982 goto direct_uncharge;
2983
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002984 if (nr_pages > 1)
Andrea Arcangeliec168512011-01-13 15:46:56 -08002985 goto direct_uncharge;
2986
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002987 /*
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002988	 * In the typical case, batch->memcg == memcg. This means we can
2989	 * merge a series of uncharges into one uncharge of the res_counter.
2990	 * If not, we uncharge the res_counter one by one.
2991 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002992 if (batch->memcg != memcg)
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002993 goto direct_uncharge;
2994 /* remember freed charge and uncharge it later */
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07002995 batch->nr_pages++;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002996 if (uncharge_memsw)
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07002997 batch->memsw_nr_pages++;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002998 return;
2999direct_uncharge:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003000 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003001 if (uncharge_memsw)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003002 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
3003 if (unlikely(batch->memcg != memcg))
3004 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003005 return;
3006}
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08003007
Balbir Singh8697d332008-02-07 00:13:59 -08003008/*
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003009 * uncharge if !page_mapped(page)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003010 */
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003011static struct mem_cgroup *
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003012__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003013{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003014 struct mem_cgroup *memcg = NULL;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003015 unsigned int nr_pages = 1;
3016 struct page_cgroup *pc;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003017
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08003018 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003019 return NULL;
Balbir Singh40779602008-04-04 14:29:59 -07003020
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003021 if (PageSwapCache(page))
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003022 return NULL;
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003023
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003024 if (PageTransHuge(page)) {
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003025 nr_pages <<= compound_order(page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08003026 VM_BUG_ON(!PageTransHuge(page));
3027 }
Balbir Singh8697d332008-02-07 00:13:59 -08003028 /*
Balbir Singh3c541e12008-02-07 00:14:41 -08003029 * Check if our page_cgroup is valid
Balbir Singh8697d332008-02-07 00:13:59 -08003030 */
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003031 pc = lookup_page_cgroup(page);
3032 if (unlikely(!pc || !PageCgroupUsed(pc)))
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003033 return NULL;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08003034
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003035 lock_page_cgroup(pc);
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003036
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003037 memcg = pc->mem_cgroup;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003038
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003039 if (!PageCgroupUsed(pc))
3040 goto unlock_out;
3041
3042 switch (ctype) {
3043 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07003044 case MEM_CGROUP_CHARGE_TYPE_DROP:
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003045 /* See mem_cgroup_prepare_migration() */
3046 if (page_mapped(page) || PageCgroupMigration(pc))
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003047 goto unlock_out;
3048 break;
3049 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
3050 if (!PageAnon(page)) { /* Shared memory */
3051 if (page->mapping && !page_is_file_cache(page))
3052 goto unlock_out;
3053 } else if (page_mapped(page)) /* Anon */
3054 goto unlock_out;
3055 break;
3056 default:
3057 break;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003058 }
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003059
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003060 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07003061
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003062 ClearPageCgroupUsed(pc);
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08003063 /*
3064 * pc->mem_cgroup is not cleared here. It will be accessed when it's
3065 * freed from LRU. This is safe because uncharged page is expected not
3066 * to be reused (freed soon). Exception is SwapCache, it's handled by
3067 * special functions.
3068 */
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08003069
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003070 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003071 /*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003072 * even after unlock, we have memcg->res.usage here and this memcg
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003073 * will never be freed.
3074 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003075 memcg_check_events(memcg, page);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003076 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003077 mem_cgroup_swap_statistics(memcg, true);
3078 mem_cgroup_get(memcg);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003079 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003080 if (!mem_cgroup_is_root(memcg))
3081 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003082
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003083 return memcg;
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003084
3085unlock_out:
3086 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003087 return NULL;
Balbir Singh3c541e12008-02-07 00:14:41 -08003088}
3089
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003090void mem_cgroup_uncharge_page(struct page *page)
3091{
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003092 /* early check. */
3093 if (page_mapped(page))
3094 return;
3095 if (page->mapping && !PageAnon(page))
3096 return;
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003097 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
3098}
3099
3100void mem_cgroup_uncharge_cache_page(struct page *page)
3101{
3102 VM_BUG_ON(page_mapped(page));
KAMEZAWA Hiroyukib7abea92008-10-18 20:28:09 -07003103 VM_BUG_ON(page->mapping);
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003104 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
3105}
3106
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003107/*
3108 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
3109 * In those cases, pages are freed continuously and we can expect the pages
3110 * to be in the same memcg. Each of those callers itself limits the number of
3111 * pages freed at once, so uncharge_start/end() is called properly.
3112 * This may be called plural (2 or more) times in a context.
3113 */
3114
3115void mem_cgroup_uncharge_start(void)
3116{
3117 current->memcg_batch.do_batch++;
3118 /* We can do nest. */
3119 if (current->memcg_batch.do_batch == 1) {
3120 current->memcg_batch.memcg = NULL;
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07003121 current->memcg_batch.nr_pages = 0;
3122 current->memcg_batch.memsw_nr_pages = 0;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003123 }
3124}
3125
3126void mem_cgroup_uncharge_end(void)
3127{
3128 struct memcg_batch_info *batch = &current->memcg_batch;
3129
3130 if (!batch->do_batch)
3131 return;
3132
3133 batch->do_batch--;
3134 if (batch->do_batch) /* If stacked, do nothing. */
3135 return;
3136
3137 if (!batch->memcg)
3138 return;
3139 /*
3140 * This "batch->memcg" is valid without any css_get/put etc...
3141	 * because we hide charges behind us.
3142 */
Johannes Weiner7ffd4ca2011-03-23 16:42:35 -07003143 if (batch->nr_pages)
3144 res_counter_uncharge(&batch->memcg->res,
3145 batch->nr_pages * PAGE_SIZE);
3146 if (batch->memsw_nr_pages)
3147 res_counter_uncharge(&batch->memcg->memsw,
3148 batch->memsw_nr_pages * PAGE_SIZE);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003149 memcg_oom_recover(batch->memcg);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08003150 /* forget this pointer (for sanity check) */
3151 batch->memcg = NULL;
3152}
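/*
 * A minimal sketch, not compiled (guarded by #if 0): the batching pair above
 * wrapped around a loop that uncharges many page cache pages, as the
 * truncate and invalidate paths do. Uncharges against the same memcg are
 * coalesced and settled once in mem_cgroup_uncharge_end().
 */
#if 0
static void example_uncharge_many(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();
}
#endif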
3153
Daisuke Nishimurae767e052009-05-28 14:34:28 -07003154#ifdef CONFIG_SWAP
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003155/*
Daisuke Nishimurae767e052009-05-28 14:34:28 -07003156 * called after __delete_from_swap_cache(); it drops the "page" account.
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003157 * memcg information is recorded to swap_cgroup of "ent"
3158 */
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07003159void
3160mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003161{
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003162 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07003163 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003164
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07003165 if (!swapout) /* this was a swap cache but the swap is unused ! */
3166 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
3167
3168 memcg = __mem_cgroup_uncharge_common(page, ctype);
3169
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07003170 /*
3171 * record memcg information, if swapout && memcg != NULL,
3172 * mem_cgroup_get() was called in uncharge().
3173 */
3174 if (do_swap_account && swapout && memcg)
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07003175 swap_cgroup_record(ent, css_id(&memcg->css));
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003176}
Daisuke Nishimurae767e052009-05-28 14:34:28 -07003177#endif
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08003178
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003179#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3180/*
3181 * called from swap_entry_free(). remove record in swap_cgroup and
3182 * uncharge "memsw" account.
3183 */
3184void mem_cgroup_uncharge_swap(swp_entry_t ent)
3185{
3186 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07003187 unsigned short id;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003188
3189 if (!do_swap_account)
3190 return;
3191
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07003192 id = swap_cgroup_record(ent, 0);
3193 rcu_read_lock();
3194 memcg = mem_cgroup_lookup(id);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003195 if (memcg) {
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07003196 /*
3197 * We uncharge this because swap is freed.
3198	 * This memcg can be an obsolete one. We avoid calling css_tryget()
3199 */
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003200 if (!mem_cgroup_is_root(memcg))
KAMEZAWA Hiroyuki4e649152009-10-01 15:44:11 -07003201 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003202 mem_cgroup_swap_statistics(memcg, false);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003203 mem_cgroup_put(memcg);
3204 }
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07003205 rcu_read_unlock();
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003206}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003207
3208/**
3209 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3210 * @entry: swap entry to be moved
3211 * @from: mem_cgroup which the entry is moved from
3212 * @to: mem_cgroup which the entry is moved to
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003213 * @need_fixup: whether we should fixup res_counters and refcounts.
Daisuke Nishimura02491442010-03-10 15:22:17 -08003214 *
3215 * It succeeds only when the swap_cgroup's record for this entry is the same
3216 * as the mem_cgroup's id of @from.
3217 *
3218 * Returns 0 on success, -EINVAL on failure.
3219 *
3220 * The caller must have charged to @to, IOW, called res_counter_charge() about
3221 * both res and memsw, and called css_get().
3222 */
3223static int mem_cgroup_move_swap_account(swp_entry_t entry,
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003224 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003225{
3226 unsigned short old_id, new_id;
3227
3228 old_id = css_id(&from->css);
3229 new_id = css_id(&to->css);
3230
3231 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08003232 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08003233 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003234 /*
3235 * This function is only called from task migration context now.
3236 * It postpones res_counter and refcount handling till the end
3237 * of task migration(mem_cgroup_clear_mc()) for performance
3238 * improvement. But we cannot postpone mem_cgroup_get(to)
3239 * because if the process that has been moved to @to does
3240 * swap-in, the refcount of @to might be decreased to 0.
3241 */
Daisuke Nishimura02491442010-03-10 15:22:17 -08003242 mem_cgroup_get(to);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003243 if (need_fixup) {
3244 if (!mem_cgroup_is_root(from))
3245 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3246 mem_cgroup_put(from);
3247 /*
3248 * we charged both to->res and to->memsw, so we should
3249 * uncharge to->res.
3250 */
3251 if (!mem_cgroup_is_root(to))
3252 res_counter_uncharge(&to->res, PAGE_SIZE);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003253 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08003254 return 0;
3255 }
3256 return -EINVAL;
3257}
3258#else
3259static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08003260 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
Daisuke Nishimura02491442010-03-10 15:22:17 -08003261{
3262 return -EINVAL;
3263}
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003264#endif
3265
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08003266/*
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003267 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
3268 * page belongs to.
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08003269 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003270int mem_cgroup_prepare_migration(struct page *page,
Miklos Szeredief6a3c62011-03-22 16:30:52 -07003271 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08003272{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003273 struct mem_cgroup *memcg = NULL;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003274 struct page_cgroup *pc;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003275 enum charge_type ctype;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07003276 int ret = 0;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08003277
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07003278 *ptr = NULL;
3279
Andrea Arcangeliec168512011-01-13 15:46:56 -08003280 VM_BUG_ON(PageTransHuge(page));
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08003281 if (mem_cgroup_disabled())
Balbir Singh40779602008-04-04 14:29:59 -07003282 return 0;
3283
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003284 pc = lookup_page_cgroup(page);
3285 lock_page_cgroup(pc);
3286 if (PageCgroupUsed(pc)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003287 memcg = pc->mem_cgroup;
3288 css_get(&memcg->css);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003289 /*
3290 * At migrating an anonymous page, its mapcount goes down
3291 * to 0 and uncharge() will be called. But, even if it's fully
3292 * unmapped, migration may fail and this page has to be
3293 * charged again. We set MIGRATION flag here and delay uncharge
3294 * until end_migration() is called
3295 *
3296 * Corner Case Thinking
3297 * A)
3298 * When the old page was mapped as Anon and it's unmap-and-freed
3299 * while migration was ongoing.
3300 * If unmap finds the old page, uncharge() of it will be delayed
3301 * until end_migration(). If unmap finds a new page, it's
3302	 * uncharged when its mapcount goes from 1 to 0. If the unmap code
3303 * finds swap_migration_entry, the new page will not be mapped
3304 * and end_migration() will find it(mapcount==0).
3305 *
3306 * B)
3307	 * When the old page was mapped but migration fails, the kernel
3308 * remaps it. A charge for it is kept by MIGRATION flag even
3309 * if mapcount goes down to 0. We can do remap successfully
3310 * without charging it again.
3311 *
3312 * C)
3313 * The "old" page is under lock_page() until the end of
3314 * migration, so, the old page itself will not be swapped-out.
3315	 * If the new page is swapped out before end_migration, our
3316 * hook to usual swap-out path will catch the event.
3317 */
3318 if (PageAnon(page))
3319 SetPageCgroupMigration(pc);
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08003320 }
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003321 unlock_page_cgroup(pc);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003322 /*
3323 * If the page is not charged at this point,
3324 * we return here.
3325 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003326 if (!memcg)
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003327 return 0;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003328
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003329 *ptr = memcg;
Johannes Weiner7ec99d62011-03-23 16:42:36 -07003330 ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003331 css_put(&memcg->css);/* drop extra refcnt */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003332 if (ret || *ptr == NULL) {
3333 if (PageAnon(page)) {
3334 lock_page_cgroup(pc);
3335 ClearPageCgroupMigration(pc);
3336 unlock_page_cgroup(pc);
3337 /*
3338 * The old page may be fully unmapped while we kept it.
3339 */
3340 mem_cgroup_uncharge_page(page);
3341 }
3342 return -ENOMEM;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07003343 }
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003344 /*
3345	 * We charge the new page before it's used/mapped. So, even if unlock_page()
3346	 * is called before end_migration, we can catch all events on this new
3347	 * page. In case the new page is migrated but not remapped, the new page's
3348	 * mapcount will finally be 0 and we call uncharge in end_migration().
3349 */
3350 pc = lookup_page_cgroup(newpage);
3351 if (PageAnon(page))
3352 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
3353 else if (page_is_file_cache(page))
3354 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
3355 else
3356 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003357 __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07003358 return ret;
3359}
Hugh Dickinsfb59e9f2008-03-04 14:29:16 -08003360
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003361/* remove the redundant charge if migration failed */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003362void mem_cgroup_end_migration(struct mem_cgroup *memcg,
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08003363 struct page *oldpage, struct page *newpage, bool migration_ok)
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07003364{
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003365 struct page *used, *unused;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003366 struct page_cgroup *pc;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003367
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003368 if (!memcg)
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003369 return;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003370 /* blocks rmdir() */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003371 cgroup_exclude_rmdir(&memcg->css);
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08003372 if (!migration_ok) {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003373 used = oldpage;
3374 unused = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003375 } else {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003376 used = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003377 unused = oldpage;
3378 }
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003379 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003380 * We disallowed uncharge of pages under migration because mapcount
3381	 * of the page goes down to zero, temporarily.
3382	 * Clear the flag and check whether the page should still be charged.
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003383 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003384 pc = lookup_page_cgroup(oldpage);
3385 lock_page_cgroup(pc);
3386 ClearPageCgroupMigration(pc);
3387 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003388
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003389 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3390
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003391 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003392	 * If the page is file cache, the radix-tree replacement is atomic
3393	 * and we can skip this check. When it was an Anon page, its mapcount
3394	 * goes down to 0. But because we added the MIGRATION flag, it's not
3395	 * uncharged yet. There are several cases, but the page->mapcount check
3396	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
3397	 * checking. (see prepare_charge() also)
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003398 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003399 if (PageAnon(used))
3400 mem_cgroup_uncharge_page(used);
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07003401 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003402	 * At migration, we may charge against a cgroup which has no
3403 * tasks.
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07003404 * So, rmdir()->pre_destroy() can be called while we do this charge.
3405	 * In that case, we need to call pre_destroy() again. Check it here.
3406 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003407 cgroup_release_and_wakeup_rmdir(&memcg->css);
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08003408}
Pavel Emelianov78fb7462008-02-07 00:13:51 -08003409
KAMEZAWA Hiroyukiab936cb2012-01-12 17:17:44 -08003410/*
3411 * When replacing page cache, the newpage is not under any memcg but it is
3412 * on the LRU. So this function doesn't touch the res_counter but handles
3413 * the LRU correctly. Both pages are locked so we cannot race with uncharge.
3414 */
3415void mem_cgroup_replace_page_cache(struct page *oldpage,
3416 struct page *newpage)
3417{
3418 struct mem_cgroup *memcg;
3419 struct page_cgroup *pc;
3420 struct zone *zone;
3421 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3422 unsigned long flags;
3423
3424 if (mem_cgroup_disabled())
3425 return;
3426
3427 pc = lookup_page_cgroup(oldpage);
3428 /* fix accounting on old pages */
3429 lock_page_cgroup(pc);
3430 memcg = pc->mem_cgroup;
3431 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
3432 ClearPageCgroupUsed(pc);
3433 unlock_page_cgroup(pc);
3434
3435 if (PageSwapBacked(oldpage))
3436 type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3437
3438 zone = page_zone(newpage);
3439 pc = lookup_page_cgroup(newpage);
3440 /*
3441 * Even if newpage->mapping was NULL before starting replacement,
3442	 * the newpage may already be on the LRU (or on a pagevec bound for the
3443	 * LRU). We hold the LRU lock while we overwrite pc->mem_cgroup.
3444 */
3445 spin_lock_irqsave(&zone->lru_lock, flags);
3446 if (PageLRU(newpage))
3447 del_page_from_lru_list(zone, newpage, page_lru(newpage));
3448 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
3449 if (PageLRU(newpage))
3450 add_page_to_lru_list(zone, newpage, page_lru(newpage));
3451 spin_unlock_irqrestore(&zone->lru_lock, flags);
3452}
3453
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07003454#ifdef CONFIG_DEBUG_VM
3455static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3456{
3457 struct page_cgroup *pc;
3458
3459 pc = lookup_page_cgroup(page);
3460 if (likely(pc) && PageCgroupUsed(pc))
3461 return pc;
3462 return NULL;
3463}
3464
3465bool mem_cgroup_bad_page_check(struct page *page)
3466{
3467 if (mem_cgroup_disabled())
3468 return false;
3469
3470 return lookup_page_cgroup_used(page) != NULL;
3471}
3472
3473void mem_cgroup_print_bad_page(struct page *page)
3474{
3475 struct page_cgroup *pc;
3476
3477 pc = lookup_page_cgroup_used(page);
3478 if (pc) {
3479 int ret = -1;
3480 char *path;
3481
3482 printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
3483 pc, pc->flags, pc->mem_cgroup);
3484
3485 path = kmalloc(PATH_MAX, GFP_KERNEL);
3486 if (path) {
3487 rcu_read_lock();
3488 ret = cgroup_path(pc->mem_cgroup->css.cgroup,
3489 path, PATH_MAX);
3490 rcu_read_unlock();
3491 }
3492
3493 printk(KERN_CONT "(%s)\n",
3494 (ret < 0) ? "cannot get the path" : path);
3495 kfree(path);
3496 }
3497}
3498#endif
3499
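/*
 * set_limit_mutex serializes the limit updates below, so that the check of
 * memcg->res.limit against memcg->memsw.limit and the following
 * res_counter_set_limit() happen atomically with respect to each other.
 */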
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003500static DEFINE_MUTEX(set_limit_mutex);
3501
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08003502static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003503 unsigned long long val)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003504{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003505 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003506 u64 memswlimit, memlimit;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003507 int ret = 0;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003508 int children = mem_cgroup_count_children(memcg);
3509 u64 curusage, oldusage;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003510 int enlarge;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003511
3512 /*
3513	 * To keep hierarchical_reclaim simple, how long we should retry
3514	 * depends on the caller. We set our retry count to be a function
3515	 * of the number of children we have to visit in this loop.
3516 */
3517 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3518
3519 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003520
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003521 enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003522 while (retry_count) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003523 if (signal_pending(current)) {
3524 ret = -EINTR;
3525 break;
3526 }
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003527 /*
3528		 * Rather than hiding all of this in some function, do it in an
3529		 * open-coded manner so you can see what really happens.
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003530 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003531 */
3532 mutex_lock(&set_limit_mutex);
3533 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3534 if (memswlimit < val) {
3535 ret = -EINVAL;
3536 mutex_unlock(&set_limit_mutex);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003537 break;
3538 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003539
3540 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3541 if (memlimit < val)
3542 enlarge = 1;
3543
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003544 ret = res_counter_set_limit(&memcg->res, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07003545 if (!ret) {
3546 if (memswlimit == val)
3547 memcg->memsw_is_minimum = true;
3548 else
3549 memcg->memsw_is_minimum = false;
3550 }
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003551 mutex_unlock(&set_limit_mutex);
3552
3553 if (!ret)
3554 break;
3555
Johannes Weiner56600482012-01-12 17:17:59 -08003556 mem_cgroup_reclaim(memcg, GFP_KERNEL,
3557 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003558 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3559 /* Usage is reduced ? */
3560 if (curusage >= oldusage)
3561 retry_count--;
3562 else
3563 oldusage = curusage;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003564 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003565 if (!ret && enlarge)
3566 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08003567
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003568 return ret;
3569}
3570
Li Zefan338c8432009-06-17 16:27:15 -07003571static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3572 unsigned long long val)
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003573{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003574 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003575 u64 memlimit, memswlimit, oldusage, curusage;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003576 int children = mem_cgroup_count_children(memcg);
3577 int ret = -EBUSY;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003578 int enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003579
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003580	/* see mem_cgroup_resize_limit() */
3581 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3582 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003583 while (retry_count) {
3584 if (signal_pending(current)) {
3585 ret = -EINTR;
3586 break;
3587 }
3588 /*
3589		 * Rather than hiding all of this in some function, do it in an
3590		 * open-coded manner so you can see what really happens.
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003591 * We have to guarantee memcg->res.limit < memcg->memsw.limit.
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003592 */
3593 mutex_lock(&set_limit_mutex);
3594 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3595 if (memlimit > val) {
3596 ret = -EINVAL;
3597 mutex_unlock(&set_limit_mutex);
3598 break;
3599 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003600 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3601 if (memswlimit < val)
3602 enlarge = 1;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003603 ret = res_counter_set_limit(&memcg->memsw, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07003604 if (!ret) {
3605 if (memlimit == val)
3606 memcg->memsw_is_minimum = true;
3607 else
3608 memcg->memsw_is_minimum = false;
3609 }
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003610 mutex_unlock(&set_limit_mutex);
3611
3612 if (!ret)
3613 break;
3614
Johannes Weiner56600482012-01-12 17:17:59 -08003615 mem_cgroup_reclaim(memcg, GFP_KERNEL,
3616 MEM_CGROUP_RECLAIM_NOSWAP |
3617 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003618 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003619 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003620 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003621 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003622 else
3623 oldusage = curusage;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003624 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003625 if (!ret && enlarge)
3626 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003627 return ret;
3628}
3629
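/*
 * mem_cgroup_soft_limit_reclaim - reclaim from memcgs that exceed their soft
 * limit in @zone.  Walks the per-zone soft-limit RB tree, repeatedly picking
 * the memcg with the largest excess, reclaiming from it and re-inserting it
 * with its updated excess.  Returns the number of pages reclaimed and adds
 * the pages scanned to @total_scanned.  Its callers sit in the global
 * reclaim path in mm/vmscan.c.
 */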
Balbir Singh4e416952009-09-23 15:56:39 -07003630unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
Ying Han0ae5e892011-05-26 16:25:25 -07003631 gfp_t gfp_mask,
3632 unsigned long *total_scanned)
Balbir Singh4e416952009-09-23 15:56:39 -07003633{
3634 unsigned long nr_reclaimed = 0;
3635 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3636 unsigned long reclaimed;
3637 int loop = 0;
3638 struct mem_cgroup_tree_per_zone *mctz;
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003639 unsigned long long excess;
Ying Han0ae5e892011-05-26 16:25:25 -07003640 unsigned long nr_scanned;
Balbir Singh4e416952009-09-23 15:56:39 -07003641
3642 if (order > 0)
3643 return 0;
3644
KOSAKI Motohiro00918b62010-08-10 18:03:05 -07003645 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
Balbir Singh4e416952009-09-23 15:56:39 -07003646 /*
3647	 * This loop can run for a while, especially if mem_cgroups continuously
3648	 * keep exceeding their soft limit and putting the system under
3649	 * pressure.
3650 */
3651 do {
3652 if (next_mz)
3653 mz = next_mz;
3654 else
3655 mz = mem_cgroup_largest_soft_limit_node(mctz);
3656 if (!mz)
3657 break;
3658
Ying Han0ae5e892011-05-26 16:25:25 -07003659 nr_scanned = 0;
Johannes Weiner56600482012-01-12 17:17:59 -08003660 reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone,
3661 gfp_mask, &nr_scanned);
Balbir Singh4e416952009-09-23 15:56:39 -07003662 nr_reclaimed += reclaimed;
Ying Han0ae5e892011-05-26 16:25:25 -07003663 *total_scanned += nr_scanned;
Balbir Singh4e416952009-09-23 15:56:39 -07003664 spin_lock(&mctz->lock);
3665
3666 /*
3667 * If we failed to reclaim anything from this memory cgroup
3668 * it is time to move on to the next cgroup
3669 */
3670 next_mz = NULL;
3671 if (!reclaimed) {
3672 do {
3673 /*
3674 * Loop until we find yet another one.
3675 *
3676 * By the time we get the soft_limit lock
3677				 * again, someone might have added the
3678 * group back on the RB tree. Iterate to
3679 * make sure we get a different mem.
3680 * mem_cgroup_largest_soft_limit_node returns
3681 * NULL if no other cgroup is present on
3682 * the tree
3683 */
3684 next_mz =
3685 __mem_cgroup_largest_soft_limit_node(mctz);
Michal Hocko39cc98f2011-05-26 16:25:28 -07003686 if (next_mz == mz)
Balbir Singh4e416952009-09-23 15:56:39 -07003687 css_put(&next_mz->mem->css);
Michal Hocko39cc98f2011-05-26 16:25:28 -07003688 else /* next_mz == NULL or other memcg */
Balbir Singh4e416952009-09-23 15:56:39 -07003689 break;
3690 } while (1);
3691 }
Balbir Singh4e416952009-09-23 15:56:39 -07003692 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003693 excess = res_counter_soft_limit_excess(&mz->mem->res);
Balbir Singh4e416952009-09-23 15:56:39 -07003694 /*
3695 * One school of thought says that we should not add
3696 * back the node to the tree if reclaim returns 0.
3697		 * But our reclaim could return 0 simply because, due
3698		 * to priority, we are exposing a smaller subset of
3699		 * memory to reclaim from. Consider this as a longer
3700 * term TODO.
3701 */
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003702 /* If excess == 0, no tree ops */
3703 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
Balbir Singh4e416952009-09-23 15:56:39 -07003704 spin_unlock(&mctz->lock);
3705 css_put(&mz->mem->css);
3706 loop++;
3707 /*
3708 * Could not reclaim anything and there are no more
3709 * mem cgroups to try or we seem to be looping without
3710 * reclaiming anything.
3711 */
3712 if (!nr_reclaimed &&
3713 (next_mz == NULL ||
3714 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3715 break;
3716 } while (!nr_reclaimed);
3717 if (next_mz)
3718 css_put(&next_mz->mem->css);
3719 return nr_reclaimed;
3720}
3721
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003722/*
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003723 * This routine traverses the page_cgroups on the given list and drops them all.
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003724 * *And* this routine doesn't reclaim the page itself, it just removes the page_cgroup.
3725 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003726static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003727 int node, int zid, enum lru_list lru)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003728{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003729 struct zone *zone;
3730 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003731 struct page_cgroup *pc, *busy;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003732 unsigned long flags, loop;
KAMEZAWA Hiroyuki072c56c2008-02-07 00:14:39 -08003733 struct list_head *list;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003734 int ret = 0;
KAMEZAWA Hiroyuki072c56c2008-02-07 00:14:39 -08003735
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003736 zone = &NODE_DATA(node)->node_zones[zid];
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003737 mz = mem_cgroup_zoneinfo(memcg, node, zid);
Johannes Weiner6290df52012-01-12 17:18:10 -08003738 list = &mz->lruvec.lists[lru];
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003739
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003740 loop = MEM_CGROUP_ZSTAT(mz, lru);
3741	/* give some margin against EBUSY etc... */
3742 loop += 256;
3743 busy = NULL;
3744 while (loop--) {
Johannes Weiner5564e882011-03-23 16:42:29 -07003745 struct page *page;
3746
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003747 ret = 0;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003748 spin_lock_irqsave(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003749 if (list_empty(list)) {
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003750 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003751 break;
3752 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003753 pc = list_entry(list->prev, struct page_cgroup, lru);
3754 if (busy == pc) {
3755 list_move(&pc->lru, list);
Thiago Farina648bcc72010-03-05 13:42:04 -08003756 busy = NULL;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003757 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003758 continue;
3759 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003760 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003761
Johannes Weiner6b3ae582011-03-23 16:42:30 -07003762 page = lookup_cgroup_page(pc);
Johannes Weiner5564e882011-03-23 16:42:29 -07003763
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003764 ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003765 if (ret == -ENOMEM)
3766 break;
3767
3768 if (ret == -EBUSY || ret == -EINVAL) {
3769 /* found lock contention or "pc" is obsolete. */
3770 busy = pc;
3771 cond_resched();
3772 } else
3773 busy = NULL;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003774 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003775
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003776 if (!ret && !list_empty(list))
3777 return -EBUSY;
3778 return ret;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003779}
3780
3781/*
3782 * Make the mem_cgroup's charge 0 if there are no tasks.
3783 * This enables deleting this mem_cgroup.
3784 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003785static int mem_cgroup_force_empty(struct mem_cgroup *memcg, bool free_all)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003786{
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003787 int ret;
3788 int node, zid, shrink;
3789 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003790 struct cgroup *cgrp = memcg->css.cgroup;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08003791
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003792 css_get(&memcg->css);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003793
3794 shrink = 0;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003795 /* should free all ? */
3796 if (free_all)
3797 goto try_to_free;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003798move_account:
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003799 do {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003800 ret = -EBUSY;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003801 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003802 goto out;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003803 ret = -EINTR;
3804 if (signal_pending(current))
3805 goto out;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003806		/* This is for making all *used* pages be on the LRU. */
3807 lru_add_drain_all();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003808 drain_all_stock_sync(memcg);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003809 ret = 0;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003810 mem_cgroup_start_move(memcg);
KAMEZAWA Hiroyuki299b4ea2009-01-29 14:25:17 -08003811 for_each_node_state(node, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003812 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
Christoph Lameterb69408e2008-10-18 20:26:14 -07003813 enum lru_list l;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003814 for_each_lru(l) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003815 ret = mem_cgroup_force_empty_list(memcg,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003816 node, zid, l);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003817 if (ret)
3818 break;
3819 }
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08003820 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003821 if (ret)
3822 break;
3823 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003824 mem_cgroup_end_move(memcg);
3825 memcg_oom_recover(memcg);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003826		/* it seems the parent cgroup doesn't have enough memory */
3827 if (ret == -ENOMEM)
3828 goto try_to_free;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003829 cond_resched();
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003830 /* "ret" should also be checked to ensure all lists are empty. */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003831 } while (memcg->res.usage > 0 || ret);
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003832out:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003833 css_put(&memcg->css);
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003834 return ret;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003835
3836try_to_free:
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003837 /* returns EBUSY if there is a task or if we come here twice. */
3838 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003839 ret = -EBUSY;
3840 goto out;
3841 }
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003842	/* we call try-to-free pages to make this cgroup empty */
3843 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003844 /* try to free all pages in this cgroup */
3845 shrink = 1;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003846 while (nr_retries && memcg->res.usage > 0) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003847 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003848
3849 if (signal_pending(current)) {
3850 ret = -EINTR;
3851 goto out;
3852 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003853 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
Johannes Weiner185efc02011-09-14 16:21:58 -07003854 false);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003855 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003856 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003857 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003858 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003859 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003860
3861 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003862 lru_add_drain();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003863 /* try move_account...there may be some *locked* pages. */
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003864 goto move_account;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003865}
3866
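/*
 * Write handler for memory.force_empty.  Any write to that file (e.g.
 * "echo 0 > memory.force_empty" from userspace) tries to make the group's
 * charge zero by reclaiming its pages and moving remaining charges to the
 * parent, as implemented by mem_cgroup_force_empty() above.
 */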
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003867int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3868{
3869 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3870}
3871
3872
Balbir Singh18f59ea2009-01-07 18:08:07 -08003873static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3874{
3875 return mem_cgroup_from_cont(cont)->use_hierarchy;
3876}
3877
3878static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3879 u64 val)
3880{
3881 int retval = 0;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003882 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003883 struct cgroup *parent = cont->parent;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003884 struct mem_cgroup *parent_memcg = NULL;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003885
3886 if (parent)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003887 parent_memcg = mem_cgroup_from_cont(parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003888
3889 cgroup_lock();
3890 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003891 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08003892 * in the child subtrees. If it is unset, then the change can
3893 * occur, provided the current cgroup has no children.
3894 *
3895	 * For the root cgroup, parent_memcg is NULL; we allow the value to be
3896	 * set if there are no children.
3897 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003898 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08003899 (val == 1 || val == 0)) {
3900 if (list_empty(&cont->children))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003901 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003902 else
3903 retval = -EBUSY;
3904 } else
3905 retval = -EINVAL;
3906 cgroup_unlock();
3907
3908 return retval;
3909}
3910
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003911
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003912static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
Johannes Weiner7a159cc2011-03-23 16:42:38 -07003913 enum mem_cgroup_stat_index idx)
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003914{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003915 struct mem_cgroup *iter;
Johannes Weiner7a159cc2011-03-23 16:42:38 -07003916 long val = 0;
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003917
Johannes Weiner7a159cc2011-03-23 16:42:38 -07003918 /* Per-cpu values can be negative, use a signed accumulator */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003919 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003920 val += mem_cgroup_read_stat(iter, idx);
3921
3922 if (val < 0) /* race ? */
3923 val = 0;
3924 return val;
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003925}
3926
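/*
 * Report usage for memory.usage_in_bytes (swap == false) or
 * memory.memsw.usage_in_bytes (swap == true).  For non-root groups this is
 * simply the res_counter value; for the root group, usage is rebuilt from
 * the hierarchical cache, RSS and (for memsw) swap-out statistics instead
 * of reading the res_counter.
 */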
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003927static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003928{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003929 u64 val;
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003930
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003931 if (!mem_cgroup_is_root(memcg)) {
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003932 if (!swap)
Glauber Costa65c64ce2011-12-22 01:02:27 +00003933 return res_counter_read_u64(&memcg->res, RES_USAGE);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003934 else
Glauber Costa65c64ce2011-12-22 01:02:27 +00003935 return res_counter_read_u64(&memcg->memsw, RES_USAGE);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003936 }
3937
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003938 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
3939 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003940
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003941 if (swap)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003942 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003943
3944 return val << PAGE_SHIFT;
3945}
3946
Paul Menage2c3daa72008-04-29 00:59:58 -07003947static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003948{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003949 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003950 u64 val;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003951 int type, name;
3952
3953 type = MEMFILE_TYPE(cft->private);
3954 name = MEMFILE_ATTR(cft->private);
3955 switch (type) {
3956 case _MEM:
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003957 if (name == RES_USAGE)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003958 val = mem_cgroup_usage(memcg, false);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003959 else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003960 val = res_counter_read_u64(&memcg->res, name);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003961 break;
3962 case _MEMSWAP:
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003963 if (name == RES_USAGE)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003964 val = mem_cgroup_usage(memcg, true);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003965 else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003966 val = res_counter_read_u64(&memcg->memsw, name);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003967 break;
3968 default:
3969 BUG();
3970 break;
3971 }
3972 return val;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003973}
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003974/*
3975 * The users of this function are the limit files:
3976 * RES_LIMIT and RES_SOFT_LIMIT.
3977 */
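/*
 * Illustrative use from userspace (the mount point below is an assumption,
 * not something dictated by this file):
 *
 *   echo 512M > /sys/fs/cgroup/memory/<group>/memory.limit_in_bytes
 *   echo 1G   > /sys/fs/cgroup/memory/<group>/memory.memsw.limit_in_bytes
 *   echo 256M > /sys/fs/cgroup/memory/<group>/memory.soft_limit_in_bytes
 *
 * The buffer is parsed by res_counter_memparse_write_strategy(), so the
 * usual K/M/G suffixes are accepted.
 */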
Paul Menage856c13a2008-07-25 01:47:04 -07003978static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3979 const char *buffer)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003980{
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003981 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003982 int type, name;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003983 unsigned long long val;
3984 int ret;
3985
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003986 type = MEMFILE_TYPE(cft->private);
3987 name = MEMFILE_ATTR(cft->private);
3988 switch (name) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003989 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003990 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3991 ret = -EINVAL;
3992 break;
3993 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003994 /* This function does all necessary parse...reuse it */
3995 ret = res_counter_memparse_write_strategy(buffer, &val);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08003996 if (ret)
3997 break;
3998 if (type == _MEM)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003999 ret = mem_cgroup_resize_limit(memcg, val);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004000 else
4001 ret = mem_cgroup_resize_memsw_limit(memcg, val);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004002 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07004003 case RES_SOFT_LIMIT:
4004 ret = res_counter_memparse_write_strategy(buffer, &val);
4005 if (ret)
4006 break;
4007 /*
4008		 * For memsw, soft limits are hard to implement in terms
4009		 * of semantics; for now, we only support soft limits for
4010		 * memory control without swap.
4011 */
4012 if (type == _MEM)
4013 ret = res_counter_set_soft_limit(&memcg->res, val);
4014 else
4015 ret = -EINVAL;
4016 break;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07004017 default:
4018 ret = -EINVAL; /* should be BUG() ? */
4019 break;
4020 }
4021 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004022}
4023
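/*
 * Compute the effective limits seen by @memcg: walk up the hierarchy and
 * take the minimum memory and memsw limits of all ancestors that have
 * use_hierarchy set.  The results feed the "hierarchical_memory_limit" and
 * "hierarchical_memsw_limit" lines of memory.stat.
 */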
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004024static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
4025 unsigned long long *mem_limit, unsigned long long *memsw_limit)
4026{
4027 struct cgroup *cgroup;
4028 unsigned long long min_limit, min_memsw_limit, tmp;
4029
4030 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4031 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4032 cgroup = memcg->css.cgroup;
4033 if (!memcg->use_hierarchy)
4034 goto out;
4035
4036 while (cgroup->parent) {
4037 cgroup = cgroup->parent;
4038 memcg = mem_cgroup_from_cont(cgroup);
4039 if (!memcg->use_hierarchy)
4040 break;
4041 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
4042 min_limit = min(min_limit, tmp);
4043 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4044 min_memsw_limit = min(min_memsw_limit, tmp);
4045 }
4046out:
4047 *mem_limit = min_limit;
4048 *memsw_limit = min_memsw_limit;
4049 return;
4050}
4051
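/*
 * Reset handler for the *.max_usage_in_bytes and *.failcnt files: writing
 * to them clears the recorded maximum usage or the failure counter of the
 * memory (or memory+swap) res_counter.
 */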
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004052static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004053{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004054 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004055 int type, name;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004056
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004057 memcg = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004058 type = MEMFILE_TYPE(event);
4059 name = MEMFILE_ATTR(event);
4060 switch (name) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004061 case RES_MAX_USAGE:
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004062 if (type == _MEM)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004063 res_counter_reset_max(&memcg->res);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004064 else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004065 res_counter_reset_max(&memcg->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004066 break;
4067 case RES_FAILCNT:
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004068 if (type == _MEM)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004069 res_counter_reset_failcnt(&memcg->res);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004070 else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004071 res_counter_reset_failcnt(&memcg->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004072 break;
4073 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07004074
Pavel Emelyanov85cc59d2008-04-29 01:00:20 -07004075 return 0;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004076}
4077
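/*
 * memory.move_charge_at_immigrate is a bitmask selecting which charges
 * follow a task when it is moved into this cgroup.  Per the memcg
 * documentation, bit 0 covers anonymous pages and bit 1 file pages; only
 * values below (1 << NR_MOVE_TYPE) are accepted, and the write handler is
 * only functional when an MMU is configured.
 */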
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004078static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
4079 struct cftype *cft)
4080{
4081 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
4082}
4083
Daisuke Nishimura02491442010-03-10 15:22:17 -08004084#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004085static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4086 struct cftype *cft, u64 val)
4087{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004088 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004089
4090 if (val >= (1 << NR_MOVE_TYPE))
4091 return -EINVAL;
4092 /*
4093	 * We check this value several times in both can_attach() and
4094	 * attach(), so we need the cgroup lock to prevent this value from being
4095 * inconsistent.
4096 */
4097 cgroup_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004098 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004099 cgroup_unlock();
4100
4101 return 0;
4102}
Daisuke Nishimura02491442010-03-10 15:22:17 -08004103#else
4104static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4105 struct cftype *cft, u64 val)
4106{
4107 return -ENOSYS;
4108}
4109#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004110
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004111
4112/* For read statistics */
4113enum {
4114 MCS_CACHE,
4115 MCS_RSS,
KAMEZAWA Hiroyukid8046582009-12-15 16:47:09 -08004116 MCS_FILE_MAPPED,
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004117 MCS_PGPGIN,
4118 MCS_PGPGOUT,
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004119 MCS_SWAP,
Ying Han456f9982011-05-26 16:25:38 -07004120 MCS_PGFAULT,
4121 MCS_PGMAJFAULT,
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004122 MCS_INACTIVE_ANON,
4123 MCS_ACTIVE_ANON,
4124 MCS_INACTIVE_FILE,
4125 MCS_ACTIVE_FILE,
4126 MCS_UNEVICTABLE,
4127 NR_MCS_STAT,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004128};
4129
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004130struct mcs_total_stat {
4131 s64 stat[NR_MCS_STAT];
4132};
4133
4134struct {
4135 char *local_name;
4136 char *total_name;
4137} memcg_stat_strings[NR_MCS_STAT] = {
4138 {"cache", "total_cache"},
4139 {"rss", "total_rss"},
Balbir Singhd69b0422009-06-17 16:26:34 -07004140 {"mapped_file", "total_mapped_file"},
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004141 {"pgpgin", "total_pgpgin"},
4142 {"pgpgout", "total_pgpgout"},
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004143 {"swap", "total_swap"},
Ying Han456f9982011-05-26 16:25:38 -07004144 {"pgfault", "total_pgfault"},
4145 {"pgmajfault", "total_pgmajfault"},
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004146 {"inactive_anon", "total_inactive_anon"},
4147 {"active_anon", "total_active_anon"},
4148 {"inactive_file", "total_inactive_file"},
4149 {"active_file", "total_active_file"},
4150 {"unevictable", "total_unevictable"}
4151};
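/*
 * Each MCS_* index above selects one row of memcg_stat_strings: the
 * local_name is printed for this group's own pages and the total_name for
 * the hierarchical sum, both via memory.stat.
 */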
4152
4153
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004154static void
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004155mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004156{
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004157 s64 val;
4158
4159 /* per cpu stat */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004160 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004161 s->stat[MCS_CACHE] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004162 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004163 s->stat[MCS_RSS] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004164 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
KAMEZAWA Hiroyukid8046582009-12-15 16:47:09 -08004165 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004166 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004167 s->stat[MCS_PGPGIN] += val;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004168 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004169 s->stat[MCS_PGPGOUT] += val;
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004170 if (do_swap_account) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004171 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004172 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4173 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004174 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
Ying Han456f9982011-05-26 16:25:38 -07004175 s->stat[MCS_PGFAULT] += val;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004176 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
Ying Han456f9982011-05-26 16:25:38 -07004177 s->stat[MCS_PGMAJFAULT] += val;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004178
4179 /* per zone stat */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004180 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004181 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004182 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004183 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004184 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004185 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004186 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004187 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004188 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004189 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004190}
4191
4192static void
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004193mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004194{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004195 struct mem_cgroup *iter;
4196
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004197 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004198 mem_cgroup_get_local_stat(iter, s);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004199}
4200
Ying Han406eb0c2011-05-26 16:25:37 -07004201#ifdef CONFIG_NUMA
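/*
 * Backing read function for memory.numa_stat.  For each of the
 * total/file/anon/unevictable LRU page counts it prints the group-wide
 * value followed by a per-node breakdown, e.g.
 * "total=<pages> N0=<pages> N1=<pages> ...".
 */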
4202static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4203{
4204 int nid;
4205 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4206 unsigned long node_nr;
4207 struct cgroup *cont = m->private;
4208 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
4209
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004210 total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
Ying Han406eb0c2011-05-26 16:25:37 -07004211 seq_printf(m, "total=%lu", total_nr);
4212 for_each_node_state(nid, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004213 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
Ying Han406eb0c2011-05-26 16:25:37 -07004214 seq_printf(m, " N%d=%lu", nid, node_nr);
4215 }
4216 seq_putc(m, '\n');
4217
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004218 file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
Ying Han406eb0c2011-05-26 16:25:37 -07004219 seq_printf(m, "file=%lu", file_nr);
4220 for_each_node_state(nid, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004221 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4222 LRU_ALL_FILE);
Ying Han406eb0c2011-05-26 16:25:37 -07004223 seq_printf(m, " N%d=%lu", nid, node_nr);
4224 }
4225 seq_putc(m, '\n');
4226
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004227 anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
Ying Han406eb0c2011-05-26 16:25:37 -07004228 seq_printf(m, "anon=%lu", anon_nr);
4229 for_each_node_state(nid, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004230 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4231 LRU_ALL_ANON);
Ying Han406eb0c2011-05-26 16:25:37 -07004232 seq_printf(m, " N%d=%lu", nid, node_nr);
4233 }
4234 seq_putc(m, '\n');
4235
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004236 unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
Ying Han406eb0c2011-05-26 16:25:37 -07004237 seq_printf(m, "unevictable=%lu", unevictable_nr);
4238 for_each_node_state(nid, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukibb2a0de2011-07-26 16:08:22 -07004239 node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
4240 BIT(LRU_UNEVICTABLE));
Ying Han406eb0c2011-05-26 16:25:37 -07004241 seq_printf(m, " N%d=%lu", nid, node_nr);
4242 }
4243 seq_putc(m, '\n');
4244 return 0;
4245}
4246#endif /* CONFIG_NUMA */
4247
Paul Menagec64745c2008-04-29 01:00:02 -07004248static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4249 struct cgroup_map_cb *cb)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004250{
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004251 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004252 struct mcs_total_stat mystat;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004253 int i;
4254
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004255 memset(&mystat, 0, sizeof(mystat));
4256 mem_cgroup_get_local_stat(mem_cont, &mystat);
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004257
Ying Han406eb0c2011-05-26 16:25:37 -07004258
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004259 for (i = 0; i < NR_MCS_STAT; i++) {
4260 if (i == MCS_SWAP && !do_swap_account)
4261 continue;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004262 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004263 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004264
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004265 /* Hierarchical information */
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08004266 {
4267 unsigned long long limit, memsw_limit;
4268 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
4269 cb->fill(cb, "hierarchical_memory_limit", limit);
4270 if (do_swap_account)
4271 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
4272 }
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004273
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004274 memset(&mystat, 0, sizeof(mystat));
4275 mem_cgroup_get_total_stat(mem_cont, &mystat);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004276 for (i = 0; i < NR_MCS_STAT; i++) {
4277 if (i == MCS_SWAP && !do_swap_account)
4278 continue;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004279 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07004280 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07004281
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004282#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08004283 {
4284 int nid, zid;
4285 struct mem_cgroup_per_zone *mz;
4286 unsigned long recent_rotated[2] = {0, 0};
4287 unsigned long recent_scanned[2] = {0, 0};
4288
4289 for_each_online_node(nid)
4290 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4291 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
4292
4293 recent_rotated[0] +=
4294 mz->reclaim_stat.recent_rotated[0];
4295 recent_rotated[1] +=
4296 mz->reclaim_stat.recent_rotated[1];
4297 recent_scanned[0] +=
4298 mz->reclaim_stat.recent_scanned[0];
4299 recent_scanned[1] +=
4300 mz->reclaim_stat.recent_scanned[1];
4301 }
4302 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
4303 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
4304 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
4305 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
4306 }
4307#endif
4308
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004309 return 0;
4310}
4311
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004312static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
4313{
4314 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4315
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07004316 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004317}
4318
4319static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
4320 u64 val)
4321{
4322 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4323 struct mem_cgroup *parent;
Li Zefan068b38c2009-01-15 13:51:26 -08004324
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004325 if (val > 100)
4326 return -EINVAL;
4327
4328 if (cgrp->parent == NULL)
4329 return -EINVAL;
4330
4331 parent = mem_cgroup_from_cont(cgrp->parent);
Li Zefan068b38c2009-01-15 13:51:26 -08004332
4333 cgroup_lock();
4334
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004335 /* If under hierarchy, only empty-root can set this value */
4336 if ((parent->use_hierarchy) ||
Li Zefan068b38c2009-01-15 13:51:26 -08004337 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
4338 cgroup_unlock();
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004339 return -EINVAL;
Li Zefan068b38c2009-01-15 13:51:26 -08004340 }
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004341
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004342 memcg->swappiness = val;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004343
Li Zefan068b38c2009-01-15 13:51:26 -08004344 cgroup_unlock();
4345
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004346 return 0;
4347}
4348
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004349static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4350{
4351 struct mem_cgroup_threshold_ary *t;
4352 u64 usage;
4353 int i;
4354
4355 rcu_read_lock();
4356 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004357 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004358 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004359 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004360
4361 if (!t)
4362 goto unlock;
4363
4364 usage = mem_cgroup_usage(memcg, swap);
4365
4366 /*
4367	 * current_threshold points to the threshold just below usage.
4368	 * If that is not the case, a threshold was crossed after the last
4369	 * call to __mem_cgroup_threshold().
4370 */
Phil Carmody5407a562010-05-26 14:42:42 -07004371 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004372
4373 /*
4374 * Iterate backward over array of thresholds starting from
4375 * current_threshold and check if a threshold is crossed.
4376 * If none of thresholds below usage is crossed, we read
4377 * only one element of the array here.
4378 */
4379 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4380 eventfd_signal(t->entries[i].eventfd, 1);
4381
4382 /* i = current_threshold + 1 */
4383 i++;
4384
4385 /*
4386 * Iterate forward over array of thresholds starting from
4387 * current_threshold+1 and check if a threshold is crossed.
4388 * If none of thresholds above usage is crossed, we read
4389 * only one element of the array here.
4390 */
4391 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4392 eventfd_signal(t->entries[i].eventfd, 1);
4393
4394 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07004395 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004396unlock:
4397 rcu_read_unlock();
4398}
4399
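/*
 * Check the thresholds of @memcg and of every ancestor: with use_hierarchy,
 * a charge against this group also raises the usage of its parents, so
 * their registered thresholds may have been crossed as well.
 * parent_mem_cgroup() returns NULL once there is no hierarchical parent,
 * which ends the walk.
 */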
4400static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4401{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07004402 while (memcg) {
4403 __mem_cgroup_threshold(memcg, false);
4404 if (do_swap_account)
4405 __mem_cgroup_threshold(memcg, true);
4406
4407 memcg = parent_mem_cgroup(memcg);
4408 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004409}
4410
4411static int compare_thresholds(const void *a, const void *b)
4412{
4413 const struct mem_cgroup_threshold *_a = a;
4414 const struct mem_cgroup_threshold *_b = b;
4415
4416 return _a->threshold - _b->threshold;
4417}
4418
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004419static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004420{
4421 struct mem_cgroup_eventfd_list *ev;
4422
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004423 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004424 eventfd_signal(ev->eventfd, 1);
4425 return 0;
4426}
4427
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004428static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004429{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004430 struct mem_cgroup *iter;
4431
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004432 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07004433 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004434}
4435
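/*
 * Register an eventfd-based threshold notification.  Userspace typically
 * does this through cgroup.event_control, roughly:
 *
 *   echo "<event_fd> <usage_in_bytes fd> <threshold in bytes>" \
 *        > cgroup.event_control
 *
 * (the exact invocation is documented in Documentation/cgroups/memory.txt,
 * not here).  The new threshold is merged into a freshly allocated, sorted
 * array that replaces the old one under RCU.
 */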
4436static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4437 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004438{
4439 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004440 struct mem_cgroup_thresholds *thresholds;
4441 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004442 int type = MEMFILE_TYPE(cft->private);
4443 u64 threshold, usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004444 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004445
4446 ret = res_counter_memparse_write_strategy(args, &threshold);
4447 if (ret)
4448 return ret;
4449
4450 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004451
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004452 if (type == _MEM)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004453 thresholds = &memcg->thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004454 else if (type == _MEMSWAP)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004455 thresholds = &memcg->memsw_thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004456 else
4457 BUG();
4458
4459 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4460
4461	/* Check if a threshold was crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004462 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004463 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4464
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004465 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004466
4467 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004468 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004469 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004470 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004471 ret = -ENOMEM;
4472 goto unlock;
4473 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004474 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004475
4476 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004477 if (thresholds->primary) {
4478 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004479 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004480 }
4481
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004482 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004483 new->entries[size - 1].eventfd = eventfd;
4484 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004485
4486	/* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004487 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004488 compare_thresholds, NULL);
4489
4490 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004491 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004492 for (i = 0; i < size; i++) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004493 if (new->entries[i].threshold < usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004494 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004495 * new->current_threshold will not be used until
4496 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004497 * it here.
4498 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004499 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004500 }
4501 }
4502
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004503 /* Free old spare buffer and save old primary buffer as spare */
4504 kfree(thresholds->spare);
4505 thresholds->spare = thresholds->primary;
4506
4507 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004508
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004509	/* To be sure that nobody still uses the old thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004510 synchronize_rcu();
4511
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004512unlock:
4513 mutex_unlock(&memcg->thresholds_lock);
4514
4515 return ret;
4516}
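/*
 * Usage sketch (illustrative, not part of the kernel source): userspace
 * arms a threshold through the generic cgroup eventfd interface, roughly:
 *
 *   efd = eventfd(0, 0);
 *   ufd = open("<memcg dir>/memory.usage_in_bytes", O_RDONLY);
 *   cfd = open("<memcg dir>/cgroup.event_control", O_WRONLY);
 *   write(cfd, "<efd> <ufd> <threshold in bytes>", ...);
 *   read(efd, &counter, 8);    blocks until usage crosses the threshold
 *
 * "<memcg dir>" stands for wherever the memory controller hierarchy is
 * mounted; the exact path is configuration-dependent.
 */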
4517
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004518static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004519 struct cftype *cft, struct eventfd_ctx *eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004520{
4521 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004522 struct mem_cgroup_thresholds *thresholds;
4523 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004524 int type = MEMFILE_TYPE(cft->private);
4525 u64 usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004526 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004527
4528 mutex_lock(&memcg->thresholds_lock);
4529 if (type == _MEM)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004530 thresholds = &memcg->thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004531 else if (type == _MEMSWAP)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004532 thresholds = &memcg->memsw_thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004533 else
4534 BUG();
4535
4536 /*
4537 * Something went wrong if we're trying to unregister a threshold
4538 * when we don't have any thresholds registered.
4539 */
4540 BUG_ON(!thresholds);
4541
4542 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4543
4544 /* Check if a threshold crossed before removing */
4545 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4546
4547 /* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004548 size = 0;
4549 for (i = 0; i < thresholds->primary->size; i++) {
4550 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004551 size++;
4552 }
4553
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004554 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004555
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004556 /* Set thresholds array to NULL if no thresholds remain */
4557 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004558 kfree(new);
4559 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004560 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004561 }
4562
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004563 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004564
4565 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004566 new->current_threshold = -1;
4567 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4568 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004569 continue;
4570
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004571 new->entries[j] = thresholds->primary->entries[i];
4572 if (new->entries[j].threshold < usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004573 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004574 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004575 * until rcu_assign_pointer(), so it's safe to increment
4576 * it here.
4577 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004578 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004579 }
4580 j++;
4581 }
4582
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004583swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004584 /* Swap primary and spare array */
4585 thresholds->spare = thresholds->primary;
4586 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004587
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004588 /* Make sure that no reader still uses the old thresholds array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004589 synchronize_rcu();
4590
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004591 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004592}
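/*
 * Note on the primary/spare scheme used above: register_event allocates a
 * new array one entry larger and parks the old primary as ->spare, while
 * unregister_event reuses that preallocated spare, so removal never needs
 * to allocate and cannot fail. Readers walk ->primary under RCU, which is
 * why both paths publish the new array with rcu_assign_pointer() and then
 * wait in synchronize_rcu() before the old array may be reused or freed.
 */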
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004593
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004594static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4595 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4596{
4597 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4598 struct mem_cgroup_eventfd_list *event;
4599 int type = MEMFILE_TYPE(cft->private);
4600
4601 BUG_ON(type != _OOM_TYPE);
4602 event = kmalloc(sizeof(*event), GFP_KERNEL);
4603 if (!event)
4604 return -ENOMEM;
4605
Michal Hocko1af8efe2011-07-26 16:08:24 -07004606 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004607
4608 event->eventfd = eventfd;
4609 list_add(&event->list, &memcg->oom_notify);
4610
4611 /* already in OOM ? */
Michal Hocko79dfdac2011-07-26 16:08:23 -07004612 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004613 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07004614 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004615
4616 return 0;
4617}
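/*
 * Usage sketch (illustrative): OOM notifications use the same
 * cgroup.event_control mechanism as the thresholds above, just without a
 * threshold argument, e.g. writing "<eventfd> <fd of memory.oom_control>"
 * to cgroup.event_control; the eventfd is then signalled on every OOM hit
 * in this memcg (and immediately, as above, if the group is already under
 * OOM when the listener registers).
 */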
4618
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004619static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004620 struct cftype *cft, struct eventfd_ctx *eventfd)
4621{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004622 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004623 struct mem_cgroup_eventfd_list *ev, *tmp;
4624 int type = MEMFILE_TYPE(cft->private);
4625
4626 BUG_ON(type != _OOM_TYPE);
4627
Michal Hocko1af8efe2011-07-26 16:08:24 -07004628 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004629
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004630 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004631 if (ev->eventfd == eventfd) {
4632 list_del(&ev->list);
4633 kfree(ev);
4634 }
4635 }
4636
Michal Hocko1af8efe2011-07-26 16:08:24 -07004637 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004638}
4639
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004640static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4641 struct cftype *cft, struct cgroup_map_cb *cb)
4642{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004643 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004644
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004645 cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004646
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004647 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004648 cb->fill(cb, "under_oom", 1);
4649 else
4650 cb->fill(cb, "under_oom", 0);
4651 return 0;
4652}
4653
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004654static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4655 struct cftype *cft, u64 val)
4656{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004657 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004658 struct mem_cgroup *parent;
4659
4660 /* cannot set to root cgroup and only 0 and 1 are allowed */
4661 if (!cgrp->parent || !((val == 0) || (val == 1)))
4662 return -EINVAL;
4663
4664 parent = mem_cgroup_from_cont(cgrp->parent);
4665
4666 cgroup_lock();
4667 /* oom-kill-disable is a flag for subhierarchy. */
4668 if ((parent->use_hierarchy) ||
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004669 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004670 cgroup_unlock();
4671 return -EINVAL;
4672 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004673 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07004674 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004675 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004676 cgroup_unlock();
4677 return 0;
4678}
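/*
 * Usage sketch (illustrative): "echo 1 > memory.oom_control" disables the
 * OOM killer for this memcg, so tasks that hit the limit sit on the OOM
 * waitqueue instead of being killed until memory is freed or the limit is
 * raised; reading the file reports the oom_kill_disable and under_oom
 * fields filled in by mem_cgroup_oom_control_read() above.
 */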
4679
Ying Han406eb0c2011-05-26 16:25:37 -07004680#ifdef CONFIG_NUMA
4681static const struct file_operations mem_control_numa_stat_file_operations = {
4682 .read = seq_read,
4683 .llseek = seq_lseek,
4684 .release = single_release,
4685};
4686
4687static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4688{
4689 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4690
4691 file->f_op = &mem_control_numa_stat_file_operations;
4692 return single_open(file, mem_control_numa_stat_show, cont);
4693}
4694#endif /* CONFIG_NUMA */
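/*
 * Reading memory.numa_stat (CONFIG_NUMA only) is expected to give a
 * per-node breakdown of this memcg's pages, roughly lines of the form
 * "total=<pages> N0=<pages> N1=<pages> ..." followed by similar lines for
 * file, anon and unevictable pages; the exact format is produced by
 * mem_control_numa_stat_show(), defined earlier in this file.
 */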
4695
Glauber Costae5671df2011-12-11 21:47:01 +00004696#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
Glauber Costae5671df2011-12-11 21:47:01 +00004697static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4698{
Glauber Costad1a4c0b2011-12-11 21:47:04 +00004699 /*
4700 * Part of this would be better living in a separate allocation
4701 * function, leaving us with just the cgroup tree population work.
4702 * We, however, depend on state such as network's proto_list that
4703 * is only initialized after cgroup creation. The least cumbersome
4704 * way to deal with it is to defer it all to populate time.
4705 */
Glauber Costa65c64ce2011-12-22 01:02:27 +00004706 return mem_cgroup_sockets_init(cont, ss);
Glauber Costae5671df2011-12-11 21:47:01 +00004707};
4708
Glauber Costad1a4c0b2011-12-11 21:47:04 +00004709static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4710 struct cgroup *cont)
4711{
4712 mem_cgroup_sockets_destroy(cont, ss);
4713}
Glauber Costae5671df2011-12-11 21:47:01 +00004714#else
4715static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
4716{
4717 return 0;
4718}
Glauber Costad1a4c0b2011-12-11 21:47:04 +00004719
4720static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
4721 struct cgroup *cont)
4722{
4723}
Glauber Costae5671df2011-12-11 21:47:01 +00004724#endif
4725
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004726static struct cftype mem_cgroup_files[] = {
4727 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004728 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004729 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Paul Menage2c3daa72008-04-29 00:59:58 -07004730 .read_u64 = mem_cgroup_read,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004731 .register_event = mem_cgroup_usage_register_event,
4732 .unregister_event = mem_cgroup_usage_unregister_event,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004733 },
4734 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004735 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004736 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004737 .trigger = mem_cgroup_reset,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004738 .read_u64 = mem_cgroup_read,
4739 },
4740 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004741 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004742 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Paul Menage856c13a2008-07-25 01:47:04 -07004743 .write_string = mem_cgroup_write,
Paul Menage2c3daa72008-04-29 00:59:58 -07004744 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004745 },
4746 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004747 .name = "soft_limit_in_bytes",
4748 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4749 .write_string = mem_cgroup_write,
4750 .read_u64 = mem_cgroup_read,
4751 },
4752 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004753 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004754 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004755 .trigger = mem_cgroup_reset,
Paul Menage2c3daa72008-04-29 00:59:58 -07004756 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004757 },
Balbir Singh8697d332008-02-07 00:13:59 -08004758 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004759 .name = "stat",
Paul Menagec64745c2008-04-29 01:00:02 -07004760 .read_map = mem_control_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004761 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004762 {
4763 .name = "force_empty",
4764 .trigger = mem_cgroup_force_empty_write,
4765 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004766 {
4767 .name = "use_hierarchy",
4768 .write_u64 = mem_cgroup_hierarchy_write,
4769 .read_u64 = mem_cgroup_hierarchy_read,
4770 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004771 {
4772 .name = "swappiness",
4773 .read_u64 = mem_cgroup_swappiness_read,
4774 .write_u64 = mem_cgroup_swappiness_write,
4775 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004776 {
4777 .name = "move_charge_at_immigrate",
4778 .read_u64 = mem_cgroup_move_charge_read,
4779 .write_u64 = mem_cgroup_move_charge_write,
4780 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004781 {
4782 .name = "oom_control",
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004783 .read_map = mem_cgroup_oom_control_read,
4784 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004785 .register_event = mem_cgroup_oom_register_event,
4786 .unregister_event = mem_cgroup_oom_unregister_event,
4787 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4788 },
Ying Han406eb0c2011-05-26 16:25:37 -07004789#ifdef CONFIG_NUMA
4790 {
4791 .name = "numa_stat",
4792 .open = mem_control_numa_stat_open,
KAMEZAWA Hiroyuki89577122011-06-15 15:08:41 -07004793 .mode = S_IRUGO,
Ying Han406eb0c2011-05-26 16:25:37 -07004794 },
4795#endif
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004796};
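/*
 * Usage sketch (illustrative, paths depend on where the controller is
 * mounted): the files above form the per-group control interface, e.g.
 *
 *   mkdir <memcg mount>/grp
 *   echo 100M > <memcg mount>/grp/memory.limit_in_bytes
 *   echo $$   > <memcg mount>/grp/tasks
 *   cat <memcg mount>/grp/memory.usage_in_bytes
 *   cat <memcg mount>/grp/memory.failcnt
 */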
4797
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004798#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4799static struct cftype memsw_cgroup_files[] = {
4800 {
4801 .name = "memsw.usage_in_bytes",
4802 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4803 .read_u64 = mem_cgroup_read,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004804 .register_event = mem_cgroup_usage_register_event,
4805 .unregister_event = mem_cgroup_usage_unregister_event,
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004806 },
4807 {
4808 .name = "memsw.max_usage_in_bytes",
4809 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4810 .trigger = mem_cgroup_reset,
4811 .read_u64 = mem_cgroup_read,
4812 },
4813 {
4814 .name = "memsw.limit_in_bytes",
4815 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4816 .write_string = mem_cgroup_write,
4817 .read_u64 = mem_cgroup_read,
4818 },
4819 {
4820 .name = "memsw.failcnt",
4821 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4822 .trigger = mem_cgroup_reset,
4823 .read_u64 = mem_cgroup_read,
4824 },
4825};
4826
4827static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4828{
4829 if (!do_swap_account)
4830 return 0;
4831 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4832 ARRAY_SIZE(memsw_cgroup_files));
4833};
4834#else
4835static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4836{
4837 return 0;
4838}
4839#endif
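/*
 * The memory.memsw.* files mirror the memory.* files but account
 * memory+swap; they are only registered when do_swap_account is set,
 * i.e. when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is enabled and swap
 * accounting has not been turned off via the swapaccount= boot option
 * handled at the bottom of this file.
 */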
4840
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004841static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004842{
4843 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004844 struct mem_cgroup_per_zone *mz;
Christoph Lameterb69408e2008-10-18 20:26:14 -07004845 enum lru_list l;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004846 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004847 /*
4848 * This routine is called for each possible node.
4849 * But it's a BUG to call kmalloc() against an offline node.
4850 *
4851 * TODO: this routine can waste a lot of memory for nodes which will
4852 * never be onlined. It's better to use a memory hotplug callback
4853 * function.
4854 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004855 if (!node_state(node, N_NORMAL_MEMORY))
4856 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004857 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004858 if (!pn)
4859 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004860
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004861 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4862 mz = &pn->zoneinfo[zone];
Christoph Lameterb69408e2008-10-18 20:26:14 -07004863 for_each_lru(l)
Johannes Weiner6290df52012-01-12 17:18:10 -08004864 INIT_LIST_HEAD(&mz->lruvec.lists[l]);
Balbir Singhf64c3f52009-09-23 15:56:37 -07004865 mz->usage_in_excess = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07004866 mz->on_tree = false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004867 mz->mem = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004868 }
Igor Mammedov0a619e52011-11-02 13:38:21 -07004869 memcg->info.nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004870 return 0;
4871}
4872
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004873static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004874{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004875 kfree(memcg->info.nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004876}
4877
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004878static struct mem_cgroup *mem_cgroup_alloc(void)
4879{
4880 struct mem_cgroup *mem;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004881 int size = sizeof(struct mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004882
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004883 /* Can be very big if MAX_NUMNODES is very big */
Jan Blunckc8dad2b2009-01-07 18:07:53 -08004884 if (size < PAGE_SIZE)
Jesper Juhl17295c82011-01-13 15:47:42 -08004885 mem = kzalloc(size, GFP_KERNEL);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004886 else
Jesper Juhl17295c82011-01-13 15:47:42 -08004887 mem = vzalloc(size);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004888
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004889 if (!mem)
4890 return NULL;
4891
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004892 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004893 if (!mem->stat)
4894 goto out_free;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07004895 spin_lock_init(&mem->pcp_counter_lock);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004896 return mem;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004897
4898out_free:
4899 if (size < PAGE_SIZE)
4900 kfree(mem);
4901 else
4902 vfree(mem);
4903 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004904}
4905
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004906/*
4907 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4908 * (scanning all of them at force_empty is too costly...)
4909 *
4910 * Instead of clearing all references at force_empty, we remember
4911 * the number of references from swap_cgroup and free the mem_cgroup when
4912 * it goes down to 0.
4913 *
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004914 * Removal of cgroup itself succeeds regardless of refs from swap.
4915 */
4916
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004917static void __mem_cgroup_free(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004918{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004919 int node;
4920
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004921 mem_cgroup_remove_from_trees(memcg);
4922 free_css_id(&mem_cgroup_subsys, &memcg->css);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004923
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004924 for_each_node_state(node, N_POSSIBLE)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004925 free_mem_cgroup_per_zone_info(memcg, node);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004926
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004927 free_percpu(memcg->stat);
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004928 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004929 kfree(memcg);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004930 else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004931 vfree(memcg);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004932}
4933
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004934static void mem_cgroup_get(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004935{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004936 atomic_inc(&memcg->refcnt);
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004937}
4938
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004939static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004940{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004941 if (atomic_sub_and_test(count, &memcg->refcnt)) {
4942 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
4943 __mem_cgroup_free(memcg);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004944 if (parent)
4945 mem_cgroup_put(parent);
4946 }
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08004947}
4948
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004949static void mem_cgroup_put(struct mem_cgroup *memcg)
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004950{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004951 __mem_cgroup_put(memcg, 1);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004952}
4953
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004954/*
4955 * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
4956 */
Glauber Costae1aab162011-12-11 21:47:03 +00004957struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004958{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004959 if (!memcg->res.parent)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004960 return NULL;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004961 return mem_cgroup_from_res_counter(memcg->res.parent, res);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004962}
Glauber Costae1aab162011-12-11 21:47:03 +00004963EXPORT_SYMBOL(parent_mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004964
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004965#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4966static void __init enable_swap_cgroup(void)
4967{
Hirokazu Takahashif8d66542009-01-07 18:08:02 -08004968 if (!mem_cgroup_disabled() && really_do_swap_account)
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004969 do_swap_account = 1;
4970}
4971#else
4972static void __init enable_swap_cgroup(void)
4973{
4974}
4975#endif
4976
Balbir Singhf64c3f52009-09-23 15:56:37 -07004977static int mem_cgroup_soft_limit_tree_init(void)
4978{
4979 struct mem_cgroup_tree_per_node *rtpn;
4980 struct mem_cgroup_tree_per_zone *rtpz;
4981 int tmp, node, zone;
4982
4983 for_each_node_state(node, N_POSSIBLE) {
4984 tmp = node;
4985 if (!node_state(node, N_NORMAL_MEMORY))
4986 tmp = -1;
4987 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4988 if (!rtpn)
4989 return 1;
4990
4991 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4992
4993 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4994 rtpz = &rtpn->rb_tree_per_zone[zone];
4995 rtpz->rb_root = RB_ROOT;
4996 spin_lock_init(&rtpz->lock);
4997 }
4998 }
4999 return 0;
5000}
5001
Li Zefan0eb253e2009-01-15 13:51:25 -08005002static struct cgroup_subsys_state * __ref
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005003mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
5004{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005005 struct mem_cgroup *memcg, *parent;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07005006 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005007 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005008
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005009 memcg = mem_cgroup_alloc();
5010 if (!memcg)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07005011 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08005012
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005013 for_each_node_state(node, N_POSSIBLE)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005014 if (alloc_mem_cgroup_per_zone_info(memcg, node))
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005015 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07005016
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005017 /* root ? */
Balbir Singh28dbc4b2009-01-07 18:08:05 -08005018 if (cont->parent == NULL) {
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08005019 int cpu;
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005020 enable_swap_cgroup();
Balbir Singh28dbc4b2009-01-07 18:08:05 -08005021 parent = NULL;
Balbir Singhf64c3f52009-09-23 15:56:37 -07005022 if (mem_cgroup_soft_limit_tree_init())
5023 goto free_out;
Hillf Dantona41c58a2011-12-19 17:11:57 -08005024 root_mem_cgroup = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08005025 for_each_possible_cpu(cpu) {
5026 struct memcg_stock_pcp *stock =
5027 &per_cpu(memcg_stock, cpu);
5028 INIT_WORK(&stock->work, drain_local_stock);
5029 }
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07005030 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
Balbir Singh18f59ea2009-01-07 18:08:07 -08005031 } else {
Balbir Singh28dbc4b2009-01-07 18:08:05 -08005032 parent = mem_cgroup_from_cont(cont->parent);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005033 memcg->use_hierarchy = parent->use_hierarchy;
5034 memcg->oom_kill_disable = parent->oom_kill_disable;
Balbir Singh18f59ea2009-01-07 18:08:07 -08005035 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08005036
Balbir Singh18f59ea2009-01-07 18:08:07 -08005037 if (parent && parent->use_hierarchy) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005038 res_counter_init(&memcg->res, &parent->res);
5039 res_counter_init(&memcg->memsw, &parent->memsw);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08005040 /*
5041 * We increment refcnt of the parent to ensure that we can
5042 * safely access it on res_counter_charge/uncharge.
5043 * This refcnt will be decremented when freeing this
5044 * mem_cgroup(see mem_cgroup_put).
5045 */
5046 mem_cgroup_get(parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08005047 } else {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005048 res_counter_init(&memcg->res, NULL);
5049 res_counter_init(&memcg->memsw, NULL);
Balbir Singh18f59ea2009-01-07 18:08:07 -08005050 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005051 memcg->last_scanned_node = MAX_NUMNODES;
5052 INIT_LIST_HEAD(&memcg->oom_notify);
Balbir Singh6d61ef42009-01-07 18:08:06 -08005053
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08005054 if (parent)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005055 memcg->swappiness = mem_cgroup_swappiness(parent);
5056 atomic_set(&memcg->refcnt, 1);
5057 memcg->move_charge_at_immigrate = 0;
5058 mutex_init(&memcg->thresholds_lock);
5059 return &memcg->css;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005060free_out:
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005061 __mem_cgroup_free(memcg);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07005062 return ERR_PTR(error);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005063}
5064
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07005065static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005066 struct cgroup *cont)
5067{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005068 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07005069
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005070 return mem_cgroup_force_empty(memcg, false);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005071}
5072
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005073static void mem_cgroup_destroy(struct cgroup_subsys *ss,
5074 struct cgroup *cont)
5075{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005076 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08005077
Glauber Costad1a4c0b2011-12-11 21:47:04 +00005078 kmem_cgroup_destroy(ss, cont);
5079
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005080 mem_cgroup_put(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005081}
5082
5083static int mem_cgroup_populate(struct cgroup_subsys *ss,
5084 struct cgroup *cont)
5085{
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08005086 int ret;
5087
5088 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
5089 ARRAY_SIZE(mem_cgroup_files));
5090
5091 if (!ret)
5092 ret = register_memsw_files(cont, ss);
Glauber Costae5671df2011-12-11 21:47:01 +00005093
5094 if (!ret)
5095 ret = register_kmem_files(cont, ss);
5096
KAMEZAWA Hiroyuki8c7c6e32009-01-07 18:08:00 -08005097 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005098}
5099
Daisuke Nishimura02491442010-03-10 15:22:17 -08005100#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005101/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005102#define PRECHARGE_COUNT_AT_ONCE 256
5103static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005104{
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005105 int ret = 0;
5106 int batch_count = PRECHARGE_COUNT_AT_ONCE;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005107 struct mem_cgroup *memcg = mc.to;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005108
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005109 if (mem_cgroup_is_root(memcg)) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005110 mc.precharge += count;
5111 /* we don't need css_get for root */
5112 return ret;
5113 }
5114 /* try to charge at once */
5115 if (count > 1) {
5116 struct res_counter *dummy;
5117 /*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005118 * "memcg" cannot be under rmdir() because we've already checked
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005119 * by cgroup_lock_live_cgroup() that it is not removed and we
5120 * are still under the same cgroup_mutex. So we can postpone
5121 * css_get().
5122 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005123 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005124 goto one_by_one;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005125 if (do_swap_account && res_counter_charge(&memcg->memsw,
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005126 PAGE_SIZE * count, &dummy)) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005127 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005128 goto one_by_one;
5129 }
5130 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005131 return ret;
5132 }
5133one_by_one:
5134 /* fall back to one by one charge */
5135 while (count--) {
5136 if (signal_pending(current)) {
5137 ret = -EINTR;
5138 break;
5139 }
5140 if (!batch_count--) {
5141 batch_count = PRECHARGE_COUNT_AT_ONCE;
5142 cond_resched();
5143 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005144 ret = __mem_cgroup_try_charge(NULL,
5145 GFP_KERNEL, 1, &memcg, false);
5146 if (ret || !memcg)
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005147 /* mem_cgroup_clear_mc() will do uncharge later */
5148 return -ENOMEM;
5149 mc.precharge++;
5150 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005151 return ret;
5152}
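/*
 * Overview of the move-charge flow (descriptive): mem_cgroup_can_attach()
 * walks the moving task's page tables via mem_cgroup_count_precharge() to
 * count candidate pages, then precharges that many pages to the destination
 * memcg here (batched when possible, falling back to charging one page at
 * a time). mem_cgroup_move_charge() later consumes mc.precharge as each
 * page or swap entry is actually moved, and mem_cgroup_clear_mc() cancels
 * whatever is left over.
 */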
5153
5154/**
5155 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
5156 * @vma: the vma the pte to be checked belongs to
5157 * @addr: the address corresponding to the pte to be checked
5158 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08005159 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005160 *
5161 * Returns
5162 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5163 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5164 * move charge. If @target is not NULL, the page is stored in target->page
5165 * with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08005166 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5167 * target for charge migration. If @target is not NULL, the entry is stored
5168 * in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005169 *
5170 * Called with pte lock held.
5171 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005172union mc_target {
5173 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005174 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005175};
5176
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005177enum mc_target_type {
5178 MC_TARGET_NONE, /* not used */
5179 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08005180 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005181};
5182
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005183static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5184 unsigned long addr, pte_t ptent)
5185{
5186 struct page *page = vm_normal_page(vma, addr, ptent);
5187
5188 if (!page || !page_mapped(page))
5189 return NULL;
5190 if (PageAnon(page)) {
5191 /* we don't move shared anon */
5192 if (!move_anon() || page_mapcount(page) > 2)
5193 return NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005194 } else if (!move_file())
5195 /* we ignore mapcount for file pages */
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005196 return NULL;
5197 if (!get_page_unless_zero(page))
5198 return NULL;
5199
5200 return page;
5201}
5202
5203static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5204 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5205{
5206 int usage_count;
5207 struct page *page = NULL;
5208 swp_entry_t ent = pte_to_swp_entry(ptent);
5209
5210 if (!move_anon() || non_swap_entry(ent))
5211 return NULL;
5212 usage_count = mem_cgroup_count_swap_user(ent, &page);
5213 if (usage_count > 1) { /* we don't move shared anon */
5214 if (page)
5215 put_page(page);
5216 return NULL;
5217 }
5218 if (do_swap_account)
5219 entry->val = ent.val;
5220
5221 return page;
5222}
5223
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005224static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5225 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5226{
5227 struct page *page = NULL;
5228 struct inode *inode;
5229 struct address_space *mapping;
5230 pgoff_t pgoff;
5231
5232 if (!vma->vm_file) /* anonymous vma */
5233 return NULL;
5234 if (!move_file())
5235 return NULL;
5236
5237 inode = vma->vm_file->f_path.dentry->d_inode;
5238 mapping = vma->vm_file->f_mapping;
5239 if (pte_none(ptent))
5240 pgoff = linear_page_index(vma, addr);
5241 else /* pte_file(ptent) is true */
5242 pgoff = pte_to_pgoff(ptent);
5243
5244 /* page is moved even if it's not RSS of this task(page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07005245 page = find_get_page(mapping, pgoff);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005246
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07005247#ifdef CONFIG_SWAP
5248 /* shmem/tmpfs may report page out on swap: account for that too. */
5249 if (radix_tree_exceptional_entry(page)) {
5250 swp_entry_t swap = radix_to_swp_entry(page);
5251 if (do_swap_account)
5252 *entry = swap;
5253 page = find_get_page(&swapper_space, swap.val);
5254 }
5255#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005256 return page;
5257}
5258
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005259static int is_target_pte_for_mc(struct vm_area_struct *vma,
5260 unsigned long addr, pte_t ptent, union mc_target *target)
5261{
Daisuke Nishimura02491442010-03-10 15:22:17 -08005262 struct page *page = NULL;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005263 struct page_cgroup *pc;
5264 int ret = 0;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005265 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005266
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005267 if (pte_present(ptent))
5268 page = mc_handle_present_pte(vma, addr, ptent);
5269 else if (is_swap_pte(ptent))
5270 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005271 else if (pte_none(ptent) || pte_file(ptent))
5272 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005273
5274 if (!page && !ent.val)
Daisuke Nishimura02491442010-03-10 15:22:17 -08005275 return 0;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005276 if (page) {
5277 pc = lookup_page_cgroup(page);
5278 /*
5279 * Do only a loose check w/o the page_cgroup lock.
5280 * mem_cgroup_move_account() checks whether the pc is valid
5281 * under the lock.
5282 */
5283 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
5284 ret = MC_TARGET_PAGE;
5285 if (target)
5286 target->page = page;
5287 }
5288 if (!ret || !target)
5289 put_page(page);
5290 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005291 /* There is a swap entry and a page doesn't exist or isn't charged */
5292 if (ent.val && !ret &&
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07005293 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
5294 ret = MC_TARGET_SWAP;
5295 if (target)
5296 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005297 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005298 return ret;
5299}
5300
5301static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5302 unsigned long addr, unsigned long end,
5303 struct mm_walk *walk)
5304{
5305 struct vm_area_struct *vma = walk->private;
5306 pte_t *pte;
5307 spinlock_t *ptl;
5308
Dave Hansen03319322011-03-22 16:32:56 -07005309 split_huge_page_pmd(walk->mm, pmd);
5310
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005311 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5312 for (; addr != end; pte++, addr += PAGE_SIZE)
5313 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
5314 mc.precharge++; /* increment precharge temporarily */
5315 pte_unmap_unlock(pte - 1, ptl);
5316 cond_resched();
5317
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005318 return 0;
5319}
5320
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005321static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5322{
5323 unsigned long precharge;
5324 struct vm_area_struct *vma;
5325
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005326 down_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005327 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5328 struct mm_walk mem_cgroup_count_precharge_walk = {
5329 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5330 .mm = mm,
5331 .private = vma,
5332 };
5333 if (is_vm_hugetlb_page(vma))
5334 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005335 walk_page_range(vma->vm_start, vma->vm_end,
5336 &mem_cgroup_count_precharge_walk);
5337 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005338 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005339
5340 precharge = mc.precharge;
5341 mc.precharge = 0;
5342
5343 return precharge;
5344}
5345
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005346static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5347{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005348 unsigned long precharge = mem_cgroup_count_precharge(mm);
5349
5350 VM_BUG_ON(mc.moving_task);
5351 mc.moving_task = current;
5352 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005353}
5354
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005355/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5356static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005357{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005358 struct mem_cgroup *from = mc.from;
5359 struct mem_cgroup *to = mc.to;
5360
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005361 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005362 if (mc.precharge) {
5363 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
5364 mc.precharge = 0;
5365 }
5366 /*
5367 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5368 * we must uncharge here.
5369 */
5370 if (mc.moved_charge) {
5371 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
5372 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005373 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005374 /* we must fixup refcnts and charges */
5375 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005376 /* uncharge swap account from the old cgroup */
5377 if (!mem_cgroup_is_root(mc.from))
5378 res_counter_uncharge(&mc.from->memsw,
5379 PAGE_SIZE * mc.moved_swap);
5380 __mem_cgroup_put(mc.from, mc.moved_swap);
5381
5382 if (!mem_cgroup_is_root(mc.to)) {
5383 /*
5384 * we charged both to->res and to->memsw, so we should
5385 * uncharge to->res.
5386 */
5387 res_counter_uncharge(&mc.to->res,
5388 PAGE_SIZE * mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005389 }
5390 /* we've already done mem_cgroup_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005391 mc.moved_swap = 0;
5392 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005393 memcg_oom_recover(from);
5394 memcg_oom_recover(to);
5395 wake_up_all(&mc.waitq);
5396}
5397
5398static void mem_cgroup_clear_mc(void)
5399{
5400 struct mem_cgroup *from = mc.from;
5401
5402 /*
5403 * we must clear moving_task before waking up waiters at the end of
5404 * task migration.
5405 */
5406 mc.moving_task = NULL;
5407 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005408 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005409 mc.from = NULL;
5410 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005411 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07005412 mem_cgroup_end_move(from);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005413}
5414
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005415static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5416 struct cgroup *cgroup,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005417 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005418{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005419 struct task_struct *p = cgroup_taskset_first(tset);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005420 int ret = 0;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005421 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005422
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005423 if (memcg->move_charge_at_immigrate) {
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005424 struct mm_struct *mm;
5425 struct mem_cgroup *from = mem_cgroup_from_task(p);
5426
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005427 VM_BUG_ON(from == memcg);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005428
5429 mm = get_task_mm(p);
5430 if (!mm)
5431 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005432 /* We move charges only when we move the owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005433 if (mm->owner == p) {
5434 VM_BUG_ON(mc.from);
5435 VM_BUG_ON(mc.to);
5436 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005437 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005438 VM_BUG_ON(mc.moved_swap);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07005439 mem_cgroup_start_move(from);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005440 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005441 mc.from = from;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005442 mc.to = memcg;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005443 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005444 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005445
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005446 ret = mem_cgroup_precharge_mc(mm);
5447 if (ret)
5448 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005449 }
5450 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005451 }
5452 return ret;
5453}
5454
5455static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5456 struct cgroup *cgroup,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005457 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005458{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005459 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005460}
5461
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005462static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5463 unsigned long addr, unsigned long end,
5464 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005465{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005466 int ret = 0;
5467 struct vm_area_struct *vma = walk->private;
5468 pte_t *pte;
5469 spinlock_t *ptl;
5470
Dave Hansen03319322011-03-22 16:32:56 -07005471 split_huge_page_pmd(walk->mm, pmd);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005472retry:
5473 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5474 for (; addr != end; addr += PAGE_SIZE) {
5475 pte_t ptent = *(pte++);
5476 union mc_target target;
5477 int type;
5478 struct page *page;
5479 struct page_cgroup *pc;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005480 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005481
5482 if (!mc.precharge)
5483 break;
5484
5485 type = is_target_pte_for_mc(vma, addr, ptent, &target);
5486 switch (type) {
5487 case MC_TARGET_PAGE:
5488 page = target.page;
5489 if (isolate_lru_page(page))
5490 goto put;
5491 pc = lookup_page_cgroup(page);
Johannes Weiner7ec99d62011-03-23 16:42:36 -07005492 if (!mem_cgroup_move_account(page, 1, pc,
5493 mc.from, mc.to, false)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005494 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005495 /* we uncharge from mc.from later. */
5496 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005497 }
5498 putback_lru_page(page);
5499put: /* is_target_pte_for_mc() gets the page */
5500 put_page(page);
5501 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005502 case MC_TARGET_SWAP:
5503 ent = target.ent;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005504 if (!mem_cgroup_move_swap_account(ent,
5505 mc.from, mc.to, false)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005506 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005507 /* we fixup refcnts and charges later. */
5508 mc.moved_swap++;
5509 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08005510 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005511 default:
5512 break;
5513 }
5514 }
5515 pte_unmap_unlock(pte - 1, ptl);
5516 cond_resched();
5517
5518 if (addr != end) {
5519 /*
5520 * We have consumed all precharges we got in can_attach().
5521 * We try to charge one by one, but don't do any additional
5522 * charges to mc.to once a charge has failed in the attach()
5523 * phase.
5524 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005525 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005526 if (!ret)
5527 goto retry;
5528 }
5529
5530 return ret;
5531}
5532
5533static void mem_cgroup_move_charge(struct mm_struct *mm)
5534{
5535 struct vm_area_struct *vma;
5536
5537 lru_add_drain_all();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005538retry:
5539 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5540 /*
5541 * Someone who is holding the mmap_sem might be waiting on
5542 * the waitq. So we cancel all extra charges, wake up all waiters,
5543 * and retry. Because we cancel precharges, we might not be able
5544 * to move enough charges, but moving charge is a best-effort
5545 * feature anyway, so it wouldn't be a big problem.
5546 */
5547 __mem_cgroup_clear_mc();
5548 cond_resched();
5549 goto retry;
5550 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005551 for (vma = mm->mmap; vma; vma = vma->vm_next) {
5552 int ret;
5553 struct mm_walk mem_cgroup_move_charge_walk = {
5554 .pmd_entry = mem_cgroup_move_charge_pte_range,
5555 .mm = mm,
5556 .private = vma,
5557 };
5558 if (is_vm_hugetlb_page(vma))
5559 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005560 ret = walk_page_range(vma->vm_start, vma->vm_end,
5561 &mem_cgroup_move_charge_walk);
5562 if (ret)
5563 /*
5564 * This means we have consumed all precharges and failed to
5565 * do an additional charge. Just abandon here.
5566 */
5567 break;
5568 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005569 up_read(&mm->mmap_sem);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005570}
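/*
 * Usage sketch (illustrative): charge moving is opt-in per destination
 * group, e.g. "echo 3 > memory.move_charge_at_immigrate" (bit 0 selects
 * anonymous pages, bit 1 file pages) before moving a task by writing its
 * pid into the destination's tasks file; as checked in can_attach(),
 * charges only follow the task that owns the mm.
 */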
5571
Balbir Singh67e465a2008-02-07 00:13:54 -08005572static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5573 struct cgroup *cont,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005574 struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08005575{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005576 struct task_struct *p = cgroup_taskset_first(tset);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005577 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005578
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005579 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005580 if (mc.to)
5581 mem_cgroup_move_charge(mm);
5582 put_swap_token(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005583 mmput(mm);
5584 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005585 if (mc.to)
5586 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08005587}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005588#else /* !CONFIG_MMU */
5589static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5590 struct cgroup *cgroup,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005591 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005592{
5593 return 0;
5594}
5595static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5596 struct cgroup *cgroup,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005597 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005598{
5599}
5600static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5601 struct cgroup *cont,
Tejun Heo2f7ee562011-12-12 18:12:21 -08005602 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005603{
5604}
5605#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005606
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005607struct cgroup_subsys mem_cgroup_subsys = {
5608 .name = "memory",
5609 .subsys_id = mem_cgroup_subsys_id,
5610 .create = mem_cgroup_create,
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005611 .pre_destroy = mem_cgroup_pre_destroy,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005612 .destroy = mem_cgroup_destroy,
5613 .populate = mem_cgroup_populate,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005614 .can_attach = mem_cgroup_can_attach,
5615 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005616 .attach = mem_cgroup_move_task,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005617 .early_init = 0,
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07005618 .use_id = 1,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005619};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005620
5621#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
Michal Hockoa42c3902010-11-24 12:57:08 -08005622static int __init enable_swap_account(char *s)
5623{
5624 /* consider enabled if no parameter or 1 is given */
Michal Hockoa2c89902011-05-24 17:12:50 -07005625 if (!strcmp(s, "1"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005626 really_do_swap_account = 1;
Michal Hockoa2c89902011-05-24 17:12:50 -07005627 else if (!strcmp(s, "0"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005628 really_do_swap_account = 0;
5629 return 1;
5630}
Michal Hockoa2c89902011-05-24 17:12:50 -07005631__setup("swapaccount=", enable_swap_account);
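/*
 * Boot-time control (descriptive): passing "swapaccount=0" on the kernel
 * command line clears really_do_swap_account and "swapaccount=1" sets it;
 * enable_swap_cgroup() earlier in this file then decides whether
 * do_swap_account (and with it the memsw files) is enabled.
 */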
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005632
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005633#endif