/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask argument should be passed GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones, so
 * the "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could be used, but having a rule avoids ambiguous code: charge
 * functions' gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 *  sane.)
 */
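
/*
 * A minimal usage sketch for the rule above (illustrative only; the calling
 * context, error handling, and VM_FAULT_OOM return are assumptions, not part
 * of this header):
 *
 *      if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *              return VM_FAULT_OOM;
 *
 * or, when forwarding a caller-supplied mask:
 *
 *      if (mem_cgroup_cache_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK))
 *              goto uncharge_out;
 */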

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
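
/*
 * The three swap-in helpers above form a try/commit/cancel protocol. A rough
 * sketch, assuming a do_swap_page()-like caller ("install_failed" and the
 * surrounding locking are illustrative assumptions):
 *
 *      struct mem_cgroup *ptr = NULL;
 *
 *      if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *              return VM_FAULT_OOM;
 *      ...map the page into the page tables...
 *      if (install_failed)
 *              mem_cgroup_cancel_charge_swapin(ptr);
 *      else
 *              mem_cgroup_commit_charge_swapin(page, ptr);
 */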

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
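
/*
 * A hedged sketch of the batching pair above: bracket a run of per-page
 * uncharges so the charges can be coalesced and the accounting updated per
 * batch rather than per page (the freeing loop below is illustrative):
 *
 *      mem_cgroup_uncharge_start();
 *      list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *              mem_cgroup_uncharge_page(page);
 *      mem_cgroup_uncharge_end();
 */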

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);
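
/*
 * Migration charging is a two-phase protocol; a sketch under the assumption
 * of a migrate_pages()-style caller (the copy step and "rc" handling are
 * illustrative, not defined here):
 *
 *      struct mem_cgroup *mem = NULL;
 *
 *      if (mem_cgroup_prepare_migration(page, &mem))
 *              return -ENOMEM;
 *      rc = copy_and_remap(page, newpage);
 *      ...
 *      mem_cgroup_end_migration(mem, page, newpage);
 *
 * mem_cgroup_end_migration() is expected to run on both the success and
 * failure paths, settling the charge on whichever page remains in use.
 */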

/*
 * For memory reclaim.
 */
extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}
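
/*
 * mem_cgroup_disabled() is the cheap "compiled in but switched off?" test;
 * hot paths typically bail out early. An illustrative guard (not a call site
 * defined in this header):
 *
 *      if (mem_cgroup_disabled())
 *              return;
 *      mem_cgroup_update_file_mapped(page, 1);
 */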

void mem_cgroup_update_file_mapped(struct page *page, int val);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask, int nid,
						int zid);
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					    struct page *oldpage,
					    struct page *newpage)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
			 enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_update_file_mapped(struct page *page,
						 int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask, int nid, int zid)
{
	return 0;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */