/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
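
/*
 * Illustrative usage (not from this file): a reader such as /proc/vmstat
 * snapshots all event counters at once and then indexes into the array by
 * item, e.g.
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk("page faults so far: %lu\n", events[PGFAULT]);
 *
 * The snapshot is only approximate, since other CPUs keep counting while
 * it is taken.
 */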

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds the foreign cpu's events to the current processor's
 * counters but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer; more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
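
/*
 * Worked example (illustrative, assuming 4KB pages): a 1GB zone has
 * 262144 present pages, so mem = 262144 >> (27 - 12) = 8.  With four
 * online CPUs, threshold = 2 * fls(4) * (1 + fls(8)) = 2 * 3 * 5 = 30,
 * i.e. each per-cpu counter may drift by up to 30 before it is folded
 * into the zone and global counters.
 */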

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
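
/*
 * Illustrative usage (not from this file): callers that already run with
 * interrupts disabled (e.g. inside the page allocator) use the __ variant,
 * everyone else uses the irq-safe wrapper:
 *
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, -nr_freed);
 *	mod_zone_page_state(zone, NR_BOUNCE, 1);
 *
 * nr_freed is just a placeholder for whatever delta the caller computed.
 */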

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place, which may allow the compiler to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
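
/*
 * Illustrative example: with stat_threshold == 32 the overstep is 16.
 * When the per-cpu diff reaches 33 we fold 33 + 16 = 49 into the zone
 * counter and leave the diff at -16, so the next 48 increments can be
 * absorbed locally before the global counters have to be touched again.
 */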

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These updates can cause remote node cache
 * line bouncing and should therefore only be done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}

#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone preferred by the caller of the allocator
 * z		  = the zone from which the allocation actually occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
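
/*
 * Illustrative reading of the counters above: an allocation that was
 * intended for node 0 but satisfied from node 1 bumps NUMA_MISS on the
 * node 1 zone and NUMA_FOREIGN on the preferred node 0 zone; whether it
 * additionally counts as NUMA_LOCAL or NUMA_OTHER depends on the node
 * the allocating cpu happens to be running on.
 */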
#endif

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
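
/*
 * Illustrative /proc/buddyinfo line produced by frag_show_print()
 * (the numbers are made up):
 *
 *	Node 0, zone   Normal    145     62     33     12      5      2      1      0      0      1      3
 *
 * One column per order, i.e. the count of free blocks of 2^order pages.
 */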

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect, so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}
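
/*
 * Illustrative /proc/pagetypeinfo excerpt (values made up, columns
 * abbreviated) as assembled by the functions above:
 *
 *	Page block order: 9
 *	Pages per block:  512
 *
 *	Free pages count per migrate type at order       0      1      2 ...
 *	Node    0, zone   Normal, type    Unmovable     12      5      3 ...
 *
 *	Number of blocks type     Unmovable  Reclaimable      Movable ...
 *	Node 0, zone   Normal           120           40         1800 ...
 */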

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = zone_pcp(zone, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
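
/*
 * Each /proc/vmstat line pairs a name from vmstat_text with the value
 * snapshotted in vmstat_start(), e.g. (values illustrative):
 *
 *	nr_free_pages 81925
 *	pgpgin 2181341
 */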

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work,
				 __round_jiffies_relative(HZ, cpu));
}
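
/*
 * Design note (summarising the calls above): the work item is declared
 * deferrable, so an otherwise idle cpu is not woken just to fold its
 * statistics, and the expiry time is rounded per cpu so the periodic
 * updates can batch with other timer activity instead of firing at
 * arbitrary offsets on every cpu.
 */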

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)