/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
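
/*
 * Usage sketch (illustrative; no caller like this exists in this file):
 * snapshot all event counters at once and read a single event:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_info("page faults so far: %lu\n", events[PGFAULT]);
 */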

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
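
/*
 * Worked example (illustrative): with low - min = 128 pages and four
 * CPUs online, the pressure threshold is max(1, 128 / 4) = 32, well
 * under the 125 cap. The worst-case combined per-cpu drift is then
 * 4 * 32 = 128 pages, which cannot silently push the real free count
 * below the min watermark.
 */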

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1 GB	4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
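
/*
 * Worked example (illustrative): a 1 GB zone is mem = 8 units of
 * 128 MB, so with 4 CPUs online the threshold is
 * 2 * fls(4) * (1 + fls(8)) = 2 * 3 * 5 = 30, below the 125 cap.
 */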

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
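
/*
 * Usage sketch (illustrative; mirrors how a reclaim daemon could drive
 * this interface): tighten the thresholds while reclaiming, then
 * restore the normal ones afterwards:
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *	... reclaim ...
 *	set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */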

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
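
/*
 * Example (illustrative; "page" and "nr_pages" stand in for a caller's
 * variables): a path that already runs with interrupts off can batch a
 * multi-page update in a single call:
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr_pages);
 */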

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
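
/*
 * Worked example (illustrative): with a threshold t = 32 the overstep
 * is t >> 1 = 16. When the per-cpu diff reaches 33, 33 + 16 = 49 is
 * folded into the zone counter and the diff restarts at -16, so the
 * next 48 increments stay purely per-cpu before the threshold trips
 * again.
 */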

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *     0       No overstepping
 *     1       Overstepping half of threshold
 *     -1      Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
	enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
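
/*
 * Example (illustrative): mod_state(zone, NR_FILE_PAGES, 1, 1) is the
 * lockless equivalent of the irq-disabling inc_zone_state() below. If
 * an interrupt modifies *p between the this_cpu_read() and the
 * this_cpu_cmpxchg(), the cmpxchg fails and the loop simply retries
 * against the fresh value, so no irq disabling is needed.
 */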

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
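
/*
 * Timing sketch (illustrative): the deferred vmstat work runs roughly
 * once per sysctl_stat_interval (HZ, i.e. about one second, by
 * default), so a remote pageset that still holds pages is drained
 * about three intervals after the last counter update reset
 * p->expire to 3.
 */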

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
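
/*
 * Example (illustrative): a task running on node 0 that preferred a
 * node 0 zone but was served from node 1 accounts NUMA_MISS on the
 * node 1 zone, NUMA_FOREIGN on the preferred node 0 zone and, since
 * the allocation is not node-local, NUMA_OTHER on the node 1 zone.
 */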
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}
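
/*
 * Worked example (illustrative): for suitable_order = 3, a zone with
 * 10 free order-2 blocks and 4 free order-4 blocks yields
 * free_pages = 10 * 4 + 4 * 16 = 104, free_blocks_total = 14 and
 * free_blocks_suitable = 4 << (4 - 3) = 8, as each order-4 block can
 * be split into two order-3 blocks.
 */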

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL,
				requested), info->free_blocks_total);
}
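
/*
 * Worked arithmetic (illustrative): for order = 3 (requested = 8) with
 * 104 free pages scattered over 104 order-0 blocks and no suitable
 * block, the index is
 *
 *	1000 - (1000 + 104 * 1000 / 8) / 104 = 1000 - 134 = 866
 *
 * i.e. 0.866: the failure is due to fragmentation, not lack of memory.
 */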

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
#ifdef CONFIG_CMA
	"CMA",
#endif
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
	"nr_dirtied",
	"nr_written",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"nr_anon_transparent_hugepages",
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal_kswapd")
	TEXTS_FOR_ZONES("pgsteal_direct")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",

#ifdef CONFIG_COMPACTION
	"compact_blocks_moved",
	"compact_pages_moved",
	"compact_pagemigrate_failed",
	"compact_stall",
	"compact_fail",
	"compact_success",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_split",
#endif

#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */


#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
|  | 897 | static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg) | 
|  | 898 | { | 
|  | 899 | int mtype; | 
|  | 900 | pg_data_t *pgdat = (pg_data_t *)arg; | 
|  | 901 |  | 
|  | 902 | seq_printf(m, "\n%-23s", "Number of blocks type "); | 
|  | 903 | for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) | 
|  | 904 | seq_printf(m, "%12s ", migratetype_names[mtype]); | 
|  | 905 | seq_putc(m, '\n'); | 
|  | 906 | walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print); | 
|  | 907 |  | 
|  | 908 | return 0; | 
|  | 909 | } | 
|  | 910 |  | 
|  | 911 | /* | 
|  | 912 | * This prints out statistics in relation to grouping pages by mobility. | 
|  | 913 | * It is expensive to collect so do not constantly read the file. | 
|  | 914 | */ | 
|  | 915 | static int pagetypeinfo_show(struct seq_file *m, void *arg) | 
|  | 916 | { | 
|  | 917 | pg_data_t *pgdat = (pg_data_t *)arg; | 
|  | 918 |  | 
| KOSAKI Motohiro | 41b25a3 | 2008-04-30 00:52:13 -0700 | [diff] [blame] | 919 | /* check memoryless node */ | 
|  | 920 | if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) | 
|  | 921 | return 0; | 
|  | 922 |  | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 923 | seq_printf(m, "Page block order: %d\n", pageblock_order); | 
|  | 924 | seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages); | 
|  | 925 | seq_putc(m, '\n'); | 
|  | 926 | pagetypeinfo_showfree(m, pgdat); | 
|  | 927 | pagetypeinfo_showblockcount(m, pgdat); | 
|  | 928 |  | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 929 | return 0; | 
|  | 930 | } | 
|  | 931 |  | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 932 | static const struct seq_operations fragmentation_op = { | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 933 | .start	= frag_start, | 
|  | 934 | .next	= frag_next, | 
|  | 935 | .stop	= frag_stop, | 
|  | 936 | .show	= frag_show, | 
|  | 937 | }; | 
|  | 938 |  | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 939 | static int fragmentation_open(struct inode *inode, struct file *file) | 
|  | 940 | { | 
|  | 941 | return seq_open(file, &fragmentation_op); | 
|  | 942 | } | 
|  | 943 |  | 
|  | 944 | static const struct file_operations fragmentation_file_operations = { | 
|  | 945 | .open		= fragmentation_open, | 
|  | 946 | .read		= seq_read, | 
|  | 947 | .llseek		= seq_lseek, | 
|  | 948 | .release	= seq_release, | 
|  | 949 | }; | 
|  | 950 |  | 
| Alexey Dobriyan | 74e2e8e | 2008-10-06 04:15:36 +0400 | [diff] [blame] | 951 | static const struct seq_operations pagetypeinfo_op = { | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 952 | .start	= frag_start, | 
|  | 953 | .next	= frag_next, | 
|  | 954 | .stop	= frag_stop, | 
|  | 955 | .show	= pagetypeinfo_show, | 
|  | 956 | }; | 
|  | 957 |  | 
| Alexey Dobriyan | 74e2e8e | 2008-10-06 04:15:36 +0400 | [diff] [blame] | 958 | static int pagetypeinfo_open(struct inode *inode, struct file *file) | 
|  | 959 | { | 
|  | 960 | return seq_open(file, &pagetypeinfo_op); | 
|  | 961 | } | 
|  | 962 |  | 
|  | 963 | static const struct file_operations pagetypeinfo_file_ops = { | 
|  | 964 | .open		= pagetypeinfo_open, | 
|  | 965 | .read		= seq_read, | 
|  | 966 | .llseek		= seq_lseek, | 
|  | 967 | .release	= seq_release, | 
|  | 968 | }; | 
|  | 969 |  | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 970 | static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | 
|  | 971 | struct zone *zone) | 
|  | 972 | { | 
|  | 973 | int i; | 
|  | 974 | seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); | 
|  | 975 | seq_printf(m, | 
|  | 976 | "\n  pages free     %lu" | 
|  | 977 | "\n        min      %lu" | 
|  | 978 | "\n        low      %lu" | 
|  | 979 | "\n        high     %lu" | 
| Wu Fengguang | 08d9ae7 | 2009-06-16 15:32:30 -0700 | [diff] [blame] | 980 | "\n        scanned  %lu" | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 981 | "\n        spanned  %lu" | 
|  | 982 | "\n        present  %lu", | 
| Mel Gorman | 88f5acf | 2011-01-13 15:45:41 -0800 | [diff] [blame] | 983 | zone_page_state(zone, NR_FREE_PAGES), | 
| Mel Gorman | 4185896 | 2009-06-16 15:32:12 -0700 | [diff] [blame] | 984 | min_wmark_pages(zone), | 
|  | 985 | low_wmark_pages(zone), | 
|  | 986 | high_wmark_pages(zone), | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 987 | zone->pages_scanned, | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 988 | zone->spanned_pages, | 
|  | 989 | zone->present_pages); | 
|  | 990 |  | 
|  | 991 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) | 
|  | 992 | seq_printf(m, "\n    %-12s %lu", vmstat_text[i], | 
|  | 993 | zone_page_state(zone, i)); | 
|  | 994 |  | 
|  | 995 | seq_printf(m, | 
|  | 996 | "\n        protection: (%lu", | 
|  | 997 | zone->lowmem_reserve[0]); | 
|  | 998 | for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) | 
|  | 999 | seq_printf(m, ", %lu", zone->lowmem_reserve[i]); | 
|  | 1000 | seq_printf(m, | 
|  | 1001 | ")" | 
|  | 1002 | "\n  pagesets"); | 
|  | 1003 | for_each_online_cpu(i) { | 
|  | 1004 | struct per_cpu_pageset *pageset; | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 1005 |  | 
| Christoph Lameter | 99dcc3e | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 1006 | pageset = per_cpu_ptr(zone->pageset, i); | 
| Christoph Lameter | 3dfa572 | 2008-02-04 22:29:19 -0800 | [diff] [blame] | 1007 | seq_printf(m, | 
|  | 1008 | "\n    cpu: %i" | 
|  | 1009 | "\n              count: %i" | 
|  | 1010 | "\n              high:  %i" | 
|  | 1011 | "\n              batch: %i", | 
|  | 1012 | i, | 
|  | 1013 | pageset->pcp.count, | 
|  | 1014 | pageset->pcp.high, | 
|  | 1015 | pageset->pcp.batch); | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 1016 | #ifdef CONFIG_SMP | 
|  | 1017 | seq_printf(m, "\n  vm stats threshold: %d", | 
|  | 1018 | pageset->stat_threshold); | 
|  | 1019 | #endif | 
|  | 1020 | } | 
|  | 1021 | seq_printf(m, | 
|  | 1022 | "\n  all_unreclaimable: %u" | 
| Rik van Riel | 556adec | 2008-10-18 20:26:34 -0700 | [diff] [blame] | 1023 | "\n  start_pfn:         %lu" | 
|  | 1024 | "\n  inactive_ratio:    %u", | 
| KOSAKI Motohiro | 93e4a89 | 2010-03-05 13:41:55 -0800 | [diff] [blame] | 1025 | zone->all_unreclaimable, | 
| Rik van Riel | 556adec | 2008-10-18 20:26:34 -0700 | [diff] [blame] | 1026 | zone->zone_start_pfn, | 
|  | 1027 | zone->inactive_ratio); | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 1028 | seq_putc(m, '\n'); | 
|  | 1029 | } | 
|  | 1030 |  | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1031 | /* | 
|  | 1032 | * Output information about zones in @pgdat. | 
|  | 1033 | */ | 
|  | 1034 | static int zoneinfo_show(struct seq_file *m, void *arg) | 
|  | 1035 | { | 
| Mel Gorman | 467c996 | 2007-10-16 01:26:02 -0700 | [diff] [blame] | 1036 | pg_data_t *pgdat = (pg_data_t *)arg; | 
|  | 1037 | walk_zones_in_node(m, pgdat, zoneinfo_show_print); | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1038 | return 0; | 
|  | 1039 | } | 
|  | 1040 |  | 
| Alexey Dobriyan | 5c9fe62 | 2008-10-06 04:19:42 +0400 | [diff] [blame] | 1041 | static const struct seq_operations zoneinfo_op = { | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1042 | .start	= frag_start, /* iterate over all zones. The same as in | 
|  | 1043 | * fragmentation. */ | 
|  | 1044 | .next	= frag_next, | 
|  | 1045 | .stop	= frag_stop, | 
|  | 1046 | .show	= zoneinfo_show, | 
|  | 1047 | }; | 
|  | 1048 |  | 
| Alexey Dobriyan | 5c9fe62 | 2008-10-06 04:19:42 +0400 | [diff] [blame] | 1049 | static int zoneinfo_open(struct inode *inode, struct file *file) | 
|  | 1050 | { | 
|  | 1051 | return seq_open(file, &zoneinfo_op); | 
|  | 1052 | } | 
|  | 1053 |  | 
|  | 1054 | static const struct file_operations proc_zoneinfo_file_operations = { | 
|  | 1055 | .open		= zoneinfo_open, | 
|  | 1056 | .read		= seq_read, | 
|  | 1057 | .llseek		= seq_lseek, | 
|  | 1058 | .release	= seq_release, | 
|  | 1059 | }; | 
|  | 1060 |  | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1061 | enum writeback_stat_item { | 
|  | 1062 | NR_DIRTY_THRESHOLD, | 
|  | 1063 | NR_DIRTY_BG_THRESHOLD, | 
|  | 1064 | NR_VM_WRITEBACK_STAT_ITEMS, | 
|  | 1065 | }; | 
|  | 1066 |  | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1067 | static void *vmstat_start(struct seq_file *m, loff_t *pos) | 
|  | 1068 | { | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 1069 | unsigned long *v; | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1070 | int i, stat_items_size; | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1071 |  | 
|  | 1072 | if (*pos >= ARRAY_SIZE(vmstat_text)) | 
|  | 1073 | return NULL; | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1074 | stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) + | 
|  | 1075 | NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long); | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1076 |  | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1077 | #ifdef CONFIG_VM_EVENT_COUNTERS | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1078 | stat_items_size += sizeof(struct vm_event_state); | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1079 | #endif | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1080 |  | 
|  | 1081 | v = kmalloc(stat_items_size, GFP_KERNEL); | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 1082 | m->private = v; | 
|  | 1083 | if (!v) | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1084 | return ERR_PTR(-ENOMEM); | 
| Christoph Lameter | 2244b95 | 2006-06-30 01:55:33 -0700 | [diff] [blame] | 1085 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) | 
|  | 1086 | v[i] = global_page_state(i); | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1087 | v += NR_VM_ZONE_STAT_ITEMS; | 
|  | 1088 |  | 
|  | 1089 | global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD, | 
|  | 1090 | v + NR_DIRTY_THRESHOLD); | 
|  | 1091 | v += NR_VM_WRITEBACK_STAT_ITEMS; | 
|  | 1092 |  | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1093 | #ifdef CONFIG_VM_EVENT_COUNTERS | 
| Michael Rubin | 79da826 | 2010-10-26 14:21:36 -0700 | [diff] [blame] | 1094 | all_vm_events(v); | 
|  | 1095 | v[PGPGIN] /= 2;		/* sectors -> kbytes */ | 
|  | 1096 | v[PGPGOUT] /= 2; | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1097 | #endif | 
| Wu Fengguang | ff8b16d | 2010-11-04 01:56:49 +0800 | [diff] [blame] | 1098 | return (unsigned long *)m->private + *pos; | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1099 | } | 
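/*
 * Shape of the snapshot buffer built by vmstat_start() above (the
 * event block is present only with CONFIG_VM_EVENT_COUNTERS):
 *
 *	[0 .. NR_VM_ZONE_STAT_ITEMS-1]		global zone counters
 *	[.. NR_VM_WRITEBACK_STAT_ITEMS-1]	dirty and background
 *						dirty thresholds
 *	[.. NR_VM_EVENT_ITEMS-1]		vm event counters
 *
 * vmstat_text[] names the slots in the same order, which is why *pos
 * can index both the name table and this buffer below.
 */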
|  | 1100 |  | 
|  | 1101 | static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) | 
|  | 1102 | { | 
|  | 1103 | (*pos)++; | 
|  | 1104 | if (*pos >= ARRAY_SIZE(vmstat_text)) | 
|  | 1105 | return NULL; | 
|  | 1106 | return (unsigned long *)m->private + *pos; | 
|  | 1107 | } | 
|  | 1108 |  | 
|  | 1109 | static int vmstat_show(struct seq_file *m, void *arg) | 
|  | 1110 | { | 
|  | 1111 | unsigned long *l = arg; | 
|  | 1112 | unsigned long off = l - (unsigned long *)m->private; | 
|  | 1113 |  | 
|  | 1114 | seq_printf(m, "%s %lu\n", vmstat_text[off], *l); | 
|  | 1115 | return 0; | 
|  | 1116 | } | 
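/*
 * Each iteration step prints one "name value" pair, so a read of
 * /proc/vmstat looks like (values illustrative only):
 *
 *	nr_free_pages 84217
 *	nr_inactive_anon 3941
 *	...
 *	pgpgin 271613
 *	pgpgout 520416
 */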
|  | 1117 |  | 
|  | 1118 | static void vmstat_stop(struct seq_file *m, void *arg) | 
|  | 1119 | { | 
|  | 1120 | kfree(m->private); | 
|  | 1121 | m->private = NULL; | 
|  | 1122 | } | 
|  | 1123 |  | 
| Alexey Dobriyan | b6aa44a | 2008-10-06 04:17:48 +0400 | [diff] [blame] | 1124 | static const struct seq_operations vmstat_op = { | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1125 | .start	= vmstat_start, | 
|  | 1126 | .next	= vmstat_next, | 
|  | 1127 | .stop	= vmstat_stop, | 
|  | 1128 | .show	= vmstat_show, | 
|  | 1129 | }; | 
|  | 1130 |  | 
| Alexey Dobriyan | b6aa44a | 2008-10-06 04:17:48 +0400 | [diff] [blame] | 1131 | static int vmstat_open(struct inode *inode, struct file *file) | 
|  | 1132 | { | 
|  | 1133 | return seq_open(file, &vmstat_op); | 
|  | 1134 | } | 
|  | 1135 |  | 
|  | 1136 | static const struct file_operations proc_vmstat_file_operations = { | 
|  | 1137 | .open		= vmstat_open, | 
|  | 1138 | .read		= seq_read, | 
|  | 1139 | .llseek		= seq_lseek, | 
|  | 1140 | .release	= seq_release, | 
|  | 1141 | }; | 
| Christoph Lameter | f6ac235 | 2006-06-30 01:55:32 -0700 | [diff] [blame] | 1142 | #endif /* CONFIG_PROC_FS */ | 
|  | 1143 |  | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1144 | #ifdef CONFIG_SMP | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1145 | static DEFINE_PER_CPU(struct delayed_work, vmstat_work); | 
| Christoph Lameter | 77461ab | 2007-05-09 02:35:13 -0700 | [diff] [blame] | 1146 | int sysctl_stat_interval __read_mostly = HZ; | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1147 |  | 
|  | 1148 | static void vmstat_update(struct work_struct *w) | 
|  | 1149 | { | 
|  | 1150 | refresh_cpu_vm_stats(smp_processor_id()); | 
| Christoph Lameter | 77461ab | 2007-05-09 02:35:13 -0700 | [diff] [blame] | 1151 | schedule_delayed_work(&__get_cpu_var(vmstat_work), | 
| Anton Blanchard | 98f4ebb | 2009-04-02 16:56:39 -0700 | [diff] [blame] | 1152 | round_jiffies_relative(sysctl_stat_interval)); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1153 | } | 
|  | 1154 |  | 
| Randy Dunlap | 42614fc | 2007-11-14 17:00:12 -0800 | [diff] [blame] | 1155 | static void __cpuinit start_cpu_timer(int cpu) | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1156 | { | 
| Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 1157 | struct delayed_work *work = &per_cpu(vmstat_work, cpu); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1158 |  | 
| Tejun Heo | 1871e52 | 2009-10-29 22:34:13 +0900 | [diff] [blame] | 1159 | INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); | 
|  | 1160 | schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1161 | } | 
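/*
 * Two details of the timer setup above are worth noting: the
 * deferrable work item lets an otherwise idle CPU stay asleep rather
 * than wake just to fold its counters, and __round_jiffies_relative(HZ,
 * cpu) skews the first expiry per CPU so that all processors do not
 * refresh their statistics in the same tick.
 */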
|  | 1162 |  | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1163 | /* | 
|  | 1164 | * Use the cpu notifier to ensure that the thresholds are recalculated | 
|  | 1165 | * when necessary. | 
|  | 1166 | */ | 
|  | 1167 | static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, | 
|  | 1168 | unsigned long action, | 
|  | 1169 | void *hcpu) | 
|  | 1170 | { | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1171 | long cpu = (long)hcpu; | 
|  | 1172 |  | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1173 | switch (action) { | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1174 | case CPU_ONLINE: | 
|  | 1175 | case CPU_ONLINE_FROZEN: | 
| KAMEZAWA Hiroyuki | 5ee28a4 | 2010-09-09 16:38:14 -0700 | [diff] [blame] | 1176 | refresh_zone_stat_thresholds(); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1177 | start_cpu_timer(cpu); | 
| Christoph Lameter | ad59692 | 2010-01-05 15:34:51 +0900 | [diff] [blame] | 1178 | node_set_state(cpu_to_node(cpu), N_CPU); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1179 | break; | 
|  | 1180 | case CPU_DOWN_PREPARE: | 
|  | 1181 | case CPU_DOWN_PREPARE_FROZEN: | 
| Tejun Heo | afe2c51 | 2010-12-14 16:21:17 +0100 | [diff] [blame] | 1182 | cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu)); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1183 | per_cpu(vmstat_work, cpu).work.func = NULL; | 
|  | 1184 | break; | 
|  | 1185 | case CPU_DOWN_FAILED: | 
|  | 1186 | case CPU_DOWN_FAILED_FROZEN: | 
|  | 1187 | start_cpu_timer(cpu); | 
|  | 1188 | break; | 
| Andy Whitcroft | ce421c7 | 2006-12-06 20:33:08 -0800 | [diff] [blame] | 1189 | case CPU_DEAD: | 
| Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1190 | case CPU_DEAD_FROZEN: | 
| Andy Whitcroft | ce421c7 | 2006-12-06 20:33:08 -0800 | [diff] [blame] | 1191 | refresh_zone_stat_thresholds(); | 
|  | 1192 | break; | 
|  | 1193 | default: | 
|  | 1194 | break; | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1195 | } | 
|  | 1196 | return NOTIFY_OK; | 
|  | 1197 | } | 
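/*
 * Summary of the hotplug transitions handled above:
 *
 *	CPU_ONLINE		recompute thresholds, start the cpu's timer
 *	CPU_DOWN_PREPARE	cancel the timer before the cpu goes away
 *	CPU_DOWN_FAILED		the cpu stays online, restart its timer
 *	CPU_DEAD		recompute thresholds for the reduced machine
 */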
|  | 1198 |  | 
|  | 1199 | static struct notifier_block __cpuinitdata vmstat_notifier = | 
|  | 1200 | { &vmstat_cpuup_callback, NULL, 0 }; | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 1201 | #endif | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1202 |  | 
| Adrian Bunk | e2fc88d | 2007-10-16 01:26:27 -0700 | [diff] [blame] | 1203 | static int __init setup_vmstat(void) | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1204 | { | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 1205 | #ifdef CONFIG_SMP | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1206 | int cpu; | 
|  | 1207 |  | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1208 | register_cpu_notifier(&vmstat_notifier); | 
| Christoph Lameter | d1187ed | 2007-05-09 02:35:12 -0700 | [diff] [blame] | 1209 |  | 
|  | 1210 | for_each_online_cpu(cpu) | 
|  | 1211 | start_cpu_timer(cpu); | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 1212 | #endif | 
|  | 1213 | #ifdef CONFIG_PROC_FS | 
|  | 1214 | proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); | 
| Alexey Dobriyan | 74e2e8e | 2008-10-06 04:15:36 +0400 | [diff] [blame] | 1215 | proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); | 
| Alexey Dobriyan | b6aa44a | 2008-10-06 04:17:48 +0400 | [diff] [blame] | 1216 | proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); | 
| Alexey Dobriyan | 5c9fe62 | 2008-10-06 04:19:42 +0400 | [diff] [blame] | 1217 | proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); | 
| Alexey Dobriyan | 8f32f7e | 2008-10-06 04:13:52 +0400 | [diff] [blame] | 1218 | #endif | 
| Christoph Lameter | df9ecab | 2006-08-31 21:27:35 -0700 | [diff] [blame] | 1219 | return 0; | 
|  | 1220 | } | 
|  | 1221 | module_init(setup_vmstat) | 
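/*
 * Ordering note for setup_vmstat(): the hotplug notifier is registered
 * and the per-cpu timers are started before the /proc entries are
 * created, so the files never appear while the counters are not yet
 * being maintained.
 */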
| Mel Gorman | d7a5752 | 2010-05-24 14:32:25 -0700 | [diff] [blame] | 1222 |  | 
|  | 1223 | #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) | 
|  | 1224 | #include <linux/debugfs.h> | 
|  | 1225 |  | 
|  | 1226 | static struct dentry *extfrag_debug_root; | 
|  | 1227 |  | 
|  | 1228 | /* | 
|  | 1229 | * Return an index indicating how much of the available free memory is | 
|  | 1230 | * unusable for an allocation of the requested size. | 
|  | 1231 | */ | 
|  | 1232 | static int unusable_free_index(unsigned int order, | 
|  | 1233 | struct contig_page_info *info) | 
|  | 1234 | { | 
|  | 1235 | /* Having no free memory is treated as all free memory being unusable */ | 
|  | 1236 | if (info->free_pages == 0) | 
|  | 1237 | return 1000; | 
|  | 1238 |  | 
|  | 1239 | /* | 
|  | 1240 | * The index is conceptually a value between 0 and 1; return it | 
|  | 1241 | * scaled to an integer with 3 decimal places (0..1000). | 
|  | 1242 | * | 
|  | 1243 | * 0 => no fragmentation | 
|  | 1244 | * 1 => high fragmentation | 
|  | 1245 | */ | 
|  | 1246 | return div_u64((info->free_pages - | 
|  | 1247 | (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); | 
|  | 1248 | } | 
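/*
 * Worked example with illustrative numbers: 1000 free pages of which
 * 50 blocks are order-4 or larger. An order-4 request can use
 * 50 << 4 = 800 of those pages, so the index is
 * (1000 - 800) * 1000 / 1000 = 200, displayed as 0.200.
 */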
|  | 1249 |  | 
|  | 1250 | static void unusable_show_print(struct seq_file *m, | 
|  | 1251 | pg_data_t *pgdat, struct zone *zone) | 
|  | 1252 | { | 
|  | 1253 | unsigned int order; | 
|  | 1254 | int index; | 
|  | 1255 | struct contig_page_info info; | 
|  | 1256 |  | 
|  | 1257 | seq_printf(m, "Node %d, zone %8s ", | 
|  | 1258 | pgdat->node_id, | 
|  | 1259 | zone->name); | 
|  | 1260 | for (order = 0; order < MAX_ORDER; ++order) { | 
|  | 1261 | fill_contig_page_info(zone, order, &info); | 
|  | 1262 | index = unusable_free_index(order, &info); | 
|  | 1263 | seq_printf(m, "%d.%03d ", index / 1000, index % 1000); | 
|  | 1264 | } | 
|  | 1265 |  | 
|  | 1266 | seq_putc(m, '\n'); | 
|  | 1267 | } | 
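/*
 * One line of the resulting debugfs file, with illustrative values
 * (one column per order):
 *
 *	Node 0, zone   Normal 0.000 0.005 0.010 0.021 0.044 0.089 ...
 *
 * 0.000 means every free page is usable at that order; values close
 * to 1.000 mean almost none are.
 */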
|  | 1268 |  | 
|  | 1269 | /* | 
|  | 1270 | * Display unusable free space index | 
|  | 1271 | * | 
|  | 1272 | * The unusable free space index measures how much of the available free | 
|  | 1273 | * memory cannot be used to satisfy an allocation of a given size and is a | 
|  | 1274 | * value between 0 and 1. The higher the value, the more of free memory is | 
|  | 1275 | * value between 0 and 1. The higher the value, the more of the free memory is | 
|  | 1276 | * unusable and, by implication, the worse the external fragmentation is. This | 
|  | 1277 | */ | 
|  | 1278 | static int unusable_show(struct seq_file *m, void *arg) | 
|  | 1279 | { | 
|  | 1280 | pg_data_t *pgdat = (pg_data_t *)arg; | 
|  | 1281 |  | 
|  | 1282 | /* skip memoryless nodes */ | 
|  | 1283 | if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) | 
|  | 1284 | return 0; | 
|  | 1285 |  | 
|  | 1286 | walk_zones_in_node(m, pgdat, unusable_show_print); | 
|  | 1287 |  | 
|  | 1288 | return 0; | 
|  | 1289 | } | 
|  | 1290 |  | 
|  | 1291 | static const struct seq_operations unusable_op = { | 
|  | 1292 | .start	= frag_start, | 
|  | 1293 | .next	= frag_next, | 
|  | 1294 | .stop	= frag_stop, | 
|  | 1295 | .show	= unusable_show, | 
|  | 1296 | }; | 
|  | 1297 |  | 
|  | 1298 | static int unusable_open(struct inode *inode, struct file *file) | 
|  | 1299 | { | 
|  | 1300 | return seq_open(file, &unusable_op); | 
|  | 1301 | } | 
|  | 1302 |  | 
|  | 1303 | static const struct file_operations unusable_file_ops = { | 
|  | 1304 | .open		= unusable_open, | 
|  | 1305 | .read		= seq_read, | 
|  | 1306 | .llseek		= seq_lseek, | 
|  | 1307 | .release	= seq_release, | 
|  | 1308 | }; | 
|  | 1309 |  | 
| Mel Gorman | f1a5ab1 | 2010-05-24 14:32:26 -0700 | [diff] [blame] | 1310 | static void extfrag_show_print(struct seq_file *m, | 
|  | 1311 | pg_data_t *pgdat, struct zone *zone) | 
|  | 1312 | { | 
|  | 1313 | unsigned int order; | 
|  | 1314 | int index; | 
|  | 1315 |  | 
|  | 1316 | /* Allocate on the stack as interrupts are disabled during the zone walk */ | 
|  | 1317 | struct contig_page_info info; | 
|  | 1318 |  | 
|  | 1319 | seq_printf(m, "Node %d, zone %8s ", | 
|  | 1320 | pgdat->node_id, | 
|  | 1321 | zone->name); | 
|  | 1322 | for (order = 0; order < MAX_ORDER; ++order) { | 
|  | 1323 | fill_contig_page_info(zone, order, &info); | 
| Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1324 | index = __fragmentation_index(order, &info); | 
| Mel Gorman | f1a5ab1 | 2010-05-24 14:32:26 -0700 | [diff] [blame] | 1325 | seq_printf(m, "%d.%03d ", index / 1000, index % 1000); | 
|  | 1326 | } | 
|  | 1327 |  | 
|  | 1328 | seq_putc(m, '\n'); | 
|  | 1329 | } | 
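/*
 * __fragmentation_index() (defined earlier in this file) returns
 * -1000 when a suitable free block still exists, since the allocation
 * cannot fail and no index applies; that prints here as "-1.000".
 * Otherwise, values near 0.000 indicate failure would be due to lack
 * of memory, and values near 1.000 indicate failure would be due to
 * external fragmentation.
 */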
|  | 1330 |  | 
|  | 1331 | /* | 
|  | 1332 | * Display the fragmentation index for each order at which an allocation would fail | 
|  | 1333 | */ | 
|  | 1334 | static int extfrag_show(struct seq_file *m, void *arg) | 
|  | 1335 | { | 
|  | 1336 | pg_data_t *pgdat = (pg_data_t *)arg; | 
|  | 1337 |  | 
|  | 1338 | walk_zones_in_node(m, pgdat, extfrag_show_print); | 
|  | 1339 |  | 
|  | 1340 | return 0; | 
|  | 1341 | } | 
|  | 1342 |  | 
|  | 1343 | static const struct seq_operations extfrag_op = { | 
|  | 1344 | .start	= frag_start, | 
|  | 1345 | .next	= frag_next, | 
|  | 1346 | .stop	= frag_stop, | 
|  | 1347 | .show	= extfrag_show, | 
|  | 1348 | }; | 
|  | 1349 |  | 
|  | 1350 | static int extfrag_open(struct inode *inode, struct file *file) | 
|  | 1351 | { | 
|  | 1352 | return seq_open(file, &extfrag_op); | 
|  | 1353 | } | 
|  | 1354 |  | 
|  | 1355 | static const struct file_operations extfrag_file_ops = { | 
|  | 1356 | .open		= extfrag_open, | 
|  | 1357 | .read		= seq_read, | 
|  | 1358 | .llseek		= seq_lseek, | 
|  | 1359 | .release	= seq_release, | 
|  | 1360 | }; | 
|  | 1361 |  | 
| Mel Gorman | d7a5752 | 2010-05-24 14:32:25 -0700 | [diff] [blame] | 1362 | static int __init extfrag_debug_init(void) | 
|  | 1363 | { | 
|  | 1364 | extfrag_debug_root = debugfs_create_dir("extfrag", NULL); | 
|  | 1365 | if (!extfrag_debug_root) | 
|  | 1366 | return -ENOMEM; | 
|  | 1367 |  | 
|  | 1368 | if (!debugfs_create_file("unusable_index", 0444, | 
|  | 1369 | extfrag_debug_root, NULL, &unusable_file_ops)) | 
|  | 1370 | return -ENOMEM; | 
|  | 1371 |  | 
| Mel Gorman | f1a5ab1 | 2010-05-24 14:32:26 -0700 | [diff] [blame] | 1372 | if (!debugfs_create_file("extfrag_index", 0444, | 
|  | 1373 | extfrag_debug_root, NULL, &extfrag_file_ops)) | 
|  | 1374 | return -ENOMEM; | 
|  | 1375 |  | 
| Mel Gorman | d7a5752 | 2010-05-24 14:32:25 -0700 | [diff] [blame] | 1376 | return 0; | 
|  | 1377 | } | 
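/*
 * Usage sketch (assumes debugfs is mounted at its conventional
 * location):
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/extfrag/unusable_index
 *	# cat /sys/kernel/debug/extfrag/extfrag_index
 */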
|  | 1378 |  | 
|  | 1379 | module_init(extfrag_debug_init); | 
|  | 1380 | #endif |