/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list __read_mostly;
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void fastcall free_hot_cold_page(struct page *page, int cold);
static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
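/*
 * For example, with a ratio of 256 a ZONE_NORMAL allocation on the 1G
 * machine above treats 784M/256 ~= 3M of ZONE_DMA as reserved, and with
 * a ratio of 32 a ZONE_HIGHMEM allocation treats 224M/32 = 7M of
 * ZONE_NORMAL as reserved.  Lowering a ratio increases the protection.
 */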
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		ClearPageCompound(p);
	}
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPagePrivate(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPagePrivate(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8, its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
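
/*
 * Continuing the example above: buddies #8 and #10 at order 1 combine
 * into the order 2 page starting at #8, since
 *     P = 10 & ~(1 << 1) = 10 & ~2 = 8
 * and likewise 8 & ~2 = 8.
 */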
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is free &&
 * (c) the buddy is on the buddy system &&
 * (d) a page and its buddy have the same order.
 *
 * For recording page's order, we use page_private(page) and PG_private.
 */
static inline int page_is_buddy(struct page *page, int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif

	if (PagePrivate(page)		&&
	    (page_order(page) == order) &&
	    page_count(page) == 0)
		return 1;
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_private.  A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
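
/*
 * For example, freeing order-0 page #5 when #4 is already free merges
 * them into an order-1 block at #4; if the order-1 buddy at #6 is free
 * too, the result is an order-2 block at #4, and so on up the levels.
 */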

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		mutex_debug_check_no_locks_freed(page_address(page),
						 PAGE_SIZE<<order);

#ifndef CONFIG_MMU
	for (i = 1 ; i < (1 << order) ; ++i)
		__put_page(page + i);
#endif

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__mod_page_state(pgfree, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);

		free_hot_cold_page(page, 0);
	} else {
		LIST_HEAD(list);
		int loop;

		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 16 < BITS_PER_LONG)
				prefetchw(p + 16);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		arch_free_page(page, order);

		mod_page_state(pgfree, 1 << order);

		list_add(&page->lru, &list);
		kernel_map_pages(page, 1 << order, 0);
		free_pages_bulk(page_zone(page), 1, &list, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
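
/*
 * For example, expand() on an order-3 block when order 0 was requested
 * returns the first page and gives back the unused halves: an order-2
 * block at page[4], an order-1 block at page[2] and an order-0 page at
 * page[1] go onto their respective free lists.
 */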
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refs(page, order);
	kernel_map_pages(page, 1 << order, 1);
	return 0;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belong to the currently executing processor.
 */
void drain_node_pages(int nodeid)
{
	int i, z;
	unsigned long flags;

	local_irq_save(flags);
	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
		}
	}
	local_irq_restore(flags);
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i = 0; i < (1 << order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn + i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
{
#ifdef CONFIG_NUMA
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	p = zone_pcp(z, cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
#endif
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__inc_page_state(pgfree);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}

void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__mod_page_state_zone(zone, pgalloc, 1 << order);
	zone_statistics(zonelist, zone, cpu);
	local_irq_restore(flags);
	put_cpu();

	BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order))
		goto again;

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
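
/*
 * For example, an order-2 request checked against a mark of 128 pages
 * must find, beyond the pages it would itself consume, more than
 * 128 + lowmem_reserve free pages in total, more than 64 free pages in
 * blocks of order 1 or higher, and more than 32 free pages in blocks
 * of order 2 or higher.
 */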
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = (*z)->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = (*z)->pages_low;
			else
				mark = (*z)->pages_high;
			if (!zone_watermark_ok(*z, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(*z, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
		if (page)
			break;
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
 | 996 | 	p->flags &= ~PF_MEMALLOC; | 
 | 997 |  | 
 | 998 | 	cond_resched(); | 
 | 999 |  | 
 | 1000 | 	if (likely(did_some_progress)) { | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1001 | 		page = get_page_from_freelist(gfp_mask, order, | 
 | 1002 | 						zonelist, alloc_flags); | 
 | 1003 | 		if (page) | 
 | 1004 | 			goto got_pg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | 	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { | 
 | 1006 | 		/* | 
 | 1007 | 		 * Go through the zonelist yet one more time, keep | 
 | 1008 | 		 * very high watermark here, this is only to catch | 
 | 1009 | 		 * a parallel oom killing, we must fail if we're still | 
 | 1010 | 		 * under heavy pressure. | 
 | 1011 | 		 */ | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1012 | 		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, | 
| Nick Piggin | 3148890 | 2005-11-28 13:44:03 -0800 | [diff] [blame] | 1013 | 				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); | 
| Rohit Seth | 7fb1d9f | 2005-11-13 16:06:43 -0800 | [diff] [blame] | 1014 | 		if (page) | 
 | 1015 | 			goto got_pg; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1016 |  | 
| Christoph Lameter | 9b0f8b0 | 2006-02-20 18:27:52 -0800 | [diff] [blame] | 1017 | 		out_of_memory(zonelist, gfp_mask, order); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1018 | 		goto restart; | 
 | 1019 | 	} | 
 | 1020 |  | 
 | 1021 | 	/* | 
 | 1022 | 	 * Don't let big-order allocations loop unless the caller explicitly | 
 | 1023 | 	 * requests that.  Wait for some write requests to complete then retry. | 
 | 1024 | 	 * | 
 | 1025 | 	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order | 
 | 1026 | 	 * <= 3, but that may not be true in other implementations. | 
 | 1027 | 	 */ | 
 | 1028 | 	do_retry = 0; | 
 | 1029 | 	if (!(gfp_mask & __GFP_NORETRY)) { | 
 | 1030 | 		if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) | 
 | 1031 | 			do_retry = 1; | 
 | 1032 | 		if (gfp_mask & __GFP_NOFAIL) | 
 | 1033 | 			do_retry = 1; | 
 | 1034 | 	} | 
 | 1035 | 	if (do_retry) { | 
 | 1036 | 		blk_congestion_wait(WRITE, HZ/50); | 
 | 1037 | 		goto rebalance; | 
 | 1038 | 	} | 
 | 1039 |  | 
 | 1040 | nopage: | 
 | 1041 | 	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { | 
 | 1042 | 		printk(KERN_WARNING "%s: page allocation failure." | 
 | 1043 | 			" order:%d, mode:0x%x\n", | 
 | 1044 | 			p->comm, order, gfp_mask); | 
 | 1045 | 		dump_stack(); | 
| Janet Morgan | 578c2fd | 2005-06-21 17:14:56 -0700 | [diff] [blame] | 1046 | 		show_mem(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1048 | got_pg: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1049 | 	return page; | 
 | 1050 | } | 
 | 1051 |  | 
 | 1052 | EXPORT_SYMBOL(__alloc_pages); | 
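
/*
 * Example (illustrative, not part of this file): most callers do not
 * invoke __alloc_pages() directly but go through the alloc_pages()/
 * alloc_page() wrappers, which supply the zonelist for the current
 * node.  A typical sleeping-context caller:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...				(use the 4 contiguous pages)
 *	__free_pages(page, 2);
 */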

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a direct-mapped kernel virtual
	 * address, which cannot represent a highmem page
	 */
	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);
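
/*
 * Example (illustrative): get_zeroed_page() is the usual way to grab a
 * single pre-zeroed page for a small kernel table, paired with
 * free_pages() at order 0:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 0);
 */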

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);
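
/*
 * Note: the free paths mirror the allocation paths.  Pages obtained as
 * struct page pointers (alloc_pages() and friends) are returned with
 * __free_pages(); pages obtained as kernel virtual addresses
 * (__get_free_pages(), get_zeroed_page()) are returned with
 * free_pages(), which translates the address back to its struct page.
 * In both cases the order passed to free must match the order used to
 * allocate.
 */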

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}
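
/*
 * Worked example: a zone with present_pages = 262144 (1GB of 4KB
 * pages) and pages_high = 3072 contributes 262144 - 3072 = 259072
 * pages; everything above the kswapd high watermark is counted as
 * allocatable, and zones at or below pages_high contribute nothing.
 */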

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone)	do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	int cpu = 0;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		unsigned long *in, *out, off;

		if (!cpu_isset(cpu, *cpumask))
			continue;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (likely(cpu < NR_CPUS))
			prefetch(&per_cpu(page_states, cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}

void get_page_state_node(struct page_state *ret, int node)
{
	int nr;
	cpumask_t mask = node_to_cpumask(node);

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr+1, &mask);
}

void get_page_state(struct page_state *ret)
{
	int nr;
	cpumask_t mask = CPU_MASK_ALL;

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr + 1, &mask);
}

void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}

unsigned long read_page_state_offset(unsigned long offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}
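
/*
 * Sketch (the real wrapper lives in the page_state header, not here):
 * callers normally use read_page_state(), which resolves a field name
 * to its byte offset, roughly
 *
 *	#define read_page_state(member) \
 *		read_page_state_offset(offsetof(struct page_state, member))
 *
 * so that e.g. read_page_state(nr_dirty) sums nr_dirty over all online
 * CPUs.
 */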

void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	void *ptr;

	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);

void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void *ptr;

	local_irq_save(flags);
	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_page_state_offset);
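
/*
 * Note: the two modifiers differ only in interrupt safety.
 * __mod_page_state_offset() assumes the caller already has interrupts
 * off on this CPU, while mod_page_state_offset() disables them itself.
 * Callers normally go through header macros that turn a field name
 * into an offset; as a sketch (the exact macro names live in the
 * header):
 *
 *	mod_page_state(pgrotated, 1UL);
 */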

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = nr_blockdev_pages();
#ifdef CONFIG_HIGHMEM
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
#else
	val->totalhigh = 0;
	val->freehigh = 0;
#endif
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);
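
/*
 * Example (illustrative): consumers convert the page counts to bytes
 * via mem_unit, e.g. to report total RAM in megabytes:
 *
 *	struct sysinfo si;
 *	si_meminfo(&si);
 *	printk("RAM: %luMB\n", (si.totalram * si.mem_unit) >> 20);
 *
 * (totalram is in units of mem_unit, i.e. PAGE_SIZE here; a careful
 * caller guards the multiply against 32-bit overflow.)
 */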

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	val->totalram = pgdat->node_present_pages;
	val->freeram = nr_free_pages_pgdat(pgdat);
	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
	val->mem_unit = PAGE_SIZE;
}
#endif

#define K(x) ((x) << (PAGE_SHIFT-10))

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
void show_free_areas(void)
{
	struct page_state ps;
	int cpu, temperature;
	unsigned long active;
	unsigned long inactive;
	unsigned long free;
	struct zone *zone;

	for_each_zone(zone) {
		show_node(zone);
		printk("%s per-cpu:", zone->name);

		if (!populated_zone(zone)) {
			printk(" empty\n");
			continue;
		} else
			printk("\n");

		for_each_online_cpu(cpu) {
			struct per_cpu_pageset *pageset;

			pageset = zone_pcp(zone, cpu);

			for (temperature = 0; temperature < 2; temperature++)
				printk("cpu %d %s: high %d, batch %d used:%d\n",
					cpu,
					temperature ? "cold" : "hot",
					pageset->pcp[temperature].high,
					pageset->pcp[temperature].batch,
					pageset->pcp[temperature].count);
		}
	}

	get_page_state(&ps);
	get_zone_counts(&active, &inactive, &free);

	printk("Free pages: %11ukB (%ukB HighMem)\n",
		K(nr_free_pages()),
		K(nr_free_highpages()));

	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
		active,
		inactive,
		ps.nr_dirty,
		ps.nr_writeback,
		ps.nr_unstable,
		nr_free_pages(),
		ps.nr_slab,
		ps.nr_mapped,
		ps.nr_page_table_pages);

	for_each_zone(zone) {
		int i;

		show_node(zone);
		printk("%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active:%lukB"
			" inactive:%lukB"
			" present:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			zone->name,
			K(zone->free_pages),
			K(zone->pages_min),
			K(zone->pages_low),
			K(zone->pages_high),
			K(zone->nr_active),
			K(zone->nr_inactive),
			K(zone->present_pages),
			zone->pages_scanned,
			(zone->all_unreclaimable ? "yes" : "no")
			);
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(" %lu", zone->lowmem_reserve[i]);
		printk("\n");
	}

	for_each_zone(zone) {
		unsigned long nr, flags, order, total = 0;

		show_node(zone);
		printk("%s: ", zone->name);
		if (!populated_zone(zone)) {
			printk("empty\n");
			continue;
		}

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			nr = zone->free_area[order].nr_free;
			total += nr << order;
			printk("%lu*%lukB ", nr, K(1UL) << order);
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		printk("= %lukB\n", K(total));
	}

	show_swap_cache_info();
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int __init build_zonelists_node(pg_data_t *pgdat,
			struct zonelist *zonelist, int nr_zones, int zone_type)
{
	struct zone *zone;

	BUG_ON(zone_type > ZONE_HIGHMEM);

	do {
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
#ifndef CONFIG_HIGHMEM
			BUG_ON(zone_type > ZONE_NORMAL);
#endif
			zonelist->zones[nr_zones++] = zone;
			check_highest_zone(zone_type);
		}
		zone_type--;

	} while (zone_type >= 0);
	return nr_zones;
}

static inline int highest_zone(int zone_bits)
{
	int res = ZONE_NORMAL;
	if (zone_bits & (__force int)__GFP_HIGHMEM)
		res = ZONE_HIGHMEM;
	if (zone_bits & (__force int)__GFP_DMA32)
		res = ZONE_DMA32;
	if (zone_bits & (__force int)__GFP_DMA)
		res = ZONE_DMA;
	return res;
}
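
/*
 * Worked example: GFP_KERNEL sets no zone bits, so highest_zone()
 * returns ZONE_NORMAL; GFP_HIGHUSER includes __GFP_HIGHMEM and maps to
 * ZONE_HIGHMEM; GFP_DMA maps to ZONE_DMA.  The checks run from highest
 * zone to lowest, so __GFP_DMA wins when several bits are set.
 */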

#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
static int __initdata node_load[MAX_NUMNODES];
/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_online_node(n) {
		cpumask_t tmp;

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = node_to_cpumask(n);
		if (!cpus_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
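
/*
 * Worked example: with candidate nodes at distances 20 and 40, no CPU
 * penalty, node_load[] all zero and MAX_NODE_LOAD * MAX_NUMNODES = 64,
 * the scores are 20 * 64 = 1280 and 40 * 64 = 2560, so the closer node
 * wins.  The multiply keeps distance dominant: node_load[] can only
 * break ties within one distance class, never override a shorter
 * distance.
 */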

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;
	int prev_node, load;
	struct zonelist *zonelist;
	nodemask_t used_mask;

	/* initialize zonelists */
	for (i = 0; i < GFP_ZONETYPES; i++) {
		zonelist = pgdat->node_zonelists + i;
		zonelist->zones[0] = NULL;
	}

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = num_online_nodes();
	prev_node = local_node;
	nodes_clear(used_mask);
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		int distance = node_distance(local_node, node);

		/*
		 * If another node is sufficiently far away then it is better
		 * to reclaim pages in a zone before going off node.
		 */
		if (distance > RECLAIM_DISTANCE)
			zone_reclaim_mode = 1;

		/*
		 * We don't want to pressure a particular node.
		 * So we add a penalty to the first node in the same
		 * distance group, to make the ordering round-robin.
		 */

		if (distance != node_distance(local_node, prev_node))
			node_load[node] += load;
		prev_node = node;
		load--;
		for (i = 0; i < GFP_ZONETYPES; i++) {
			zonelist = pgdat->node_zonelists + i;
			for (j = 0; zonelist->zones[j] != NULL; j++);

			k = highest_zone(i);

			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
			zonelist->zones[j] = NULL;
		}
	}
}

#else	/* CONFIG_NUMA */

static void __init build_zonelists(pg_data_t *pgdat)
{
	int i, j, k, node, local_node;

	local_node = pgdat->node_id;
	for (i = 0; i < GFP_ZONETYPES; i++) {
		struct zonelist *zonelist;

		zonelist = pgdat->node_zonelists + i;

		j = 0;
		k = highest_zone(i);
		j = build_zonelists_node(pgdat, zonelist, j, k);
		/*
		 * Now we build the zonelist so that it contains the zones
		 * of all the other nodes.
		 * We don't want to pressure a particular node, so when
		 * building the zones for node N, we make sure that the
		 * zones coming right after the local ones are those from
		 * node N+1 (modulo N)
		 */
		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}
		for (node = 0; node < local_node; node++) {
			if (!node_online(node))
				continue;
			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
		}

		zonelist->zones[j] = NULL;
	}
}

#endif	/* CONFIG_NUMA */

void __init build_all_zonelists(void)
{
	int i;

	for_each_online_node(i)
		build_zonelists(NODE_DATA(i));
	printk("Built %i zonelists\n", num_online_nodes());
	cpuset_init_current_mems_allowed();
}

/*
 * Helper functions to size the waitqueue hash table.
 * Essentially these want to choose hash table sizes sufficiently
 * large so that collisions trying to wait on pages are rare.
 * But in fact, the number of active page waitqueues on typical
 * systems is ridiculously low, less than 200. So this is even
 * conservative, even though it seems large.
 *
 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 * waitqueues, i.e. the size of the waitq table given the number of pages.
 */
#define PAGES_PER_WAITQUEUE	256

static inline unsigned long wait_table_size(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	/*
	 * Once we have dozens or even hundreds of threads sleeping
	 * on IO we've got bigger problems than wait queue collision.
	 * Limit the size of the wait table to a reasonable size.
	 */
	size = min(size, 4096UL);

	return max(size, 4UL);
}
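
/*
 * Worked example: a 1GB zone has 262144 4KB pages;
 * 262144 / PAGES_PER_WAITQUEUE = 1024, already a power of two and
 * under the 4096 cap, so the zone gets a 1024-entry wait table.  Tiny
 * zones are rounded up to at least 4 entries.
 */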

/*
 * This is an integer logarithm so that shifts can be used later
 * to extract the more random high bits from the multiplicative
 * hash function before the remainder is taken.
 */
static inline unsigned long wait_table_bits(unsigned long size)
{
	return ffz(~size);
}
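
/*
 * Worked example: for a power-of-two table size, ffz(~size) is the
 * index of the single set bit, i.e. log2 of the size:
 * wait_table_bits(1024) == 10.
 */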

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zones_size[i];
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	if (zholes_size)
		for (i = 0; i < MAX_NR_ZONES; i++)
			realtotalpages -= zholes_size[i];
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}


/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn)
{
	struct page *page;
	unsigned long end_pfn = start_pfn + size;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!early_pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		set_page_links(page, zone, nid, pfn);
		set_page_count(page, 1);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
		if (!is_highmem_idx(zone))
			set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
	}
}

void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
				unsigned long size)
{
	int order;
	for (order = 0; order < MAX_ORDER ; order++) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list);
		zone->free_area[order].nr_free = 0;
	}
}

#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
		unsigned long size)
{
	unsigned long snum = pfn_to_section_nr(pfn);
	unsigned long end = pfn_to_section_nr(pfn + size);

	if (FLAGS_HAS_NODE)
		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
	else
		for (; snum <= end; snum++)
			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif

static int __cpuinit zone_batchsize(struct zone *zone)
{
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.  But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is.  So guess.
	 */
	batch = zone->present_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = (1 << (fls(batch + batch/2)-1)) - 1;

	return batch;
}
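
/*
 * Worked example: a 256MB zone has present_pages = 65536, so batch
 * starts at 64 (256KB, under the 512KB cap), drops to 16 after the /4,
 * and the 2^n - 1 clamp gives (1 << (fls(24) - 1)) - 1 = 15.  With the
 * multipliers in setup_pageset() below, the hot list then runs with
 * high = 90 and batch = 15.
 */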

inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	struct per_cpu_pages *pcp;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp[0];		/* hot */
	pcp->count = 0;
	pcp->high = 6 * batch;
	pcp->batch = max(1UL, 1 * batch);
	INIT_LIST_HEAD(&pcp->list);

	pcp = &p->pcp[1];		/* cold */
	pcp->count = 0;
	pcp->high = 2 * batch;
	pcp->batch = max(1UL, batch/2);
	INIT_LIST_HEAD(&pcp->list);
}

/*
 * setup_pagelist_highmark() sets the high water mark of the hot
 * per-cpu pagelist in pageset p to the value high.
 */

static void setup_pagelist_highmark(struct per_cpu_pageset *p,
				unsigned long high)
{
	struct per_cpu_pages *pcp;

	pcp = &p->pcp[0]; /* hot list */
	pcp->high = high;
	pcp->batch = max(1UL, high/4);
	if ((high/4) > (PAGE_SHIFT * 8))
		pcp->batch = PAGE_SHIFT * 8;
}
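
/*
 * Worked example: with the percpu_pagelist_fraction sysctl set to 8,
 * process_zones() below calls this with high = present_pages / 8.  For
 * a 256MB zone (65536 pages) that gives high = 8192 and high/4 = 2048,
 * which the cap then limits to PAGE_SHIFT * 8 = 96 batch pages on a
 * 4KB-page system (PAGE_SHIFT == 12).
 */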


#ifdef CONFIG_NUMA
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * Some NUMA counter updates may also be caught by the boot pagesets.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static struct per_cpu_pageset boot_pageset[NR_CPUS];

/*
 * Dynamically allocate memory for the
 * per cpu pageset array in struct zone.
 */
static int __cpuinit process_zones(int cpu)
{
	struct zone *zone, *dzone;

	for_each_zone(zone) {

		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
					 GFP_KERNEL, cpu_to_node(cpu));
		if (!zone_pcp(zone, cpu))
			goto bad;

		setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));

		if (percpu_pagelist_fraction)
			setup_pagelist_highmark(zone_pcp(zone, cpu),
				(zone->present_pages / percpu_pagelist_fraction));
	}

	return 0;
bad:
	for_each_zone(dzone) {
		if (dzone == zone)
			break;
		kfree(zone_pcp(dzone, cpu));
		zone_pcp(dzone, cpu) = NULL;
	}
	return -ENOMEM;
}

static inline void free_zone_pagesets(int cpu)
{
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

		zone_pcp(zone, cpu) = NULL;
		kfree(pset);
	}
}

static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = NOTIFY_OK;

	switch (action) {
		case CPU_UP_PREPARE:
			if (process_zones(cpu))
				ret = NOTIFY_BAD;
			break;
		case CPU_UP_CANCELED:
		case CPU_DEAD:
			free_zone_pagesets(cpu);
			break;
		default:
			break;
	}
	return ret;
}

static struct notifier_block pageset_notifier =
	{ &pageset_cpuup_callback, NULL, 0 };

void __init setup_per_cpu_pageset(void)
{
	int err;

	/* Initialize per_cpu_pageset for cpu 0.
	 * A cpuup callback will do this for every cpu
	 * as it comes online
	 */
	err = process_zones(smp_processor_id());
	BUG_ON(err);
	register_cpu_notifier(&pageset_notifier);
}

#endif
 | 1981 |  | 
| Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 1982 | static __meminit | 
| Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 1983 | void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) | 
 | 1984 | { | 
 | 1985 | 	int i; | 
 | 1986 | 	struct pglist_data *pgdat = zone->zone_pgdat; | 
 | 1987 |  | 
 | 1988 | 	/* | 
 | 1989 | 	 * The per-page waitqueue mechanism uses hashed waitqueues | 
 | 1990 | 	 * per zone. | 
 | 1991 | 	 */ | 
 | 1992 | 	zone->wait_table_size = wait_table_size(zone_size_pages); | 
 | 1993 | 	zone->wait_table_bits =	wait_table_bits(zone->wait_table_size); | 
 | 1994 | 	zone->wait_table = (wait_queue_head_t *) | 
 | 1995 | 		alloc_bootmem_node(pgdat, zone->wait_table_size | 
 | 1996 | 					* sizeof(wait_queue_head_t)); | 
 | 1997 |  | 
 | 1998 | 	for(i = 0; i < zone->wait_table_size; ++i) | 
 | 1999 | 		init_waitqueue_head(zone->wait_table + i); | 
 | 2000 | } | 
 | 2001 |  | 
| Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 2002 | static __meminit void zone_pcp_init(struct zone *zone) | 
| Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 2003 | { | 
 | 2004 | 	int cpu; | 
 | 2005 | 	unsigned long batch = zone_batchsize(zone); | 
 | 2006 |  | 
 | 2007 | 	for (cpu = 0; cpu < NR_CPUS; cpu++) { | 
 | 2008 | #ifdef CONFIG_NUMA | 
 | 2009 | 		/* Early boot. Slab allocator not functional yet */ | 
| Nick Piggin | 23316bc | 2006-01-08 01:00:41 -0800 | [diff] [blame] | 2010 | 		zone_pcp(zone, cpu) = &boot_pageset[cpu]; | 
| Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 2011 | 		setup_pageset(&boot_pageset[cpu],0); | 
 | 2012 | #else | 
 | 2013 | 		setup_pageset(zone_pcp(zone,cpu), batch); | 
 | 2014 | #endif | 
 | 2015 | 	} | 
 | 2016 | 	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n", | 
 | 2017 | 		zone->name, zone->present_pages, batch); | 
 | 2018 | } | 
 | 2019 |  | 
| Matt Tolentino | c09b424 | 2006-01-17 07:03:44 +0100 | [diff] [blame] | 2020 | static __meminit void init_currently_empty_zone(struct zone *zone, | 
| Dave Hansen | ed8ece2 | 2005-10-29 18:16:50 -0700 | [diff] [blame] | 2021 | 		unsigned long zone_start_pfn, unsigned long size) | 
 | 2022 | { | 
 | 2023 | 	struct pglist_data *pgdat = zone->zone_pgdat; | 
 | 2024 |  | 
 | 2025 | 	zone_wait_table_init(zone, size); | 
 | 2026 | 	pgdat->nr_zones = zone_idx(zone) + 1; | 
 | 2027 |  | 
 | 2028 | 	zone->zone_mem_map = pfn_to_page(zone_start_pfn); | 
 | 2029 | 	zone->zone_start_pfn = zone_start_pfn; | 
 | 2030 |  | 
 | 2031 | 	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); | 
 | 2032 |  | 
 | 2033 | 	zone_init_free_lists(pgdat, zone, zone->spanned_pages); | 
 | 2034 | } | 

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
static void __init free_area_init_core(struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	unsigned long j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;

	pgdat_resize_init(pgdat);
	pgdat->nr_zones = 0;
	init_waitqueue_head(&pgdat->kswapd_wait);
	pgdat->kswapd_max_order = 0;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize;

		realsize = size = zones_size[j];
		if (zholes_size)
			realsize -= zholes_size[j];

		if (j < ZONE_HIGHMEM)
			nr_kernel_pages += realsize;
		nr_all_pages += realsize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone->free_pages = 0;

		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;

		zone_pcp_init(zone);
		INIT_LIST_HEAD(&zone->active_list);
		INIT_LIST_HEAD(&zone->inactive_list);
		zone->nr_scan_active = 0;
		zone->nr_scan_inactive = 0;
		zone->nr_active = 0;
		zone->nr_inactive = 0;
		atomic_set(&zone->reclaim_in_progress, 0);
		if (!size)
			continue;

		zonetable_add(zone, nid, j, zone_start_pfn, size);
		init_currently_empty_zone(zone, zone_start_pfn, size);
		zone_start_pfn += size;
	}
}

static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size;
		struct page *map;

		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map;
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0))
		mem_map = NODE_DATA(0)->node_mem_map;
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
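
/*
 * Rough sizing example (illustrative; sizeof(struct page) varies by
 * architecture and config): with 4KB pages and a 32-byte struct page,
 * a node spanning 256MB covers 65536 pages and thus needs a little
 * over 65536 * 32 = 2MB of contiguous mem_map, allocated from bootmem
 * above unless the architecture supplied one via alloc_remap().
 */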

void __init free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_zone_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}
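
/*
 * Typical use (a sketch; the local variable names are illustrative):
 * NUMA architectures call this once per node from their paging init
 * code, mirroring the single-node free_area_init() wrapper below:
 *
 *	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
 *
 *	zones_size[ZONE_DMA] = dma_pages;
 *	zones_size[ZONE_NORMAL] = normal_pages;
 *	free_area_init_node(nid, NODE_DATA(nid), zones_size,
 *			    node_start_pfn, NULL);
 */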

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return pgdat->pgdat_next;
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
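
/*
 * fragmentation_op backs /proc/buddyinfo; frag_show() emits one line
 * per populated zone with a free-block count per order, e.g.
 * (illustrative values, MAX_ORDER == 11):
 *
 *	Node 0, zone      DMA      3      5      2      1      2 ...
 *	Node 0, zone   Normal    145     62     33     17      9 ...
 */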

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);
		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_NUMA
			seq_printf(m,
				   "\n            numa_hit:       %lu"
				   "\n            numa_miss:      %lu"
				   "\n            numa_foreign:   %lu"
				   "\n            interleave_hit: %lu"
				   "\n            local_node:     %lu"
				   "\n            other_node:     %lu",
				   pageset->numa_hit,
				   pageset->numa_miss,
				   pageset->numa_foreign,
				   pageset->interleave_hit,
				   pageset->local_node,
				   pageset->other_node);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  temp_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->temp_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};
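
/*
 * zoneinfo_op backs /proc/zoneinfo. A trimmed sample of the output
 * that zoneinfo_show() above produces (values illustrative):
 *
 *	Node 0, zone   Normal
 *	  pages free     3867
 *	        min      934
 *	        low      1167
 *	        high     1401
 *	        ...
 *	        protection: (0, 0, 0)
 *	  pagesets
 *	    cpu: 0 pcp: 0
 *	              count: 54
 *	              high:  186
 *	              batch: 31
 */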

static char *vmstat_text[] = {
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_page_table_pages",
	"nr_mapped",
	"nr_slab",

	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_high",
	"pgalloc_normal",
	"pgalloc_dma32",
	"pgalloc_dma",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_high",
	"pgrefill_normal",
	"pgrefill_dma32",
	"pgrefill_dma",

	"pgsteal_high",
	"pgsteal_normal",
	"pgsteal_dma32",
	"pgsteal_dma",

	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_dma",

	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma32",
	"pgscan_direct_dma",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
	"nr_bounce",
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	struct page_state *ps;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	m->private = ps;
	if (!ps)
		return ERR_PTR(-ENOMEM);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;
	return (unsigned long *)ps + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
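
/*
 * vmstat_op backs /proc/vmstat: one "<name> <value>" line per entry
 * of vmstat_text, indexed into a struct page_state snapshot, e.g.
 * (illustrative values):
 *
 *	nr_dirty 492
 *	nr_writeback 0
 *	pgpgin 2181222
 *	pgfault 10806836
 */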

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	long *count;
	unsigned long *src, *dest;

	if (action == CPU_DEAD) {
		int i;

		/* Drain local pagecache count. */
		count = &per_cpu(nr_pagecache_local, cpu);
		atomic_add(*count, &nr_pagecache);
		*count = 0;
		local_irq_disable();
		__drain_pages(cpu);

		/* Add dead cpu's page_states to our own. */
		dest = (unsigned long *)&__get_cpu_var(page_states);
		src = (unsigned long *)&per_cpu(page_states, cpu);

		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
				i++) {
			dest[i] += src[i];
			src[i] = 0;
		}

		local_irq_enable();
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	int j, idx;

	for_each_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			for (idx = j-1; idx >= 0; idx--) {
				struct zone *lower_zone;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}
}
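
/*
 * Worked example (illustrative zone sizes, assuming a three-zone
 * layout of DMA/Normal/HighMem and ratios { 256, 32 }): take a node
 * with DMA = 4096 pages, Normal = 225280 pages, HighMem = 32768
 * pages.  For j == ZONE_HIGHMEM the inner loop yields
 *
 *	Normal->lowmem_reserve[HIGHMEM] = 32768 / 32 = 1024
 *	DMA->lowmem_reserve[HIGHMEM] = (32768 + 225280) / 256 = 1008
 *
 * so a highmem-capable allocation falling back to the Normal zone
 * must leave about 1024 pages (4MB with 4KB pages) free there.
 */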

/*
 * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
 *	that the pages_{min,low,high} values for each zone are set correctly
 *	with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		unsigned long tmp;
		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (pages_min * zone->present_pages) / lowmem_pages;
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high-pages_low) and (pages_low-pages_min)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low   = zone->pages_min + tmp / 4;
		zone->pages_high  = zone->pages_min + tmp / 2;
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}
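
/*
 * Worked example (illustrative): with 4KB pages, min_free_kbytes ==
 * 3831 gives pages_min = 3831 >> 2 = 957 pages overall.  A lowmem
 * zone holding all of lowmem gets tmp = 957, hence
 *
 *	pages_min  = 957
 *	pages_low  = 957 + 957 / 4 = 1196
 *	pages_high = 957 + 957 / 2 = 1435
 */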

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can rerun setup_per_zone_pages_min() whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}
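
/*
 * Usage example (from userspace): raising the watermarks at run time
 * goes through the handler above, e.g.
 *
 *	echo 16384 > /proc/sys/vm/min_free_kbytes
 *
 * which stores 16384 via proc_dointvec() and then recomputes every
 * zone's pages_{min,low,high} via setup_per_zone_pages_min().
 */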

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks; it is
 * only meaningful as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}
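
/*
 * Usage example (from userspace; values assume the common defaults):
 *
 *	# cat /proc/sys/vm/lowmem_reserve_ratio
 *	256	256	32
 *	# echo "256 256 16" > /proc/sys/vm/lowmem_reserve_ratio
 *
 * Writing a smaller ratio makes the corresponding lower zone reserve
 * more pages against fallback from the zones above it.
 */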

/*
 * percpu_pagelist_fraction - changes pcp->high for each zone on each
 * cpu.  The value is a denominator: each zone's hot per-cpu pagelist
 * may hold up to present_pages / percpu_pagelist_fraction pages
 * before excess pages are flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (!write || (ret == -EINVAL))
		return ret;
	for_each_zone(zone) {
		for_each_online_cpu(cpu) {
			unsigned long high;
			high = zone->present_pages / percpu_pagelist_fraction;
			setup_pagelist_highmark(zone_pcp(zone, cpu), high);
		}
	}
	return 0;
}
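
/*
 * Worked example (illustrative): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction on a zone with 225280 present
 * pages sets pcp->high to 225280 / 8 = 28160 pages per online cpu,
 * i.e. each hot list may cache up to 1/8th of the zone.
 */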

__initdata int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif
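
/*
 * Usage example: booting a NUMA kernel with "hashdist=1" on the
 * command line makes alloc_large_system_hash() below use __vmalloc(),
 * so the large boot-time hash tables are spread across nodes instead
 * of sitting in one node's contiguous low memory.
 */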

/*
 * allocate a large system hash table, from bootmem early in boot or
 * from vmalloc/the page allocator later on
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long limit)
{
	unsigned long long max = limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
		numentries >>= 20 - PAGE_SHIFT;
		numentries <<= 20 - PAGE_SHIFT;

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);
	}
	/* round up to the nearest power of 2 in size */
	numentries = 1UL << (long_log2(numentries) + 1);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}

	if (numentries > max)
		numentries = max;

	log2qty = long_log2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = alloc_bootmem(size);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			unsigned long order;
			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
				;
			table = (void *)__get_free_pages(GFP_ATOMIC, order);
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
	       tablename,
	       (1U << log2qty),
	       long_log2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
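
/*
 * Usage sketch (hedged: modelled on the dentry cache setup, exact
 * caller arguments may differ):
 *
 *	dentry_hashtable =
 *		alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_head),
 *					dhash_entries,
 *					13,	-- 1 bucket per 8KB of lowmem
 *					HASH_EARLY,
 *					&d_hash_shift,
 *					&d_hash_mask,
 *					0);
 *
 * producing a boot line such as (illustrative, 32-bit values):
 *
 *	Dentry cache hash table entries: 131072 (order: 7, 524288 bytes)
 */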