/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

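/*
 * Clear or copy a huge page one base page at a time, calling
 * cond_resched() between base pages so a multi-megabyte operation
 * does not monopolize the CPU.
 */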
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

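/*
 * Place a free huge page on its node's free list and update the
 * global and per-node free counts.  Caller must hold hugetlb_lock.
 */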
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

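/*
 * Walk the zonelist for the task's mempolicy and take the first free
 * huge page from a node the cpuset allows.  Called with hugetlb_lock
 * held.
 */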
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

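/*
 * Return a huge page to the buddy allocator: drop it from the pool
 * counts, clear the page flags left over from its time in the pool,
 * and free the underlying compound page.
 */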
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

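/*
 * Compound page destructor, run when the last reference to a huge
 * page is dropped.  Surplus pages go straight back to the buddy
 * allocator; persistent pages return to the free list.  Any quota
 * charged against the owning mapping is released.
 */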
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
	set_page_private(page, 0);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

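/*
 * Allocate a fresh huge page for the given node directly from the
 * buddy allocator and account it to the pool.  The final put_page()
 * drops our reference and hands the page to free_huge_page(), which
 * places it on the free list.
 */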
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

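/*
 * Allocate one fresh huge page, trying each online node in turn
 * starting at hugetlb_next_nid so allocations stay interleaved across
 * nodes.  Returns 1 on success, 0 if no node could satisfy the
 * allocation.
 */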
static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

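/*
 * Allocate a surplus huge page straight from the buddy allocator,
 * subject to the nr_overcommit_huge_pages limit.  The counters are
 * bumped optimistically before the allocation and rolled back on
 * failure; see the comment below for the race this trades away.
 */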
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit.
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.  Called with hugetlb_lock held; the lock is dropped
 * and reacquired around the allocations.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}

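/*
 * For shared (VM_MAYSHARE) mappings a page should already have been
 * set aside via the reservation mechanism, so just dequeue one.
 */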
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

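/*
 * Private mappings are not reserved in advance: charge quota now,
 * prefer a page from the pool (leaving reserved pages alone), and
 * fall back to a surplus page from the buddy allocator.
 */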
static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

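/*
 * Common allocation entry point for faults: pick the shared or
 * private path, then stash the mapping in page_private() so
 * free_huge_page() can release the quota later.
 */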
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

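/*
 * Sum a per-node counter array over the nodes allowed by the current
 * task's cpuset.
 */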
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

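/*
 * The "persistent" pool is the set of huge pages the kernel keeps
 * around even while they are unused; surplus pages are transient and
 * go back to the buddy allocator when freed.
 */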
#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

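/*
 * Build a huge-page pte with the vma's protections, marked young and,
 * for writable mappings, dirty and writable.
 */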
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}

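/*
 * Copy the huge-page portion of a page table at fork().  For private
 * (COW) mappings the parent's ptes are write-protected so that either
 * side faults before writing.
 */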
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

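/*
 * Tear down the huge ptes in [start, end) and release the pages.
 * Pages are gathered on a local list and only put after the TLB
 * flush, so no CPU can still be using a page when it is freed.
 * Caller must hold the i_mmap_lock of the backing file.
 */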
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

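/*
 * Break copy-on-write for a huge page: if we hold the only reference,
 * simply make the pte writable; otherwise allocate a new huge page,
 * copy into it, and swap it into the page table if the pte has not
 * changed while the lock was dropped for the copy.
 */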
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

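/*
 * Handle a fault on a hole: find or create the page in the file's
 * page cache, then, still under the page lock to guard against
 * truncation, install the new pte.
 */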
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

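/*
 * Top-level hugetlb fault handler.  The instantiation mutex
 * serializes allocation so two CPUs faulting on the same page don't
 * both consume a huge page; write faults on present, read-only ptes
 * go to hugetlb_cow().
 */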
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

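/*
 * get_user_pages() helper for hugetlb VMAs: walk the range a base
 * page at a time, faulting pages in as needed, and fill in the
 * pages[] and vmas[] arrays.
 */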
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts per
		 * hugepage.  We have to make sure we get the first,
		 * for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

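/*
 * Apply newprot to every huge pte in [address, end).  The i_mmap_lock
 * serializes against pmd sharing; any pte that huge_pmd_unshare()
 * tears down is skipped here and will be re-faulted later with the
 * new protection.
 */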
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

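/*
 * Reservations for a hugetlbfs inode are tracked as a list of
 * non-overlapping [from, to) file_region ranges, in units of huge
 * pages, hung off inode->i_mapping->private_list.  region_chg()
 * reports how many pages a new reservation would add (preallocating a
 * placeholder entry), region_add() commits it, and region_truncate()
 * drops everything past a given offset.
 *
 * For example, with existing regions [0,2) and [5,8),
 * region_chg(head, 1, 6) returns 3 (pages 2, 3 and 4 are new), and a
 * subsequent region_add(head, 1, 6) coalesces the list into the
 * single region [0,8).
 */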
struct file_region {
	struct list_head link;
	long from;
	long to;
};

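/*
 * Merge the range [f, t) into the region list, coalescing any regions
 * it overlaps.  region_chg() has already guaranteed that an entry to
 * extend exists, so this cannot fail (hence the constant 0 return).
 */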
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher than us, extend our area to
		 * include it completely.  If this is not the first area
		 * we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

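/*
 * Return how many huge pages the reservation [f, t) would add over
 * what is already recorded, without committing anything.  If no
 * suitable entry exists, a zero-size placeholder is allocated here so
 * that the subsequent region_add() cannot fail; -ENOMEM is returned
 * if that allocation fails.
 */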
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate the new region at the right position, but with
	 * zero size, so that we are guaranteed space to record the
	 * reservation when region_add() is called later. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

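/*
 * Truncate the region list at offset end: trim a region straddling
 * the cut and free everything beyond it.  Returns the number of
 * reserved huge pages released.
 */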
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

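/*
 * Adjust the global reservation by delta huge pages: for a positive
 * delta, grow the pool with surplus pages and check cpuset-local
 * availability; for a negative delta, hand back unneeded surplus
 * pages.  Returns 0 on success, -ENOMEM if the pages cannot be
 * guaranteed.
 */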
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation, as the accounting is done on a global variable.
	 * Such a reservation is of little use with cpusets, because it is
	 * never checked against page availability in the task's current
	 * cpuset: an application can still be OOM-killed by the kernel if
	 * its cpuset runs out of free hugetlb pages.  Enforcing strict
	 * accounting per cpuset is nearly impossible (or too ugly), since
	 * cpusets are fluid: tasks and memory nodes can be moved between
	 * them dynamically.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpusets
	 * is undesirable.  To preserve some of the semantics, we fall back
	 * to checking against the current free page count as a best
	 * effort, hopefully minimizing the impact of the semantics cpusets
	 * introduce.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

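/*
 * Reserve huge pages for the file range [from, to) of a hugetlbfs
 * inode: compute the charge, take filesystem quota, account the pages
 * globally and, only if all of that succeeds, record the range in the
 * inode's region list.
 */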
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

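/*
 * Undo reservations beyond file offset (in huge pages).  freed is how
 * many pages truncation actually released; the remainder of the
 * truncated reservation, which was never faulted in, is handed back
 * to the quota and the global reservation count.
 */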
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}