/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

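/*
 * Zero a huge page one base page at a time, with a cond_resched()
 * between base pages so that clearing does not monopolize the CPU.
 */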
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

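/*
 * Copy a huge page, again one base page at a time with rescheduling
 * points in between.
 */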
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

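/*
 * Put a huge page back on its node's free list.  Caller must hold
 * hugetlb_lock.
 */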
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

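/*
 * Take a free huge page from the first node that has one and is
 * allowed by @vma's memory policy and the current cpuset.  Caller
 * must hold hugetlb_lock.  Returns NULL if no page is available.
 */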
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

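/*
 * Compound page destructor, run when the last reference to a huge
 * page is dropped: return the page to the hugetlb free lists.
 */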
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

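/*
 * Allocate a fresh huge page from the buddy allocator, round-robining
 * across the online nodes.  Returns 1 on success, 0 on failure.
 */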
static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = next_node(nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

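/*
 * Allocate a huge page for @vma.  Shared (VM_MAYSHARE) mappings
 * consume their reservation; private mappings may not dip into the
 * pages reserved for shared mappings.
 */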
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n",
			free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

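/* Parse the "hugepages=" boot option. */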
static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

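/*
 * Sum a per-node counter over just the nodes allowed by the current
 * task's cpuset.
 */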
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
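/*
 * Hand a huge page back to the buddy allocator, clearing any page
 * flags the hugetlb allocator may have left behind.
 */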
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;	/* clear the compound page destructor */
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
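/*
 * When shrinking the pool, free lowmem huge pages first; highmem
 * pages are skipped, since lowmem is the scarcer resource.
 */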
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

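/*
 * Grow or shrink the pool to @count pages, never shrinking below the
 * number of reserved pages.  Returns the resulting pool size.
 */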
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

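/* Handler for the nr_hugepages sysctl (vm.nr_hugepages). */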
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

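/*
 * Build a huge PTE for @page with @vma's protection bits, writable
 * only when @writable is set.
 */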
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

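/*
 * Mark an existing huge PTE writable and dirty, updating the MMU
 * cache if the hardware bits actually changed.
 */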
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}

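/*
 * Copy @vma's huge PTEs from the @src mm into the @dst mm at fork
 * time.  For private mappings the source PTE is write-protected as it
 * is copied, so a later write by either side triggers COW.
 */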
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

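/*
 * Unmap @vma's huge pages in [start, end).  The pages are gathered on
 * a local list and only released after the TLB flush, so that no
 * other CPU can still hold a stale translation to a freed page.
 */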
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

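/*
 * Handle a write fault on a huge page.  If we hold the only reference
 * we can simply make the PTE writable; otherwise allocate a new huge
 * page, copy the old contents into it, and install it.
 */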
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

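/*
 * Fault in a huge page that has no PTE yet: find it in the page
 * cache, or allocate and zero a fresh page, adding it to the page
 * cache for shared mappings.
 */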
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

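/*
 * Top-level huge page fault handler, called for hugetlb VMAs from the
 * generic fault path.
 */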
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

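/*
 * get_user_pages() back end for hugetlb VMAs: walk the huge PTEs
 * covering the request, faulting pages in as needed, and fill in
 * @pages and @vmas.
 */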
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

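/*
 * mprotect() for hugetlb VMAs: rewrite every huge PTE in the range
 * with the new protection bits, then flush the TLB.
 */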
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

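/*
 * Reservations for shared mappings are tracked as a list of
 * non-overlapping [from, to) file regions on the inode's
 * mapping->private_list, so that each page of the file is reserved at
 * most once however many times it is mapped.
 */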
struct file_region {
	struct list_head link;
	long from;
	long to;
};

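/*
 * Record that [f, t) is reserved, merging with any regions it
 * overlaps.  region_chg() must have been called first so that a
 * region to extend is guaranteed to exist.
 */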
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

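/*
 * Return how many pages reserving [f, t) would add over what the map
 * already covers, pre-allocating a zero-size region when necessary so
 * that the subsequent region_add() cannot fail.
 */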
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

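/*
 * Drop every reservation at or above @end, trimming any region that
 * straddles it, and return how many pages were released.
 */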
static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

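/*
 * Try to set aside @delta more huge pages as reserved, failing if the
 * free pool cannot cover all outstanding reservations.
 */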
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

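/*
 * Reserve huge pages for the range [from, to) of huge-page offsets in
 * the file, so that later faults on a shared mapping of that range do
 * not fail for lack of a huge page.
 */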
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because the reservation is not checked against page availability
	 * for the current cpuset. The application can still be OOM'ed by
	 * the kernel for lack of free huge pages in the cpuset that the
	 * task is in. Attempting to enforce strict accounting with cpusets
	 * is almost impossible (or too ugly), because cpusets are so fluid
	 * that a task or memory node can be dynamically moved between them.
	 *
	 * This change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against current free page
	 * availability as a best attempt, hopefully minimizing the impact
	 * of the semantic change that cpusets bring.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

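/*
 * Undo the reservations for the part of the file above @offset;
 * @freed is the number of pages actually freed by the truncate.
 */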
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}