/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

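/*
 * Put a free huge page back on its home node's free list.
 * Caller must hold hugetlb_lock.
 */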
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

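/*
 * Take a free huge page off the free lists, preferring nodes allowed
 * by the VMA's memory policy and the current cpuset.  Caller must hold
 * hugetlb_lock.
 */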
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = (*z)->zone_pgdat->node_id;
		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

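/*
 * Allocate a brand new huge page from the buddy allocator, interleaving
 * allocations round-robin across the online nodes, and account it in
 * the pool totals.
 */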
static struct page *alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
	}
	return page;
}

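/*
 * Compound-page destructor, invoked when the last reference to a huge
 * page is dropped.  Rather than returning the memory to the buddy
 * allocator, put the page back on the hugetlb free lists.
 */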
void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);
	page[1].lru.next = NULL;			/* reset dtor */

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

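/*
 * Allocate a huge page from the pool for the given mapping, set up its
 * compound-page destructor and zero its contents.  Returns NULL if the
 * pool is empty.
 */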
struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int i;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	}
	spin_unlock(&hugetlb_lock);
	set_page_count(page, 1);
	page[1].lru.next = (void *)free_huge_page;	/* set dtor */
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
		clear_user_highpage(&page[i], addr);
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;
	struct page *page;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		page = alloc_fresh_huge_page();
		if (!page)
			break;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
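/*
 * Hand a huge page back to the buddy allocator: clean up the struct
 * page flags and reference counts of the constituent pages, then free
 * the whole HUGETLB_PAGE_ORDER block.  Caller must hold hugetlb_lock.
 */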
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
		set_page_count(&page[i], 0);
	}
	set_page_count(page, 1);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

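/*
 * When shrinking the pool, release pages that live in precious low
 * memory first.  Without CONFIG_HIGHMEM every page is lowmem, so there
 * is nothing to prefer and this becomes a no-op.
 */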
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

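/*
 * Grow or shrink the pool to @count huge pages.  Growing stops as soon
 * as a fresh allocation fails; shrinking can only release pages that
 * are currently free.  Returns the resulting pool size.
 */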
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		struct page *page = alloc_fresh_huge_page();
		if (!page)
			return nr_huge_pages;
		spin_lock(&hugetlb_lock);
		enqueue_huge_page(page);
		spin_unlock(&hugetlb_lock);
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

int is_hugepage_mem_enough(size_t size)
{
	return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

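/*
 * Build the huge-page PTE for a page in the given VMA.  Read-only
 * unless @writable is set.
 */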
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

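/*
 * Mark an existing huge PTE writable and dirty in place, flushing
 * whatever per-arch state is needed for the new protection to take
 * effect.
 */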
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}

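/*
 * Copy the huge-page mappings of @vma from @src to @dst at fork time.
 * For private writable mappings both sides are write-protected so the
 * pages are shared copy-on-write.
 */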
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

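/*
 * Tear down the huge-page mappings in [start, end), dropping the page
 * references and rss accounting taken when they were installed.  The
 * range must be huge-page aligned.
 */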
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		put_page(page);
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}

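/*
 * Break copy-on-write for a private huge page: allocate a fresh huge
 * page, copy the old contents into it and switch the PTE over.  If we
 * hold the only reference, just make the existing page writable.
 * Called with mm->page_table_lock held; the lock is dropped around the
 * copy itself and retaken before the PTE is rechecked and updated.
 */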
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int i, avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
		copy_user_highpage(new_page + i, old_page + i,
				   address + i*PAGE_SIZE);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

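/*
 * Handle a fault on a huge page that has no PTE yet: look the page up
 * in (or add it to) the page cache, allocating a fresh zeroed page if
 * needed, then install the PTE under mm->page_table_lock.
 */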
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

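/*
 * Main huge-page fault entry point, called from the arch fault handler.
 * Dispatches to hugetlb_no_page() for missing PTEs and to hugetlb_cow()
 * for write faults on read-only private mappings.
 */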
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	entry = *ptep;
	if (pte_none(entry))
		return hugetlb_no_page(mm, vma, address, ptep, write_access);

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);

	return ret;
}

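/*
 * get_user_pages() helper for huge-page VMAs: walk the range, faulting
 * pages in as necessary, and fill @pages and @vmas with the results.
 * Returns the updated page count @i, or -EFAULT if the very first page
 * cannot be faulted in.
 */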
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	vpfn = vaddr/PAGE_SIZE;
	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		if (pages) {
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}