/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/blktrace_api.h>
#include <asm/tlbflush.h>

/*
 * Each pkmap_count[] entry is not a pure "count":
 *  0 means that the slot is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
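
/*
 * Illustrative lifecycle of one pkmap_count[] entry (a sketch for the
 * reader, not code from this file):
 *
 *	0 --map_new_virtual()+kmap_high()--> 2 --kunmap_high()--> 1
 *	1 --flush_all_zero_pkmaps()--> 0
 *
 * map_new_virtual() installs the PTE and sets the count to 1, and
 * kmap_high() immediately bumps it to 2 for the first user; the count
 * only returns to 0 once the stale TLB entries have been flushed.
 */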
#ifdef CONFIG_HIGHMEM

unsigned long totalhigh_pages __read_mostly;

unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
			NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}
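
/*
 * Minimal usage sketch (hypothetical caller, not from this file):
 * memory reporting code such as si_meminfo() can publish the total as
 *
 *	val->freehigh = nr_free_highpages();
 *
 * Note that ZONE_MOVABLE is included only when that zone was carved
 * out of highmem, which is what zone_movable_is_highmem() reports.
 */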

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * Zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/*
 * Flush all unused kmap mappings in order to remove stray mappings.
 */
void kmap_flush_unused(void)
{
	spin_lock(&kmap_lock);
	flush_all_zero_pkmaps();
	spin_unlock(&kmap_lock);
}
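
/*
 * Illustrative call site (an assumption, not from this file): code
 * that must guarantee no leftover kernel mappings of idle highmem
 * pages remain - a paravirt backend returning pages to a hypervisor,
 * say - can do
 *
 *	kmap_flush_unused();
 *
 * which immediately tears down every mapping whose count has already
 * dropped to 1.
 */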

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep until somebody else unmaps their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			spin_unlock(&kmap_lock);
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			spin_lock(&kmap_lock);

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}
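
/*
 * Worked example of the scan above (illustrative): suppose every slot
 * sits at count 1 (mapped once, since unmapped, TLB flush pending).
 * The increment wraps last_pkmap_nr to 0, flush_all_zero_pkmaps()
 * drops all LAST_PKMAP counts to 0 in one batched TLB flush, and the
 * check right after the wrap finds slot 0 free.  Only when every slot
 * is pinned (count > 1) does the caller sleep on pkmap_map_wait.
 */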

void fastcall *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 *
	 * We cannot call this from interrupts, as it may block.
	 */
	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	spin_unlock(&kmap_lock);
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

void fastcall kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	int need_wakeup;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, with
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
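
/*
 * Usage sketch (illustrative, not part of this file): callers do not
 * normally invoke kmap_high()/kunmap_high() directly; they go through
 * kmap()/kunmap() from <linux/highmem.h>, which route highmem pages
 * here:
 *
 *	void *vaddr = kmap(page);	// may sleep waiting for a slot
 *	memcpy(vaddr, buf, len);	// hypothetical buffer copy
 *	kunmap(page);
 *
 * Because kmap() may block in map_new_virtual(), it must not be used
 * from interrupt or atomic context; kmap_atomic() is the tool there.
 */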
#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
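
/*
 * hash_ptr() folds the struct page pointer down to PA_HASH_ORDER
 * (= 7) bits, so lookups spread over the 128 buckets above, each
 * with its own lock and collision list.
 */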

void *page_address(struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);
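
/*
 * Minimal lookup sketch (hypothetical caller, not from this file):
 *
 *	void *vaddr = page_address(page);
 *	if (!vaddr)
 *		vaddr = kmap(page);	// highmem page, not currently mapped
 *
 * page_address() returns NULL for a highmem page with no active kmap
 * entry, so callers that need a mapping unconditionally must fall
 * back to kmap().
 */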

void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];
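
/*
 * page_address_maps[] provides exactly LAST_PKMAP entries - one per
 * pkmap slot - so the freelist built from it in page_address_init()
 * can never be exhausted by set_page_address(); the
 * BUG_ON(list_empty(...)) in the add path above is a pure sanity
 * check.
 */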

void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */