/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

static mempool_t *page_pool, *isa_page_pool;

static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data)
{
	return alloc_page(gfp_mask | GFP_DMA);
}

static void page_pool_free(void *page, void *data)
{
	__free_page(page);
}

/*
 * pkmap_count[] is not a pure reference count:
 *  0 means the entry is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

static void *page_pool_alloc(gfp_t gfp_mask, void *data)
{
	return alloc_page(gfp_mask);
}

static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

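/*
 * Unmap all pkmap entries whose count has dropped to 1 (mapped but
 * unused since the last TLB flush) and flush the kernel TLB range.
 * Called with kmap_lock held.
 */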
static void flush_all_zero_pkmaps(void)
{
	int i;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		if (pte_none(pkmap_page_table[i]))
			BUG();

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, (unsigned long)page_address(page),
			  &pkmap_page_table[i]);

		set_page_address(page, NULL);
	}
	flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

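/*
 * Find a free pkmap slot for @page, map it, and return the new kernel
 * virtual address.  May sleep waiting for another user to release a
 * slot.  Called with kmap_lock held; the lock is dropped around the
 * sleep and re-taken afterwards.
 */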
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;

start:
	count = LAST_PKMAP;
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
		if (!last_pkmap_nr) {
			flush_all_zero_pkmaps();
			count = LAST_PKMAP;
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&pkmap_map_wait, &wait);
			spin_unlock(&kmap_lock);
			schedule();
			remove_wait_queue(&pkmap_map_wait, &wait);
			spin_lock(&kmap_lock);

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

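/*
 * kmap_high - map a highmem page and return its kernel virtual address.
 * Creates a new pkmap entry if the page is not already mapped and takes
 * a reference that must be released with kunmap_high().  May sleep, so
 * it must not be called from interrupt context; callers normally reach
 * this through the kmap() wrapper.
 */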
void fastcall *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 *
	 * We cannot call this from interrupts, as it may block
	 */
	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
		BUG();
	spin_unlock(&kmap_lock);
	return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);

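/*
 * kunmap_high - release a reference on a page mapped with kmap_high().
 * The pkmap entry is not torn down immediately; its count drops to 1
 * and the slot is recycled later by flush_all_zero_pkmaps().  Wakes up
 * any tasks waiting in map_new_virtual() for a free slot.
 */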
void fastcall kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	int need_wakeup;

	spin_lock(&kmap_lock);
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		BUG();
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		need_wakeup = waitqueue_active(&pkmap_map_wait);
	}
	spin_unlock(&kmap_lock);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);

#define POOL_SIZE	64

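/*
 * Set up the mempool that backs bounce buffering for highmem pages.
 * There is nothing to do on machines without high memory, so the pool
 * is only created when totalhigh is non-zero.
 */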
static __init int init_emergency_pool(void)
{
	struct sysinfo i;
	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
	if (!page_pool)
		BUG();
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);

/*
 * highmem version: kmap the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif

#define ISA_POOL_SIZE	16

/*
 * Gets called every time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL);
	if (!isa_page_pool)
		BUG();

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages.  Depending on the
 * queue gfp mask set, *to may or may not be a highmem page; kmap it
 * unconditionally and the right thing happens either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy;
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		flush_dcache_page(tovec->bv_page);
		bounce_copy_vec(tovec, vfrom);
	}
}

static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		mempool_free(bvec->bv_page, pool);
		dec_page_state(nr_bounce);
	}

	bio_endio(bio_orig, bio_orig->bi_size, err);
	bio_put(bio);
}

static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	bounce_end_io(bio, isa_page_pool, err);
	return 0;
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, page_pool, err);
	return 0;
}

static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;

	__bounce_end_io_read(bio, isa_page_pool, err);
	return 0;
}

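/*
 * Walk the original bio and, for every segment that lies above
 * q->bounce_pfn, allocate a bounce page from the given pool and
 * redirect the segment to it (copying the data down first for writes).
 * If anything was bounced, build a new bio, point *bio_orig at it, and
 * let the bounce end_io handlers complete the original bio later.
 */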
static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
			mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) < q->bounce_pfn)
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio)
			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_page_state(nr_bounce);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

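/*
 * blk_queue_bounce - bounce the pages of a bio that the queue's device
 * cannot address directly.  Picks the regular highmem pool or the ISA
 * pool based on the queue's bounce_gfp mask and, if any page needs
 * bouncing, replaces *bio_orig with the bounced bio.
 */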
void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (q->bounce_pfn >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

/*
 * page_address_map freelist, allocated from page_address_maps.
 */
static struct list_head page_address_pool;	/* freelist */
static spinlock_t pool_lock;			/* protects page_address_pool */

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

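/*
 * page_address - return the kernel virtual address of a page, or NULL
 * if the page is in high memory and not currently mapped.  Lowmem pages
 * are translated directly; highmem pages are looked up in
 * page_address_htable.
 */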
void *page_address(struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

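/*
 * set_page_address - record (virtual != NULL) or forget (virtual == NULL)
 * the page->virtual association for a highmem page.  Entries are taken
 * from and returned to the page_address_pool freelist.
 */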
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		BUG_ON(list_empty(&page_address_pool));

		spin_lock_irqsave(&pool_lock, flags);
		pam = list_entry(page_address_pool.next,
				struct page_address_map, list);
		list_del(&pam->list);
		spin_unlock_irqrestore(&pool_lock, flags);

		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				spin_lock_irqsave(&pool_lock, flags);
				list_add_tail(&pam->list, &page_address_pool);
				spin_unlock_irqrestore(&pool_lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

static struct page_address_map page_address_maps[LAST_PKMAP];

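/*
 * Called once at boot: put every static page_address_map on the
 * freelist and initialize the hash buckets and their locks.
 */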
void __init page_address_init(void)
{
	int i;

	INIT_LIST_HEAD(&page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
		list_add(&page_address_maps[i].list, &page_address_pool);
	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
	spin_lock_init(&pool_lock);
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */