#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

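/*
 * Illustrative usage sketch (not part of the original file): the kind of
 * short, non-sleeping path atomic kmaps are meant for. The helper name and
 * its arguments are hypothetical; kmap_atomic()/kunmap_atomic() are the
 * wrappers around the __kmap_atomic()/__kunmap_atomic() functions below.
 */
#if 0	/* example only, never compiled */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);	/* sleeping is illegal from here */

	memcpy(dst, src, len);				/* keep this window short */
	kunmap_atomic(src, KM_USER0);			/* must unmap on the same CPU */
}
#endif
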
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
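
/*
 * Illustrative usage sketch (not part of the original file): mapping a raw
 * pfn is useful for memory that has no struct page, e.g. a reserved or
 * device region. The helper, the pfn and the offset are hypothetical.
 */
#if 0	/* example only, never compiled */
static u32 example_peek_reserved_word(unsigned long pfn, unsigned long offset)
{
	u32 *base = kmap_atomic_pfn(pfn, KM_USER0);	/* no struct page needed */
	u32 val = base[offset / sizeof(u32)];

	kunmap_atomic(base, KM_USER0);			/* same pairing rules apply */
	return val;
}
#endif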

struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);