| Benjamin Herrenschmidt | 850f6ac | 2009-06-18 19:25:00 +0000 | [diff] [blame] | 1 | /* | 
 | 2 |  * highmem.c: virtual kernel memory mappings for high memory | 
 | 3 |  * | 
 | 4 |  * PowerPC version, stolen from the i386 version. | 
 | 5 |  * | 
 | 6 |  * Used in CONFIG_HIGHMEM systems for memory pages which | 
 | 7 |  * are not addressable by direct kernel virtual addresses. | 
 | 8 |  * | 
 | 9 |  * Copyright (C) 1999 Gerhard Wichert, Siemens AG | 
 | 10 |  *		      Gerhard.Wichert@pdb.siemens.de | 
 | 11 |  * | 
 | 12 |  * | 
 | 13 |  * Redesigned the x86 32-bit VM architecture to deal with | 
 | 14 |  * up to 16 Terabyte physical memory. With current x86 CPUs | 
 | 15 |  * we now support up to 64 Gigabytes physical RAM. | 
 | 16 |  * | 
 | 17 |  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> | 
 | 18 |  * | 
 | 19 |  * Reworked for PowerPC by various contributors. Moved from | 
 | 20 |  * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp. | 
 | 21 |  */ | 
 | 22 |  | 
 | 23 | #include <linux/highmem.h> | 
 | 24 | #include <linux/module.h> | 
 | 25 |  | 
 | 26 | /* | 
 | 27 |  * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap | 
 | 28 |  * gives a more generic (and caching) interface. But kmap_atomic can | 
 | 29 |  * be used in IRQ contexts, so in some (very limited) cases we need | 
 | 30 |  * it. | 
 | 31 |  */ | 
/*
 * Map a single page into the kernel's per-CPU atomic-kmap fixmap area
 * with the page protection bits given in @prot, and return the kernel
 * virtual address of the mapping.
 *
 * @page: the page to map (may be a lowmem or a highmem page)
 * @type: caller-chosen kmap slot (enum km_type); selects which of the
 *        KM_TYPE_NR per-CPU fixmap slots is used
 * @prot: protection flags applied to the new PTE
 *
 * Must be paired with kunmap_atomic(), which re-enables pagefaults.
 * The mapping is strictly per-CPU (note local_flush_tlb_page and the
 * smp_processor_id()-based slot index), so the caller must not sleep
 * or migrate between kmap_atomic_prot() and kunmap_atomic().
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	/* Lowmem pages are permanently mapped; no fixmap slot needed. */
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	/* Each CPU owns a private bank of KM_TYPE_NR fixmap slots. */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be empty: catches nested use of the same km_type. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	/* Install the PTE, then flush this CPU's stale TLB entry for it. */
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
 | 54 |  | 
/*
 * Tear down a mapping established by kmap_atomic_prot() and re-enable
 * pagefaults.
 *
 * @kvaddr: address returned by kmap_atomic_prot()
 * @type:   the same km_type slot the caller passed when mapping
 *
 * Without CONFIG_DEBUG_HIGHMEM the PTE is deliberately left in place;
 * the slot is simply overwritten by the next kmap_atomic of the same
 * type on this CPU.  With CONFIG_DEBUG_HIGHMEM the PTE is cleared and
 * the TLB flushed so any use-after-unmap faults immediately.
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Addresses below the fixmap kmap window were lowmem pages
	 * returned straight from page_address() — nothing to unmap.
	 */
	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	/* Caller must unmap with the same km_type it mapped with. */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);