#include <linux/highmem.h>
#include <linux/module.h>

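/*
 * kmap() establishes a long-lived, sleepable mapping of a page.  Lowmem
 * pages are permanently mapped, so their direct kernel address is
 * returned without touching the kmap pool.
 */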
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

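/*
 * kunmap() drops a mapping taken with kmap().  The kmap pool lock is not
 * interrupt safe, so this must never be called from interrupt context;
 * lowmem pages were never entered into the pool, so they need no undo.
 */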
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.  (An illustrative
 * usage sketch follows kunmap_atomic() below.)
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

	return (void*) vaddr;
}

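/*
 * kunmap_atomic() releases a slot taken by kmap_atomic() and re-enables
 * preemption.  The fixmap pte is cleared and flushed immediately rather
 * than left stale; see the comment in the body.
 */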
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();
#endif
	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is a bad
	 * idea too, in case the page changes cacheability attributes or
	 * becomes a protected page in a hypervisor.
	 */
	kpte_clear_flush(kmap_pte-idx, vaddr);

	dec_preempt_count();
	preempt_check_resched();
}
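
/*
 * Illustrative sketch, not code from this file: a typical short atomic
 * section copying data out of a possibly-highmem page.  The helper name
 * and the choice of the KM_USER0 slot are assumptions for this example.
 *
 *	static void copy_out(struct page *page, void *dst, size_t len)
 *	{
 *		char *src = kmap_atomic(page, KM_USER0);
 *		memcpy(dst, src, len);
 *		kunmap_atomic(src, KM_USER0);
 *	}
 *
 * Preemption is disabled between the two calls, so nothing in the copy
 * may sleep, and the mapping is torn down and flushed on kunmap_atomic().
 */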

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));

	return (void*) vaddr;
}
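
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * copying one raw page frame that has no struct page, as a crash-dump
 * reader might.  The buf, pfn and KM_PTE0 slot are hypothetical.
 *
 *	void *vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr, KM_PTE0);
 */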
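/*
 * kmap_atomic_to_page() recovers the struct page behind an address
 * returned by kmap_atomic().  Lowmem addresses translate directly;
 * fixmap addresses are looked up through the kmap pte array.
 */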
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);