#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

/*
 * Map a (possibly highmem) page into kernel virtual space.  Lowmem pages
 * are permanently mapped, so their linear address is returned directly;
 * highmem pages get a slot from the kmap pool via kmap_high(), and any
 * stale TLB entry for that slot is flushed.  May sleep, so process
 * context only.
 */
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
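
/*
 * A minimal usage sketch, kept out of the build with #if 0: it assumes
 * the generic kmap()/kunmap() names resolve to the __kmap()/__kunmap()
 * wrappers above, and zero_highpage_slow() is a made-up name used only
 * for illustration.
 */
#if 0
static void zero_highpage_slow(struct page *page)
{
	/* kmap() may sleep, so this is only valid in process context */
	void *vaddr = kmap(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);		/* release the kmap pool slot */
}
#endif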

/*
 * Undo a kmap().  Only highmem pages hold a kmap pool entry that must be
 * released; kunmap_high() takes the pool lock, so this may not be called
 * from interrupt context.
 */
void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed, and because kmap must perform a
 * global TLB invalidation whenever the kmap pool wraps, which
 * kmap_atomic avoids.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* each CPU owns a private run of KM_TYPE_NR fixmap slots */
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte - idx)))
		BUG();
#endif
	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

	/* lowmem addresses were never remapped, nothing to tear down */
	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
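
/*
 * A minimal sketch of the atomic path, kept out of the build with #if 0.
 * It assumes the generic kmap_atomic()/kunmap_atomic() names resolve to
 * the wrappers above and that the KM_USER0 slot is free in the calling
 * context; copy_from_highpage() is a made-up name used for illustration.
 */
#if 0
static void copy_from_highpage(void *dst, struct page *page)
{
	/* pagefaults are disabled: no sleeping between these two calls */
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}
#endif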

#ifndef CONFIG_LIMITED_DMA
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
#endif /* CONFIG_LIMITED_DMA */
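
/*
 * A sketch of how kmap_atomic_pfn() might be used, kept out of the build
 * with #if 0: peek_phys_word() is a made-up name, and the caller is
 * assumed to know that the pfn is safe to map with kmap_prot and that
 * the KM_USER0 slot is free.
 */
#if 0
static u32 peek_phys_word(unsigned long pfn, unsigned long offset)
{
	u32 *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = vaddr[offset / sizeof(u32)];	/* offset must stay within the page */

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
#endif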

/*
 * Translate an atomic kmap address back to the struct page backing it:
 * addresses below FIXADDR_START live in the linear mapping, fixmap
 * addresses are looked up through the corresponding kmap pte.
 */
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
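
/*
 * A round-trip sketch, kept out of the build with #if 0: an address
 * handed out by kmap_atomic() should translate back to the original
 * page.  kmap_atomic_to_page() is assumed to be the generic wrapper for
 * __kmap_atomic_to_page() above.
 */
#if 0
static void kmap_atomic_to_page_check(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(kmap_atomic_to_page(vaddr) != page);
	kunmap_atomic(vaddr, KM_USER0);
}
#endif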

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);