blob: 14490e9443af8ca9f52e9a41244eb3f0cd74d95f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Ralf Baechle
7 */
8#ifndef _ASM_PGTABLE_H
9#define _ASM_PGTABLE_H
10
Ralf Baechle875d43e2005-09-03 15:56:16 -070011#ifdef CONFIG_32BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <asm/pgtable-32.h>
13#endif
Ralf Baechle875d43e2005-09-03 15:56:16 -070014#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <asm/pgtable-64.h>
16#endif
17
Pete Popovf10fae02005-07-14 00:17:05 +000018#include <asm/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <asm/pgtable-bits.h>
20
Tim Schmielau8c65b4a2005-11-07 00:59:43 -080021struct mm_struct;
22struct vm_area_struct;
23
/*
 * Page protection encodings.  On CPUs with RIXI (cpu_has_rixi) there is
 * no dedicated read-permission bit -- readability is the default and
 * _PAGE_NO_EXEC is used to deny execute -- so _PAGE_READ is only set
 * when RIXI is absent.  _page_cachable_default supplies the cache
 * attribute chosen at runtime (see the extern declaration below).
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
37
38/*
David Daney6dd93442010-02-10 15:12:47 -080039 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
40 * execute, and consider it to be the same as read. Also, write
41 * permissions imply read permissions. This is the closest we can get
42 * by reasonable means..
Linus Torvalds1da177e2005-04-16 15:20:36 -070043 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime, since they depend on
 * cpu_has_rixi and the runtime-selected cache attribute.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

/* Cache attribute for default cachable pages, set up at boot. */
extern unsigned long _page_cachable_default;
Linus Torvalds1da177e2005-04-16 15:20:36 -070068
69/*
70 * ZERO_PAGE is a global shared page that is always zero; used
71 * for zero-mapped memory areas etc..
72 */
73
74extern unsigned long empty_zero_page;
75extern unsigned long zero_page_mask;
76
77#define ZERO_PAGE(vaddr) \
Franck Bui-Huu99e3b942006-10-19 13:19:59 +020078 (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
Kirill A. Shutemov816422a2012-12-12 13:52:36 -080079#define __HAVE_COLOR_ZERO_PAGE
Hugh Dickins62eede62009-09-21 17:03:34 -070080
Linus Torvalds1da177e2005-04-16 15:20:36 -070081extern void paging_init(void);
82
83/*
84 * Conversion functions: convert a page and protection to a page entry,
85 * and a page entry and page directory to the page they refer to.
86 */
Franck Bui-Huuc9d06962007-03-19 17:36:42 +010087#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
Linus Torvalds1da177e2005-04-16 15:20:36 -070088#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
Dave McCracken46a82b22006-09-25 23:31:48 -070089#define pmd_page_vaddr(pmd) pmd_val(pmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Chris Dearman962f4802007-09-19 00:46:32 +010091#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
Sergei Shtylyov6e953892006-04-16 23:27:21 +040092
93#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
94#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
95
Linus Torvalds1da177e2005-04-16 15:20:36 -070096static inline void set_pte(pte_t *ptep, pte_t pte)
97{
98 ptep->pte_high = pte.pte_high;
99 smp_wmb();
100 ptep->pte_low = pte.pte_low;
101 //printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
102
Sergei Shtylyov6e953892006-04-16 23:27:21 +0400103 if (pte.pte_low & _PAGE_GLOBAL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 pte_t *buddy = ptep_buddy(ptep);
105 /*
106 * Make sure the buddy is global too (if it's !none,
107 * it better already be global)
108 */
Sergei Shtylyov6e953892006-04-16 23:27:21 +0400109 if (pte_none(*buddy)) {
110 buddy->pte_low |= _PAGE_GLOBAL;
111 buddy->pte_high |= _PAGE_GLOBAL;
112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113 }
114}
Ralf Baechle21a151d2007-10-11 23:46:15 +0100115#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116
/*
 * Clear a split-layout PTE.  If the buddy entry of the even/odd pair
 * is global, the cleared slot keeps _PAGE_GLOBAL in both halves so the
 * pair remains consistent.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
127#else
Sergei Shtylyov6e953892006-04-16 23:27:21 +0400128
129#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
130#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
131
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	/* R3000/TX39XX are excluded from the buddy fixup below --
	 * presumably their TLBs have no global bit; verify against the
	 * CPU-specific pgtable-bits definitions. */
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153
/*
 * Clear a PTE; on CPUs other than R3000/TX39XX a cleared entry keeps
 * _PAGE_GLOBAL when its even/odd buddy entry is global, so the pair
 * stays consistent.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
164#endif
165
166/*
Ralf Baechlec6e8b582005-02-10 12:19:59 +0000167 * (pmds are folded into puds so this doesn't get actually called,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 * but the define is needed for a generic inline function.)
169 */
170#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
Ralf Baechlec6e8b582005-02-10 12:19:59 +0000171
David Daney325f8a02009-12-04 13:52:36 -0800172#ifndef __PAGETABLE_PMD_FOLDED
Ralf Baechlec6e8b582005-02-10 12:19:59 +0000173/*
174 * (puds are folded into pgds so this doesn't get actually called,
175 * but the define is needed for a generic inline function.)
176 */
177#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
178#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179
Ralf Baechle5ff97472007-08-01 15:25:28 +0100180#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
181#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
182#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183
Ralf Baechle9975e772007-08-13 12:44:41 +0100184/*
185 * We used to declare this array with size but gcc 3.3 and older are not able
186 * to find that this expression is a constant, so the size is dropped.
187 */
188extern pgd_t swapper_pg_dir[];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189
190/*
191 * The following only work if pte_present() is true.
192 * Undefined behaviour if not..
193 */
Chris Dearman962f4802007-09-19 00:46:32 +0100194#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/* Flag accessors for the split (pte_low/pte_high) PTE layout: all of
 * the flags tested here live in the low half. */
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }
199
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200static inline pte_t pte_wrprotect(pte_t pte)
201{
Sergei Shtylyov6e953892006-04-16 23:27:21 +0400202 pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
203 pte.pte_high &= ~_PAGE_SILENT_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204 return pte;
205}
206
/* Mark the PTE clean: clear the dirty flag (_PAGE_MODIFIED) and the
 * companion _PAGE_SILENT_WRITE bit held in both halves. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}
213
/* Mark the PTE old: clear the accessed flag and the companion
 * _PAGE_SILENT_READ bit held in both halves. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}
220
221static inline pte_t pte_mkwrite(pte_t pte)
222{
Sergei Shtylyov6e953892006-04-16 23:27:21 +0400223 pte.pte_low |= _PAGE_WRITE;
224 if (pte.pte_low & _PAGE_MODIFIED) {
225 pte.pte_low |= _PAGE_SILENT_WRITE;
226 pte.pte_high |= _PAGE_SILENT_WRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 }
228 return pte;
229}
230
/* Mark the PTE dirty; if it is also writable, set the companion
 * _PAGE_SILENT_WRITE bit in both halves. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}
240
/* Mark the PTE young (accessed); if it is readable, also set the
 * companion _PAGE_SILENT_READ bit in both halves. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
250#else
/* Flag accessors for the single-word PTE layout. */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
255
256static inline pte_t pte_wrprotect(pte_t pte)
257{
258 pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
259 return pte;
260}
261
/* Mark the PTE clean: clear the dirty flag (_PAGE_MODIFIED) and the
 * companion _PAGE_SILENT_WRITE bit. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}
267
/* Mark the PTE old: clear the accessed flag and the companion
 * _PAGE_SILENT_READ bit. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}
273
274static inline pte_t pte_mkwrite(pte_t pte)
275{
276 pte_val(pte) |= _PAGE_WRITE;
277 if (pte_val(pte) & _PAGE_MODIFIED)
278 pte_val(pte) |= _PAGE_SILENT_WRITE;
279 return pte;
280}
281
/* Mark the PTE dirty; a writable page additionally gets the companion
 * _PAGE_SILENT_WRITE bit set. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}
289
/*
 * Mark the PTE young (accessed).  Readability is expressed differently
 * depending on the CPU: with RIXI, readable means _PAGE_NO_READ is
 * absent; without RIXI, _PAGE_READ must be present.  Only a readable
 * page gets the companion _PAGE_SILENT_READ bit.
 */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}
David Daneydd794392009-05-27 17:47:43 -0700302
#ifdef _PAGE_HUGE
/* Huge-page helpers, available only when the pgtable-bits layout for
 * this configuration defines _PAGE_HUGE. */
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312#endif
/* "Special" PTEs are not implemented here: pte_special() always
 * reports false and pte_mkspecial() is a no-op. */
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315
316/*
317 * Macro to make mark a page protection value as "uncacheable". Note
318 * that "protection" is really a misnomer here as the protection value
319 * contains the memory attribute bits, dirty bits, and various other
320 * bits as well.
321 */
322#define pgprot_noncached pgprot_noncached
323
324static inline pgprot_t pgprot_noncached(pgprot_t _prot)
325{
326 unsigned long prot = pgprot_val(_prot);
327
328 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
329
330 return __pgprot(prot);
331}
332
333/*
334 * Conversion functions: convert a page and protection to a page entry,
335 * and a page entry and page directory to the page they refer to.
336 */
337#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
338
Chris Dearman962f4802007-09-19 00:46:32 +0100339#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Change the protection of a split-layout PTE: keep the bits selected
 * by _PAGE_CHG_MASK in pte_low and install the new protection.  The
 * low six bits of pte_high are refreshed from the new pgprot as well
 * (0x3f -- presumably the attribute bits shadowed in the high half;
 * verify against pgtable-bits.h).
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
348#else
349static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
350{
351 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
352}
353#endif
354
355
356extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
357 pte_t pte);
358extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
359 pte_t pte);
360
/*
 * Hook called by generic MM code after a PTE for @address in @vma has
 * been installed or changed: propagates the new entry via
 * __update_tlb() and __update_cache().
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
368
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369#define kern_addr_valid(addr) (1)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370
371#ifdef CONFIG_64BIT_PHYS_ADDR
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
373
/*
 * I/O variant of remap_pfn_range() for the 64-bit-physical-address
 * configuration: fixup_bigphys_addr() first translates the physical
 * address (platform-specific adjustment -- see its definition), then
 * the corrected pfn is handed to remap_pfn_range() unchanged otherwise.
 */
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
	unsigned long vaddr,
	unsigned long pfn,
	unsigned long size,
	pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
383#else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
385 remap_pfn_range(vma, vaddr, pfn, size, prot)
386#endif
387
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388#include <asm-generic/pgtable.h>
389
390/*
Wu Zhangjin22f1fdf2009-11-11 13:59:23 +0800391 * uncached accelerated TLB map for video memory access
392 */
393#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
394#define __HAVE_PHYS_MEM_ACCESS_PROT
395
396struct file;
397pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
398 unsigned long size, pgprot_t vma_prot);
399int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
400 unsigned long size, pgprot_t *vma_prot);
401#endif
402
403/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 * We provide our own get_unmapped area to cope with the virtual aliasing
405 * constraints placed on us by the cache architecture.
406 */
407#define HAVE_ARCH_UNMAPPED_AREA
Jian Pengd0be89f2011-05-17 12:27:49 -0700408#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409
410/*
411 * No page table caches to initialise
412 */
413#define pgtable_cache_init() do { } while (0)
414
415#endif /* _ASM_PGTABLE_H */