/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

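/*
 * Note on the protection encodings below: each pgprot combines the software
 * permission bits with the default cacheability attribute.  On cores with
 * the RI/XI (read-inhibit/execute-inhibit) extension cpu_has_rixi is true;
 * such cores have no explicit _PAGE_READ bit, so read permission is implied
 * and execute permission is withheld with _PAGE_NO_EXEC, whereas older
 * cores use the explicit _PAGE_READ bit instead.
 */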
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute protection,
 * so execute permission is treated the same as read permission.  Also,
 * write permission implies read permission.  This is the closest we can
 * get by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
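/*
 * The all-zero entries above are placeholders only; the real
 * protection_map[] entries are filled in at boot (setup_protection_map())
 * once the default cache attribute and RI/XI support have been detected.
 */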

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
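/*
 * On CPUs with cache aliasing the zero page is replicated once per cache
 * colour; zero_page_mask selects the copy whose colour matches the given
 * virtual address, which is what __HAVE_COLOR_ZERO_PAGE advertises to the
 * generic mm code.
 */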

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

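/*
 * With 64-bit physical addresses on a 32-bit CPU a PTE is split across
 * pte_low/pte_high, so set_pte() writes pte_high first and orders it with
 * smp_wmb() before pte_low (which carries the present bit), so that a
 * concurrent TLB refill does not see a half-written entry.  The MIPS TLB
 * also maps an even/odd pair of pages per entry, and the hardware global
 * bit only takes effect when both halves of the pair have it set; that is
 * why _PAGE_GLOBAL is propagated to a still-empty buddy PTE here and
 * preserved by pte_clear() below.
 */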
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low  |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

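/*
 * log2 of the page table entry type sizes; these feed the run-time
 * generated TLB refill code (see tlbex.c), which uses them to turn a table
 * index into a byte offset with a shift instead of a multiply.
 */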
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with a size, but gcc 3.3 and older are not
 * able to see that the size expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
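/*
 * MIPS has no hardware-managed accessed/dirty bits, so they are emulated:
 * _PAGE_ACCESSED/_PAGE_MODIFIED are pure software bits, while
 * _PAGE_SILENT_READ/_PAGE_SILENT_WRITE correspond to the hardware
 * valid/dirty bits that actually end up in the TLB.  The helpers below only
 * set a "silent" bit once both the permission bit and the matching software
 * bit are set, so the first access or write still faults and can be
 * accounted.
 */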
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here, as the value also contains the
 * memory attribute bits, dirty bits, and various other bits.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
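/*
 * A minimal usage sketch (not taken from this file): a driver mapping
 * device memory into userspace would typically do
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 *
 * so the resulting PTEs use _CACHE_UNCACHED instead of the default
 * cacheable attribute.
 */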

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

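/*
 * update_mmu_cache() is called after a fault has installed or modified a
 * PTE: __update_tlb() preloads the new translation into the TLB, and
 * __update_cache() does whatever cache maintenance the virtually indexed
 * caches need for the newly mapped page (aliasing avoidance).
 */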
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

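/*
 * _PAGE_SPLITTING is a software bit marking a huge pmd that is being split
 * back into ordinary PTEs; lockless walkers (e.g. fast GUP) check
 * pmd_trans_splitting() and back off until the split has completed.
 */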
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address,
				 pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_get_and_clear uses a version of pmd_clear()
 * with a different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */