/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include "linux/sched.h"
#include "linux/linkage.h"
#include "asm/processor.h"
#include "asm/page.h"
#include "asm/fixmap.h"

#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */

#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif
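
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): how the flag bits above distinguish the kinds of pte whose
 * _PAGE_PRESENT bit is clear.
 */
#if 0
static int classify_pte(pte_t pte)
{
	if (pte_get_bits(pte, _PAGE_PRESENT))
		return 0;	/* ordinary mapped page */
	if (pte_get_bits(pte, _PAGE_PROTNONE))
		return 1;	/* PROT_NONE mapping; pte_present() is still true */
	if (pte_get_bits(pte, _PAGE_FILE))
		return 2;	/* nonlinear file mapping, saved pte */
	return 3;		/* swap entry, or empty */
}
#endif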

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
			     pte_t *pte_out);

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

#define pgtable_cache_init() do ; while (0)

/*
 * pgd entries used up by user/kernel:
 */

#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
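
/*
 * Worked example (numbers are illustrative, assuming 2-level tables with
 * PGDIR_SHIFT == 22 and PTRS_PER_PGD == 1024): TASK_SIZE == 0xc0000000
 * gives USER_PGD_PTRS = 0xc0000000 >> 22 = 768, and therefore
 * KERNEL_PGD_PTRS = 1024 - 768 = 256.
 */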

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
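
/*
 * Worked example of the VMALLOC_START rounding (illustrative values,
 * assuming an 8MB __va_space): with end_iomem == 0x20300000,
 * VMALLOC_START = (0x20300000 + 0x800000) & ~0x7fffff = 0x20800000,
 * i.e. the vmalloc area starts at the next 8MB boundary past end_iomem,
 * leaving a hole of at least one byte and at most 8MB.
 */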

#define REGION_SHIFT	(sizeof(pte_t) * 8 - 4)
#define REGION_MASK	(((unsigned long) 0xf) << REGION_SHIFT)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * The i386 can't do page protection for execute, and considers that the
 * same as a read.  Also, write permissions imply read permissions.  This
 * is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
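
/*
 * Illustrative reading of the tables above (not in the original header):
 * a private PROT_READ|PROT_WRITE mapping indexes __P011 and gets
 * PAGE_COPY, i.e. it starts without _PAGE_RW so the first write faults
 * and the page can be copied (COW); the shared equivalent indexes __S011
 * and gets PAGE_SHARED, which is writable outright.
 */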

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
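/* E.g. with PAGE_SHIFT == 12, pages_to_mb(x) is (x) >> 8: 256 4kB pages per MB. */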

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
#define phys_addr(p) ((p) & ~REGION_MASK)

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
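
/*
 * Sketch of how _PAGE_NEWPAGE/_PAGE_NEWPROT are meant to be consumed
 * (illustrative only, not the kernel's actual flush code; lookup_pte()
 * is a hypothetical helper): after page-table updates, UML walks the
 * affected range, pushes each flagged entry out to the host address
 * space and marks it up to date again.
 */
#if 0
static void example_flush_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t *pte = lookup_pte(addr);	/* hypothetical helper */

		if (pte_newpage(*pte)) {
			/* remap: host munmap() followed by mmap() */
		} else if (pte_newprot(*pte)) {
			/* permissions only changed: host mprotect() */
		}
		*pte = pte_mkuptodate(*pte);
	}
}
#endif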

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));	\
	pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}

#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the index of the entry in the pmd page which would
 * control the given virtual address.
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This macro returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
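
/*
 * Putting the lookup macros together (illustrative only; a real walker
 * must check pgd/pud/pmd validity at each step, and pud_offset()/
 * pmd_offset() are supplied by the level headers and the generic
 * includes further down):
 */
#if 0
static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);

	return pte_offset_kernel(pmd, address);
}
#endif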

#define update_mmu_cache(vma,address,pte) do ; while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 4) & 0x3f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
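
/*
 * Worked example of the encoding above (illustrative numbers): type 3,
 * offset 0x1234 packs to (3 << 4) | (0x1234 << 11) = 0x91a030.  Bits 0-3
 * stay clear, so _PAGE_PRESENT, _PAGE_NEWPAGE, _PAGE_NEWPROT and
 * _PAGE_FILE can never be set by accident in a swap pte.
 */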

#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

#include <asm-generic/pgtable-nopud.h>

#ifdef CONFIG_HIGHMEM
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)
#endif

#endif
#endif

#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */