/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopud.h>

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0. When memory is low
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * or empty_bad_page_table is returned to the higher layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */
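
/*
 * Worked example of the layout described above (illustrative arithmetic
 * only): 1024 pgd pointers * 512 pmd pointers * 512 ptes * 4K per page
 * = 2^10 * 2^9 * 2^9 * 2^12 bytes = 2^40 bytes, i.e. the 40 bits / 1 TB
 * of virtual address space quoted below for the 4kB page configuration.
 */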

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
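
/*
 * Illustrative evaluation, assuming CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12,
 * PTE_ORDER == 0, PMD_ORDER == 0):
 *
 *	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21, so PMD_SIZE   = 2 MB
 *	PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30, so PGDIR_SIZE = 1 GB
 */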

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits us to map 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that seemed
 * rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  We could add a third level but it seems
 * like at the moment there's no need for this.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

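/*
 * Illustrative values, again assuming CONFIG_PAGE_SIZE_4KB and 8 byte
 * table entries:  PTRS_PER_PGD = (4096 << 1) / 8 = 1024,
 * PTRS_PER_PMD = PTRS_PER_PTE = 4096 / 8 = 512.
 */
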
#if PGDIR_SIZE >= TASK_SIZE
#define USER_PTRS_PER_PGD       (1)
#else
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START		MAP_BASE
#define VMALLOC_END	\
	(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
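/*
 * Sanity-check arithmetic (illustrative, 4kB pages): the vmalloc window
 * spans 1024 * 512 * 512 * 4096 bytes = 2^40 bytes above MAP_BASE, i.e.
 * exactly the range one full page table tree can map.
 */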
#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
extern pgd_t module_pg_dir[PTRS_PER_PGD];
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
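
/*
 * Note (illustrative): because an empty pmd is represented by a pointer to
 * invalid_pte_table rather than by 0, pmd_present() is simply the negation
 * of pmd_none(); after pmd_clear(&pmd), pmd_none(pmd) is true and
 * pmd_present(pmd) is false.
 */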

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
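
/*
 * Illustrative round trip (hypothetical pfn value): with 4kB pages,
 * pfn_pte(0x1000, prot) stores the pfn starting at bit 12 (bit 14 on
 * VR41xx, which shifts by PAGE_SHIFT + 2), and pte_pfn() applied to the
 * result recovers 0x1000 again.
 */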

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#ifdef MODULE_START
#define pgd_offset_k(address) \
	((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL))
#else
#define pgd_offset_k(address)	pgd_offset(&init_mm, 0UL)
#endif

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr)	((mm)->pgd + pgd_index(addr))

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		(pud_val(pud) - PAGE_OFFSET)
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
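
/*
 * Illustrative only, not part of this header's interface: a software walk
 * from a kernel virtual address down to its pte would combine the helpers
 * above roughly as follows (the pud level is folded away by
 * <asm-generic/pgtable-nopud.h>, which supplies pud_offset()):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */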

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

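/*
 * Illustrative round trip with hypothetical values: __swp_entry(2, 0x1234)
 * produces a value with the type in bits 32..39 and the offset in bits
 * 40..63, so __swp_type() yields 2 and __swp_offset() yields 0x1234 again.
 */
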
/*
 * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
 * make things easier, and only use the upper 56 bits for the page offset...
 */
#define PTE_FILE_MAX_BITS	56

#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
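
/*
 * Illustrative: for a hypothetical file offset of 0x42 pages,
 * pgoff_to_pte(0x42) places the offset in bits 8..63 and sets _PAGE_FILE
 * (one of the low, taken bits), and pte_to_pgoff() on that pte returns
 * 0x42 again.
 */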

#endif /* _ASM_PGTABLE_64_H */