#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>
#include <linux/config.h>

extern void paging_init(void);

/* We provide our own get_unmapped_area to avoid cache synonym issues. */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level (which is the logical three-level
 * Linux page table layout, folded) page tables as the i386.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * NEFF and NPHYS related defines.
 * FIXME : These need to be model-dependent.  For now this is OK: SH5-101 and
 * SH5-103 implement 32 bits effective and 32 bits physical.  But future
 * implementations may extend beyond this.
 */
#define NEFF		32
#define	NEFF_SIGN	(1LL << (NEFF - 1))
#define	NEFF_MASK	(-1LL << NEFF)

#define NPHYS		32
#define	NPHYS_SIGN	(1LL << (NPHYS - 1))
#define	NPHYS_MASK	(-1LL << NPHYS)
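
/*
 * Illustrative sketch (not part of this header's ABI): how the SIGN/MASK
 * pairs above sign-extend a 32-bit address into the 64-bit value the
 * hardware expects.  E.g. with NPHYS == 32:
 *
 *	unsigned long long x = 0x80000000ULL;
 *	if (x & NPHYS_SIGN)		// bit 31 set
 *		x |= NPHYS_MASK;	// -> 0xffffffff80000000
 *
 * set_pte() below performs exactly this extension on every PTE store.
 */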

/* Typically a 2-level scheme is sufficient for up to 32 bits of virtual
   address space; beyond that a 3-level scheme is appropriate. */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	      /* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD. */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD	(1)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PMD_SHIFT	PGDIR_SHIFT
#define PMD_SIZE	PGDIR_SIZE
#define PMD_MASK	PGDIR_MASK

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 entries */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	      /* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE	2	      /* sizeof(unsigned long long *) magnitude */
#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#else
#error "No defined number of page table levels"
#endif
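
/*
 * Worked example (illustrative only), assuming 4KB pages (PAGE_SHIFT == 12)
 * and the 2-level layout:
 *
 *	PTE_BITS    = 12 - 3  = 9	-> 512 PTEs per page table
 *	PGDIR_SHIFT = 12 + 9  = 21	-> each PGD entry maps 2MB
 *	PGD_BITS    = 32 - 21 = 11	-> 2048 PGD entries
 *
 * so a 32-bit effective address decomposes as
 * [31:21] PGD index | [20:12] PTE index | [11:0] page offset.
 */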

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note 1:
 * There's no need to use physical addresses, since the tree walk is
 * performed entirely in software, up until the final PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY), or present. When present, its lower 3 nibbles
 * contain _KERNPG_TABLE and, being a kernel virtual pointer, it must
 * also have bit 31 set. Taking the clear value to be bit 31 == 0 with
 * the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other value is a
 * bad pgd that must be reported via printk().
 */
#define _PGD_EMPTY		0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
#define pgd_present(pgd) ((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
#define pgd_clear(xx)				do { } while(0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry)	(1)
#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pgd_bad(pgd_entry)	(0)
#define pgd_clear(pgd_entry_p)	(set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))

#endif


#define pgd_page(pgd_entry)	((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))

/*
 * PMD defines. Middle level.
 */

/* PGD to PMD dereferencing */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define __pmd_offset(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, addr) \
		((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
#endif

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* Either the PMD is empty or present; it's never paged out. */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_kernel(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
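
/*
 * Illustrative sketch of a full software walk using the accessors above
 * (assuming a hypothetical kernel virtual address 'addr'; error checks
 * elided for brevity):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// a plain cast for 2-level
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Only the final PTE holds a physical address; every intermediate level
 * is dereferenced through kernel virtual pointers.
 */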

/* Round it up! */
#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
#define VMALLOC_END	0xff000000
#define VMALLOC_START	0xf0000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17, ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by __swp_entry() and __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page written to */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
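
/*
 * Sanity check (illustrative): the cleared bits of the mask above are
 * exactly the four software-only flags:
 *
 *	~0xfffffffffffff3dbLL -> 0xc24 ==
 *	_PAGE_PRESENT (0x004) | _PAGE_SHARED (0x020) |
 *	_PAGE_DIRTY (0x400)   | _PAGE_ACCESSED (0x800)
 *
 * so ANDing a PTE with the mask leaves only hardware-visible bits.
 */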

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
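
/*
 * For reference (assuming the SH-5 SZ field encoding; consult the core
 * architecture manual to confirm): SZ1:SZ0 selects the page size, with
 * 00 -> 4KB (the base size), 01 -> 64KB, 10 -> 1MB, 11 -> 512MB, which
 * is why the three huge sizes above map to SIZE0, SIZE1, and both.
 */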

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool, which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
				 _PAGE_SHARED)
/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack. */
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)


/*
 * On ST50 we have full permissions (Read/Write/Execute/Shared).
 * Just match them all. These are for mmap(), therefore all at least
 * User/Cachable/Present/Accessed. No point in making Fault on Write.
 */
#define __MMAP_COMMON	(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
       /* sxwr */
#define __P000	__pgprot(__MMAP_COMMON)
#define __P001	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P010	__pgprot(__MMAP_COMMON)
#define __P011	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P100	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P101	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
#define __P110	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P111	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)

#define __S000	__pgprot(__MMAP_COMMON | _PAGE_SHARED)
#define __S001	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
#define __S010	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
#define __S011	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
#define __S100	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
#define __S101	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
#define __S110	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
#define __S111	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
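
/*
 * How these are used (illustrative): the 'sxwr' index is built from the
 * mmap() protection and sharing bits, e.g.
 *
 *	PROT_READ|PROT_WRITE, MAP_PRIVATE -> __P011 (no _PAGE_WRITE:
 *		the first write faults so the page can be copied)
 *	PROT_READ|PROT_WRITE, MAP_SHARED  -> __S011 (_PAGE_WRITE set)
 *
 * i.e. private mappings deliberately omit _PAGE_WRITE to force
 * copy-on-write, while shared mappings grant it up front.
 */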

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map).  */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t * pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note 1:
 * It's the leaf of the tree walk: this is where the physical address
 * gets stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the
 * page is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If
 * swapped out, bits [31:8], [6:3], [1:0] are under swapper control, so
 * only bit[7] is left for us to select.  If we force bit[7]==0 when
 * swapped out, we could use the combination bit[7,2]=2'b10 to indicate an
 * empty PTE.  Alternatively, if we force bit[7]==1 when swapped out, we
 * can use all zeroes to indicate empty.  This is convenient, because the
 * page tables get cleared to zero when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to. Take the absolute physical
 * address, make it relative, and translate it to an index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))

/*
 * Return the number of (rounded-down) MB corresponding to x pages.
 */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))


/*
 * The following have defined behavior only if pte_present() is true.
 */
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_READ; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXECUTE; }
static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }


/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)							\
({										\
	pte_t __pte;								\
										\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | 		\
		__MEMORY_START | pgprot_val((pgprot))));			\
	__pte;									\
})
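
/*
 * Typical use (illustrative, assuming a 'struct page *page' from the
 * allocator): build a user-visible, copy-on-write PTE and install it.
 *
 *	pte_t pte = mk_pte(page, PAGE_COPY);
 *	set_pte_at(mm, addr, ptep, pte);	// sign-extends via set_pte()
 *
 * mk_pte() recovers the physical address from the mem_map index, then
 * ORs in the protection bits, mirroring pte_pagenr() in reverse.
 */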

/*
 * This takes an (absolute) physical page address, as used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

#define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

typedef pte_t *pte_addr_t;

extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
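
/*
 * Worked encoding example (illustrative): the 6-bit swap type is split
 * around _PAGE_PRESENT (bit 2), as described in the PTEL comment above.
 * For type == 0x2b (6'b101011) and offset == 5:
 *
 *	(type & 3)         = 0x003	-> bits [1:0]
 *	(type & 0x3c) << 1 = 0x050	-> bits [6:3]
 *	offset << 8        = 0x500	-> bits [31:8]
 *
 * giving val == 0x553, with _PAGE_PRESENT (bit 2) left clear;
 * __swp_type() reverses the split exactly.
 */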

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS		29
#define pte_to_pgoff(pte)		(pte_val(pte))
#define pgoff_to_pte(off)		((pte_t) { (off) | _PAGE_FILE })

/* Needs to be defined here and not in linux/mm.h, as it is arch-dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#define pte_pfn(x)		(((unsigned long)((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH64_PGTABLE_H */