#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/mm.h>		/* for vm_area_struct */
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/bitops.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)                                 \
        do {                                                    \
                *(pteptr) = (pteval);                           \
        } while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* Note: If you change ISTACK_SIZE, you need to change the corresponding
 * values in vmlinux.lds and vmlinux64.lds (init_istack section). Also,
 * the "order" and size need to agree.
 */

#define  ISTACK_SIZE  32768 /* Interrupt Stack Size */
#define  ISTACK_ORDER 3

/* This is the size of the initially mapped kernel memory */
#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
#else
#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
#define PT_NLEVELS	3
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PMD_ORDER	1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
#else
#define PT_NLEVELS	2
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER	PGD_ORDER
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
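/* Worked example (illustrative only, not part of the original source):
 * assuming PAGE_SHIFT == 12 (4kB pages) and BITS_PER_PTE_ENTRY == 3
 * (8-byte PTEs; both values are assumed here and come from the
 * architecture's page.h), BITS_PER_PTE = 12 - 3 = 9, so PTRS_PER_PTE
 * is 512 entries, i.e. exactly one page worth of PTEs. */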

/* Definitions for 2nd level */
#define pgtable_cache_init()	do { } while (0)

#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD

#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)

#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
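/* Worked example (illustrative only, not part of the original source):
 * with the 64-bit/4kB values assumed above, PMD_SHIFT = 12 + 9 = 21,
 * so one page of PTEs maps 2MB.  KERNEL_INITIAL_ORDER is 24 (16MB),
 * hence PT_INITIAL = 1 << (24 - 21) = 8 initial PTE pages. */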

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
extern void *vmalloc_start;
#define PCXL_DMA_MAP_SIZE   (8*1024*1024)
#define VMALLOC_START   ((unsigned long)vmalloc_start)
/* this is a fixmap remnant, see fixmap.h */
#define VMALLOC_END	(KERNEL_MAP_END)
#endif

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_FILE_BIT	_PAGE_DIRTY_BIT	/* overload this bit */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT    21   /* (0x400) Software: translation valid */
				/*             for cache flushing only */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - x)

/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12


/* this is how many bits may be used by the file functions */
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH    (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE     (1 << xlate_pabit(_PAGE_FILE_BIT))
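/* Illustrative expansion (not part of the original source): xlate_pabit()
 * turns the PA bit numbering above (bit 31 = least significant) into a
 * normal left-shift count, e.g. _PAGE_READ = 1 << xlate_pabit(31) = 0x001
 * and _PAGE_USER = 1 << xlate_pabit(20) = 0x800, matching the hex values
 * noted in the comments above; all flags stay in the low word, so the
 * same macro works for 32- and 64-bit PTEs. */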

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_ATTACHED_BIT  30
#define _PxD_VALID_BIT     29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
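/* Illustrative encoding sketch (not part of the original source): a pmd/pgd
 * entry stores (physical address >> PxD_VALUE_SHIFT) | flags.  The table it
 * points to is page-aligned, so its low PAGE_SHIFT bits are zero and, after
 * shifting right by 8, the low 4 bits remain free for the PxD_FLAG_* bits.
 * With a 32-bit entry and 4kB pages this reaches 32 + 8 = 40 bits of
 * physical address, which is where the 40/42/44-bit figure above comes from. */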

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY | _PAGE_READ)
#define PAGE_FLUSH      __pgprot(_PAGE_FLUSH)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

/*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
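/* Usage note (illustrative, not part of the original source): a shared
 * PROT_READ|PROT_WRITE mapping (__S011) gets PAGE_SHARED, while the
 * private equivalent (__P011) only gets PAGE_READONLY, so the first
 * write faults and the generic mm code performs copy-on-write. */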


extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if PT_NLEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if PT_NLEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
	else
#endif
		__pmd_val_set(*pmd, 0);
}


#if PT_NLEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64 bit we have three level tables */

#define pgd_none(x)     (!pgd_val(x))
#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if PT_NLEVELS == 3
	if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd; cannot
		 * free it */
		return;
#endif
	__pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline void pgd_clear(pgd_t * pgdp)	{ }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_READ; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_user(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_READ; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) |= _PAGE_READ; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
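/* Round-trip sanity check (illustrative only, not part of the original
 * source): since every protection bit defined above lies below bit 12,
 * pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn for any valid pfn. */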

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
	((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if PT_NLEVELS == 3
#define pmd_offset(dir,address) \
	((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
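/* Illustrative walk (not part of the original source): for an already
 * populated kernel mapping, a full software lookup is simply
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pmd_t *pmd = pmd_offset(pgd, vaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 *
 * (on the folded two-level configuration pmd_offset() just casts the
 * pgd pointer). */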

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
					    (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
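/* Worked example (illustrative only, not part of the original source):
 * __swp_entry(2, 0x15) puts the type in bits 0-4, the low three offset
 * bits in bits 6-8 and the remaining offset bits from bit 11 upwards,
 * giving 0x1142; __swp_type() and __swp_offset() recover 2 and 0x15.
 * The split keeps bit 5 (_PAGE_FILE), bit 9 (_PAGE_PRESENT) and bit 10
 * (_PAGE_FLUSH) clear in a swap pte. */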

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}

extern spinlock_t pa_dbit_lock;

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	pte_t pte;

	spin_lock(&pa_dbit_lock);
	pte = old_pte = *ptep;
	pte_val(pte) &= ~_PAGE_PRESENT;
	pte_val(pte) |= _PAGE_FLUSH;
	set_pte_at(mm, addr, ptep, pte);
	spin_unlock(&pa_dbit_lock);

	return old_pte;
}
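/* Note added for clarity (not in the original): the cleared pte is
 * replaced with _PAGE_FLUSH rather than plain zero, apparently so the
 * cache-flush code can still find a usable translation for the page;
 * pte_none() above treats such entries as empty. */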

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */