/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/processor.h>

struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
struct mm_struct;

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	22
# define PGDIR_SHIFT	22
#else /* __s390x__ */
# define PMD_SHIFT	21
# define PGDIR_SHIFT	31
#endif /* __s390x__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 setup is two-level, so
 * we don't really have any PMD directory physically.
 * On S390 the segment-table entries are combined into one pgd, which
 * leads to 1024 ptes per pgd.
 */
#ifndef __s390x__
# define PTRS_PER_PTE    1024
# define PTRS_PER_PMD    1
# define PTRS_PER_PGD    512
#else /* __s390x__ */
# define PTRS_PER_PTE    512
# define PTRS_PER_PMD    1024
# define PTRS_PER_PGD    2048
#endif /* __s390x__ */
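
/*
 * Illustrative sketch (not part of the original header; the address
 * value is made up): how a 31 bit virtual address decomposes under the
 * definitions above. With PGDIR_SHIFT at 22 the pmd level is folded
 * away, leaving a pgd index and a pte index:
 *
 *	unsigned long addr = 0x12345678UL;
 *	unsigned long pgx = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
 *	unsigned long ptx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 *
 * For 0x12345678 this yields pgx == 0x48 and ptx == 0x345.
 */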

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 * On 64 bit the vmalloc area starts at 4GB to prevent modules from
 * exchanging syscall table entries.
 */
extern unsigned long vmalloc_end;

#ifdef CONFIG_64BIT
#define VMALLOC_ADDR	(max(0x100000000UL, (unsigned long) high_memory))
#else
#define VMALLOC_ADDR	((unsigned long) high_memory)
#endif
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	vmalloc_end
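
/*
 * Worked example (sketch; the high_memory value is made up): if
 * physical memory on a 31 bit machine ends at 0x1fe01000,
 * VMALLOC_START rounds up to the next 8MB boundary, creating the
 * "hole" described above:
 *
 *	(0x1fe01000 + 0x800000) & ~(0x800000 - 1)  ==  0x20000000
 */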

/*
 * We need some free virtual space to be able to do vmalloc.
 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
 * area. On a machine with 2GB memory we make sure that we
 * have at least 128MB free space for vmalloc. On a machine
 * with 4TB we make sure we have at least 128GB.
 */
#ifndef __s390x__
#define VMALLOC_MIN_SIZE	0x8000000UL
#define VMALLOC_END_INIT	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_MIN_SIZE	0x2000000000UL
#define VMALLOC_END_INIT	0x40000000000UL
#endif /* __s390x__ */

/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment-table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment-table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment-table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region-table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
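
/*
 * A small sketch (illustrative, not part of the original header): with
 * the storage key layout above, the referenced and changed bits can be
 * tested via the _PAGE_REFERENCED/_PAGE_CHANGED masks defined below:
 *
 *	unsigned long skey = page_get_storage_key(page_to_phys(page));
 *	int referenced = (skey & _PAGE_REFERENCED) != 0;
 *	int changed    = (skey & _PAGE_CHANGED) != 0;
 */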

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush, on the other hand, uses the ipte
 * instruction to invalidate a given pte. ipte sets the hw invalid bit and
 * clears all tlbs for the page. The page table entry is set to
 * _PAGE_TYPE_EMPTY afterwards. This change is done while holding the lock,
 * but the intermediate step of a previously valid pte with the hw invalid
 * bit set can be observed by handle_pte_fault. That makes it necessary
 * that all valid pte types with the hw invalid bit set must be
 * distinguishable from the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * The swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
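
/*
 * Example (sketch): _PAGE_TYPE_NONE (0x401, pattern 1001) has the hw
 * invalid bit set but still counts as present, which the query
 * functions below implement via mask tests:
 *
 *	pte_t pte;
 *	pte_val(pte) = _PAGE_TYPE_NONE;
 *	pte_present(pte);	returns 1 (pattern 1001)
 *	pte_none(pte);		returns 0 (_PAGE_SWT is set)
 */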

#ifndef __s390x__

/* Bits in the segment table entry */
#define _PAGE_TABLE_LEN 0xf            /* only full page-tables            */
#define _PAGE_TABLE_COM 0x10           /* common page-table                */
#define _PAGE_TABLE_INV 0x20           /* invalid page-table               */
#define _SEG_PRESENT    0x001          /* Software (overlap with PTL)      */

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

#define _USER_SEG_TABLE_LEN    0x7f    /* user-segment-table up to 2 GB    */
#define _KERNEL_SEG_TABLE_LEN  0x7f    /* kernel-segment-table up to 2 GB  */

/*
 * User and kernel page tables are identical
 */
#define _PAGE_TABLE	_PAGE_TABLE_LEN
#define _KERNPG_TABLE	_PAGE_TABLE_LEN

/*
 * The kernel segment-table includes the user segment-table
 */

#define _SEGMENT_TABLE	(_USER_SEG_TABLE_LEN|0x80000000|0x100)
#define _KERNSEG_TABLE	_KERNEL_SEG_TABLE_LEN

#define USER_STD_MASK	0x00000080UL

#else /* __s390x__ */

/* Bits in the segment table entry */
#define _PMD_ENTRY_INV   0x20          /* invalid segment table entry      */
#define _PMD_ENTRY       0x00

/* Bits in the region third table entry */
#define _PGD_ENTRY_INV   0x20          /* invalid region table entry       */
#define _PGD_ENTRY       0x07

/*
 * User and kernel page directory
 */
#define _REGION_THIRD       0x4
#define _REGION_THIRD_LEN   0x3
#define _REGION_TABLE       (_REGION_THIRD|_REGION_THIRD_LEN|0x40|0x100)
#define _KERN_REGION_TABLE  (_REGION_THIRD|_REGION_THIRD_LEN)

#define USER_STD_MASK           0x0000000000000080UL

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

#endif /* __s390x__ */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory, with a
 * primary/secondary page table, execute-only could be implemented, but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
/*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
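
/*
 * Example (sketch): a PROT_READ|PROT_WRITE shared mapping selects
 * __S011 and a private one __P011, so the private case starts out
 * read-only and the first write faults into copy-on-write:
 *
 *	pgprot_t shared  = __S011;	(PAGE_RW)
 *	pgprot_t private = __P011;	(PAGE_RO, write faults -> COW)
 */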

#ifndef __s390x__
# define PMD_SHADOW_SHIFT	1
# define PGD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PMD_SHADOW_SHIFT	2
# define PGD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline struct page *get_shadow_page(struct page *page)
{
	if (s390_noexec && !list_empty(&page->lru))
		return virt_to_page(page->lru.next);
	return NULL;
}

static inline pte_t *get_shadow_pte(pte_t *ptep)
{
	unsigned long pteptr = (unsigned long) (ptep);

	if (s390_noexec) {
		unsigned long offset = pteptr & (PAGE_SIZE - 1);
		void *addr = (void *) (pteptr ^ offset);
		struct page *page = virt_to_page(addr);
		if (!list_empty(&page->lru))
			return (pte_t *) ((unsigned long) page->lru.next |
					  offset);
	}
	return NULL;
}

static inline pmd_t *get_shadow_pmd(pmd_t *pmdp)
{
	unsigned long pmdptr = (unsigned long) (pmdp);

	if (s390_noexec) {
		unsigned long offset = pmdptr &
				((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1);
		void *addr = (void *) (pmdptr ^ offset);
		struct page *page = virt_to_page(addr);
		if (!list_empty(&page->lru))
			return (pmd_t *) ((unsigned long) page->lru.next |
					  offset);
	}
	return NULL;
}

static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
{
	unsigned long pgdptr = (unsigned long) (pgdp);

	if (s390_noexec) {
		unsigned long offset = pgdptr &
				((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1);
		void *addr = (void *) (pgdptr ^ offset);
		struct page *page = virt_to_page(addr);
		if (!list_empty(&page->lru))
			return (pgd_t *) ((unsigned long) page->lru.next |
					  offset);
	}
	return NULL;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_t *shadow_pte = get_shadow_pte(pteptr);

	*pteptr = pteval;
	if (shadow_pte) {
		if (!(pte_val(pteval) & _PAGE_INVALID) &&
		    (pte_val(pteval) & _PAGE_SWX))
			pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
		else
			pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
	}
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
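
/*
 * Usage sketch (illustrative): installing a read-only mapping goes
 * through set_pte_at, which keeps any shadow (no-exec) page table in
 * sync as a side effect:
 *
 *	pte_t entry = mk_pte(page, PAGE_RO);
 *	set_pte_at(mm, addr, ptep, entry);
 */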

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
static inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
}

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
}

static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) & _PGD_ENTRY_INV;
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
}

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) & _PMD_ENTRY_INV;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
}

#endif /* __s390x__ */

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

#define pte_same(a,b)	(pte_val(a) == pte_val(b))

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_dirty and
	 * page_clear_dirty for details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

static inline void pgd_clear(pgd_t * pgdp)      { }

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
	pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
}

static inline void pmd_clear(pmd_t * pmdp)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);

	pmd_clear_kernel(pmdp);
	if (shadow_pmd)
		pmd_clear_kernel(shadow_pmd);
}

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgdp)
{
	pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
}

static inline void pgd_clear(pgd_t * pgdp)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgdp);

	pgd_clear_kernel(pgdp);
	if (shadow_pgd)
		pgd_clear_kernel(shadow_pgd);
}

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
	pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
}

static inline void pmd_clear(pmd_t * pmdp)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmdp);

	pmd_clear_kernel(pmdp);
	if (shadow_pmd)
		pmd_clear_kernel(shadow_pmd);
}

#endif /* __s390x__ */

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t *shadow_pte = get_shadow_pte(ptep);

	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (shadow_pte)
		pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline int
ptep_clear_flush_young(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; bits are in storage key */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* S390 has 1MB segments, we are emulating 4MB segments */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
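
/*
 * Sketch of the 31 bit address math above (pointer value made up): a
 * pte pointer is rounded down to a 1KB boundary (256 entries * 4
 * bytes, one 1MB segment's page table) so that ipte sees the real
 * hardware page-table origin:
 *
 *	ptep == 0x00345c48  ->  pto == 0x00345c48 & 0x7ffffc00
 *	                            == 0x00345c00
 */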

static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
{
	__ptep_ipte(address, ptep);
	ptep = get_shadow_pte(ptep);
	if (ptep)
		__ptep_ipte(address, ptep);
}

static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(address, ptep);
	return pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}

#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate(__addr, __ptep);			\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. These functions
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
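
/*
 * Usage sketch (illustrative; the generic rmap code is the intended
 * caller): test the storage key, transfer the state to the struct
 * page, then clear the key:
 *
 *	if (page_test_dirty(page)) {
 *		page_clear_dirty(page);
 *		set_page_dirty(page);
 *	}
 */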

/*
 * Test and clear referenced bit in storage key.
 */
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);

	return mk_pte_phys(physpage, pgprot);
}
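
/*
 * Example (sketch; the pfn value is made up): all three constructors
 * funnel into mk_pte_phys, which simply adds the protection bits to
 * the physical address:
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_RO);
 *	pte_val(pte) == 0x1234000 + _PAGE_TYPE_RO == 0x1234200
 */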

#ifdef __s390x__

static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long physpage = __pa((pfn) << PAGE_SHIFT);

	return __pmd(physpage + pgprot_val(pgprot));
}

#endif /* __s390x__ */

#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)

#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

/* Find an entry in the second-level page table.. */
static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

#else /* __s390x__ */

/* Find an entry in the second-level page table.. */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
	((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))

#endif /* __s390x__ */

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
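
/*
 * Putting the walk together (sketch; error checks and locking
 * omitted): translating an address to its pte goes through all the
 * levels defined above:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */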

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store the necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store the necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
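
/*
 * Round-trip sketch (values made up): encoding swap type 3 at offset
 * 0x1234 and decoding it again with the macros above:
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);
 *	__swp_type(ent)   == 3
 *	__swp_offset(ent) == 0x1234
 */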

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
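
/*
 * Round-trip sketch (offset made up): pgoff_to_pte stores the low 7
 * bits of the offset in pte bits 1-7 and the rest above the hw bits;
 * pte_to_pgoff reverses it:
 *
 *	pte_t pte = pgoff_to_pte(0x1234);
 *	pte_to_pgoff(pte) == 0x1234
 */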

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PAGE_TEST_DIRTY
#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */