/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

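/*
 * For illustration: with the shift values above PMD_SIZE works out to
 * 1 MB (1UL << 20) on both configurations; on 64 bit PUD_SIZE is 2 GB
 * (1UL << 31) and PGDIR_SIZE is 4 TB (1UL << 42). The masks simply clear
 * the offset bits below the respective size, e.g. PMD_MASK == ~0xfffffUL.
 */
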
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

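/*
 * Worked example (illustrative only, the struct page size depends on the
 * configuration): on 31 bit the window between VMALLOC_END and VMEM_MAP_END
 * is 0x80000000 - 0x7e000000 = 32 MB. Assuming a hypothetical 32 byte
 * struct page this gives VMEM_MAX_PAGES = 32 MB / 32 = 1048576 pages;
 * VMEM_MAX_PFN then clamps that against VMALLOC_START >> PAGE_SHIFT, and
 * VMEM_MAX_PHYS rounds the result down to a 16 MB boundary.
 */
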
/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set remain distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

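/*
 * Worked example of the encoding above: the i/r/x/t columns correspond to
 * _PAGE_INVALID (0x400), _PAGE_RO (0x200), _PAGE_SWX (0x002) and
 * _PAGE_SWT (0x001). A read-only pte is _PAGE_TYPE_RO = 0x200, i.e.
 * irxt = 0100; once ipte sets the invalid bit the value becomes 0x600 =
 * 1100, which pte_none reports as empty while still being distinguishable
 * from the swap and file encodings.
 */
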
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override   */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

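/*
 * These correspond to the R and C bits in the storage key diagram further
 * up: e.g. a key value of 0x06 means the page has been both referenced and
 * changed. The dirty/young state of a page therefore lives in the storage
 * key rather than in the pte (see pte_dirty and pte_young below).
 */
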
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory, with a
 * primary/secondary page table, execute-only could be implemented, but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

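/*
 * For illustration: __Pxxx covers private (copy-on-write) mappings and
 * __Sxxx shared ones, indexed by the xwr permission bits. A private
 * PROT_READ|PROT_WRITE mapping (__P011) therefore starts out as PAGE_RO
 * so that the first write faults and triggers copy-on-write, while the
 * shared equivalent (__S011) maps straight to PAGE_RW.
 */
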
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 the reference bits are in the storage key and never in the
	 * TLB. With virtualization we handle the reference bit, without it
	 * we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 866 | /* | 
 | 867 |  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush | 
 | 868 |  * both clear the TLB for the unmapped pte. The reason is that | 
 | 869 |  * ptep_get_and_clear is used in common code (e.g. change_pte_range) | 
 | 870 |  * to modify an active pte. The sequence is | 
 | 871 |  *   1) ptep_get_and_clear | 
 | 872 |  *   2) set_pte_at | 
 | 873 |  *   3) flush_tlb_range | 
 | 874 |  * On s390 the tlb needs to be flushed together with the modification of | 
 | 875 |  * the pte if the pte is active. The only way this can be implemented is | 
 | 876 |  * to have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range | 
 | 877 |  * is a nop. | 
 | 878 |  */ | 
 | 879 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 
 | 880 | #define ptep_get_and_clear(__mm, __address, __ptep)			\ | 
 | 881 | ({									\ | 
 | 882 | 	pte_t __pte = *(__ptep);					\ | 
 | 883 | 	if (atomic_read(&(__mm)->mm_users) > 1 ||			\ | 
 | 884 | 	    (__mm) != current->active_mm)				\ | 
| Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 885 | 		ptep_invalidate(__mm, __address, __ptep);		\ | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 886 | 	else								\ | 
 | 887 | 		pte_clear((__mm), (__address), (__ptep));		\ | 
 | 888 | 	__pte;								\ | 
 | 889 | }) | 
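 |  |  | 
 |  | /* | 
 |  |  * A minimal sketch of the sequence described above, assuming a caller | 
 |  |  * that holds the page table lock and wants to change the protection of | 
 |  |  * one active pte; the helper name and "newprot" are illustrative only. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static void change_one_pte_sketch(struct vm_area_struct *vma, | 
 |  | 				  unsigned long addr, pte_t *ptep, | 
 |  | 				  pgprot_t newprot) | 
 |  | { | 
 |  | 	pte_t entry; | 
 |  |  | 
 |  | 	/* 1) get and clear the pte - flushes the TLB entry if needed */ | 
 |  | 	entry = ptep_get_and_clear(vma->vm_mm, addr, ptep); | 
 |  | 	/* 2) install the modified pte */ | 
 |  | 	set_pte_at(vma->vm_mm, addr, ptep, pte_modify(entry, newprot)); | 
 |  | 	/* 3) flush_tlb_range() follows in the caller and is a nop here */ | 
 |  | } | 
 |  | #endif | 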
 | 890 |  | 
 | 891 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH | 
| Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 892 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, | 
 | 893 | 				     unsigned long address, pte_t *ptep) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 894 | { | 
 | 895 | 	pte_t pte = *ptep; | 
| Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 896 | 	ptep_invalidate(vma->vm_mm, address, ptep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | 	return pte; | 
 | 898 | } | 
 | 899 |  | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 900 | /* | 
 | 901 |  * The batched pte unmap code uses ptep_get_and_clear_full to clear the | 
 | 902 |  * ptes. Here an optimization is possible. tlb_gather_mmu flushes all | 
 | 903 |  * tlbs of an mm if it can guarantee that the ptes of the mm_struct | 
 | 904 |  * cannot be accessed while the batched unmap is running. In this case | 
 | 905 |  * full==1 and a simple pte_clear is enough. See tlb.h. | 
 | 906 |  */ | 
 | 907 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 
 | 908 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | 
 | 909 | 					    unsigned long addr, | 
 | 910 | 					    pte_t *ptep, int full) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | { | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 912 | 	pte_t pte = *ptep; | 
 | 913 |  | 
 | 914 | 	if (full) | 
 | 915 | 		pte_clear(mm, addr, ptep); | 
 | 916 | 	else | 
| Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 917 | 		ptep_invalidate(mm, addr, ptep); | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 918 | 	return pte; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 | } | 
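 |  |  | 
 |  | /* | 
 |  |  * A minimal sketch of the batched unmap case described above; the helper | 
 |  |  * name is illustrative and "fullmm" stands for the guarantee made by | 
 |  |  * tlb_gather_mmu that nobody else can access the ptes. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static unsigned long zap_pte_sketch(struct mm_struct *mm, unsigned long addr, | 
 |  | 				    pte_t *ptep, int fullmm) | 
 |  | { | 
 |  | 	/* fullmm != 0: plain pte_clear, no ipte and no TLB flush needed */ | 
 |  | 	pte_t pte = ptep_get_and_clear_full(mm, addr, ptep, fullmm); | 
 |  |  | 
 |  | 	if (pte_present(pte)) | 
 |  | 		return pte_pfn(pte);	/* page frame to hand back */ | 
 |  | 	return 0; | 
 |  | } | 
 |  | #endif | 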
 | 920 |  | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 921 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 
 | 922 | #define ptep_set_wrprotect(__mm, __addr, __ptep)			\ | 
 | 923 | ({									\ | 
 | 924 | 	pte_t __pte = *(__ptep);					\ | 
 | 925 | 	if (pte_write(__pte)) {						\ | 
 | 926 | 		if (atomic_read(&(__mm)->mm_users) > 1 ||		\ | 
 | 927 | 		    (__mm) != current->active_mm)			\ | 
| Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 928 | 			ptep_invalidate(__mm, __addr, __ptep);		\ | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 929 | 		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\ | 
 | 930 | 	}								\ | 
 | 931 | }) | 
 | 932 |  | 
 | 933 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 
| Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 934 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\ | 
 | 935 | ({									\ | 
 | 936 | 	int __changed = !pte_same(*(__ptep), __entry);			\ | 
 | 937 | 	if (__changed) {						\ | 
| Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 938 | 		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\ | 
| Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 939 | 		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\ | 
 | 940 | 	}								\ | 
 | 941 | 	__changed;							\ | 
| Benjamin Herrenschmidt | 8dab524 | 2007-06-16 10:16:12 -0700 | [diff] [blame] | 942 | }) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 |  | 
 | 944 | /* | 
 | 945 |  * Test and clear dirty bit in storage key. | 
 | 946 |  * We can't clear the changed bit atomically. This is a potential | 
 | 947 |  * race against modification of the referenced bit. This function | 
 | 948 |  * should therefore only be called if the page is not mapped in any | 
 | 949 |  * address space. | 
 | 950 |  */ | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 951 | #define __HAVE_ARCH_PAGE_TEST_DIRTY | 
| Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 952 | static inline int page_test_dirty(struct page *page) | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 953 | { | 
| Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 954 | 	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; | 
 | 955 | } | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 956 |  | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 957 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY | 
| Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 958 | static inline void page_clear_dirty(struct page *page) | 
 | 959 | { | 
 | 960 | 	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 961 | } | 
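 |  |  | 
 |  | /* | 
 |  |  * A minimal sketch of how the two helpers above are meant to be paired, | 
 |  |  * assuming the caller has already unmapped the page from all address | 
 |  |  * spaces so the non-atomic key update cannot race with the CPU setting | 
 |  |  * the referenced bit; the helper name is illustrative only. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static int test_clear_page_dirty_sketch(struct page *page) | 
 |  | { | 
 |  | 	int dirty = page_test_dirty(page); | 
 |  |  | 
 |  | 	if (dirty) | 
 |  | 		page_clear_dirty(page); | 
 |  | 	return dirty; | 
 |  | } | 
 |  | #endif | 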
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 |  | 
 | 963 | /* | 
 | 964 |  * Test and clear referenced bit in storage key. | 
 | 965 |  */ | 
| Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 966 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 967 | static inline int page_test_and_clear_young(struct page *page) | 
 | 968 | { | 
| Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 969 | 	unsigned long physpage = page_to_phys(page); | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 970 | 	int ccode; | 
 | 971 |  | 
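 |  | 	/* | 
 |  | 	 * rrbe resets the reference bit in the storage key of the frame | 
 |  | 	 * and returns the old reference/change state in the condition | 
 |  | 	 * code; cc 2 or 3 means the reference bit was set. | 
 |  | 	 */ | 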
| Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 972 | 	asm volatile( | 
 | 973 | 		"	rrbe	0,%1\n" | 
 | 974 | 		"	ipm	%0\n" | 
 | 975 | 		"	srl	%0,28\n" | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 976 | 		: "=d" (ccode) : "a" (physpage) : "cc" ); | 
 | 977 | 	return ccode & 2; | 
 | 978 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 |  | 
 | 980 | /* | 
 | 981 |  * Conversion functions: convert a page and protection to a page entry, | 
 | 982 |  * and a page entry and page directory to the page they refer to. | 
 | 983 |  */ | 
 | 984 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | 
 | 985 | { | 
 | 986 | 	pte_t __pte; | 
 | 987 | 	pte_val(__pte) = physpage + pgprot_val(pgprot); | 
 | 988 | 	return __pte; | 
 | 989 | } | 
 | 990 |  | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 991 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) | 
 | 992 | { | 
| Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 993 | 	unsigned long physpage = page_to_phys(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 994 |  | 
| Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 995 | 	return mk_pte_phys(physpage, pgprot); | 
 | 996 | } | 
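 |  |  | 
 |  | /* | 
 |  |  * A minimal sketch, assuming a caller that wants a pte mapping "page" | 
 |  |  * with the kernel's default page protection (PAGE_KERNEL, defined | 
 |  |  * earlier in this file); the helper name is illustrative only. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static pte_t mk_kernel_pte_sketch(struct page *page) | 
 |  | { | 
 |  | 	return mk_pte(page, PAGE_KERNEL); | 
 |  | } | 
 |  | #endif | 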
 | 997 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 999 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) | 
 | 1000 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 
 | 1001 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 |  | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1003 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | 
 | 1005 |  | 
 | 1006 | #ifndef __s390x__ | 
 | 1007 |  | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1008 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) | 
 | 1009 | #define pud_deref(pud) ({ BUG(); 0UL; }) | 
 | 1010 | #define pgd_deref(pgd) ({ BUG(); 0UL; }) | 
 | 1011 |  | 
 | 1012 | #define pud_offset(pgd, address) ((pud_t *) pgd) | 
 | 1013 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 |  | 
 | 1015 | #else /* __s390x__ */ | 
 | 1016 |  | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1017 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) | 
 | 1018 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) | 
| Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1019 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1020 |  | 
| Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1021 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) | 
 | 1022 | { | 
| Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1023 | 	pud_t *pud = (pud_t *) pgd; | 
 | 1024 | 	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) | 
 | 1025 | 		pud = (pud_t *) pgd_deref(*pgd); | 
| Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1026 | 	return pud + pud_index(address); | 
 | 1027 | } | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1028 |  | 
 | 1029 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) | 
 | 1030 | { | 
| Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1031 | 	pmd_t *pmd = (pmd_t *) pud; | 
 | 1032 | 	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) | 
 | 1033 | 		pmd = (pmd_t *) pud_deref(*pud); | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1034 | 	return pmd + pmd_index(address); | 
 | 1035 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 |  | 
 | 1037 | #endif /* __s390x__ */ | 
 | 1038 |  | 
| Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1039 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) | 
 | 1040 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) | 
 | 1041 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 
 | 1042 |  | 
 | 1043 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) | 
 | 1044 |  | 
 | 1045 | /* Find an entry in the lowest level page table. */ | 
 | 1046 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) | 
 | 1047 | #define pte_offset_kernel(pmd, address) pte_offset(pmd,address) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1048 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) | 
 | 1049 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) | 
 | 1050 | #define pte_unmap(pte) do { } while (0) | 
 | 1051 | #define pte_unmap_nested(pte) do { } while (0) | 
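 |  |  | 
 |  | /* | 
 |  |  * A minimal sketch of a software walk using the accessors above, assuming | 
 |  |  * the caller holds the locks needed to keep the tables stable; the helper | 
 |  |  * name is illustrative only. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static pte_t *lookup_pte_sketch(struct mm_struct *mm, unsigned long addr) | 
 |  | { | 
 |  | 	pgd_t *pgd = pgd_offset(mm, addr); | 
 |  | 	pud_t *pud; | 
 |  | 	pmd_t *pmd; | 
 |  |  | 
 |  | 	if (pgd_none(*pgd)) | 
 |  | 		return NULL; | 
 |  | 	pud = pud_offset(pgd, addr); | 
 |  | 	if (pud_none(*pud)) | 
 |  | 		return NULL; | 
 |  | 	pmd = pmd_offset(pud, addr); | 
 |  | 	if (pmd_none(*pmd)) | 
 |  | 		return NULL; | 
 |  | 	return pte_offset_kernel(pmd, addr); | 
 |  | } | 
 |  | #endif | 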
 | 1052 |  | 
 | 1053 | /* | 
 | 1054 |  * 31 bit swap entry format: | 
 | 1055 |  * A page-table entry has some bits we have to treat in a special way. | 
 | 1056 |  * Bits 0, 20 and 23 have to be zero, otherwise a specification | 
 | 1057 |  * exception will occur instead of a page translation exception. The | 
 | 1058 |  * specification exception has the bad habit of not storing the | 
 | 1059 |  * necessary information in the lowcore. | 
 | 1060 |  * Bit 21 and bit 22 are the page invalid bit and the page protection | 
 | 1061 |  * bit. We set both to indicate a swapped page. | 
 | 1062 |  * Bit 30 and 31 are used to distinguish the different page types. For | 
 | 1063 |  * a swapped page these bits need to be zero. | 
 | 1064 |  * This leaves the bits 1-19 and bits 24-29 to store type and offset. | 
 | 1065 |  * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | 
 | 1066 |  * plus 24 for the offset. | 
 | 1067 |  * 0|     offset        |0110|o|type |00| | 
 | 1068 |  * 0 0000000001111111111 2222 2 22222 33 | 
 | 1069 |  * 0 1234567890123456789 0123 4 56789 01 | 
 | 1070 |  * | 
 | 1071 |  * 64 bit swap entry format: | 
 | 1072 |  * A page-table entry has some bits we have to treat in a special way. | 
 | 1073 |  * Bit 52 and bit 55 have to be zero, otherwise a specification | 
 | 1074 |  * exception will occur instead of a page translation exception. The | 
 | 1075 |  * specification exception has the bad habit of not storing the | 
 | 1076 |  * necessary information in the lowcore. | 
 | 1077 |  * Bit 53 and bit 54 are the page invalid bit and the page protection | 
 | 1078 |  * bit. We set both to indicate a swapped page. | 
 | 1079 |  * Bit 62 and 63 are used to distinguish the different page types. For | 
 | 1080 |  * a swapped page these bits need to be zero. | 
 | 1081 |  * This leaves the bits 0-51 and bits 56-61 to store type and offset. | 
 | 1082 |  * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | 
 | 1083 |  * plus 56 for the offset. | 
 | 1084 |  * |                      offset                        |0110|o|type |00| | 
 | 1085 |  *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | 
 | 1086 |  *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | 
 | 1087 |  */ | 
 | 1088 | #ifndef __s390x__ | 
 | 1089 | #define __SWP_OFFSET_MASK (~0UL >> 12) | 
 | 1090 | #else | 
 | 1091 | #define __SWP_OFFSET_MASK (~0UL >> 11) | 
 | 1092 | #endif | 
| Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 1093 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | { | 
 | 1095 | 	pte_t pte; | 
 | 1096 | 	offset &= __SWP_OFFSET_MASK; | 
| Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1097 | 	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | 		((offset & 1UL) << 7) | ((offset & ~1UL) << 11); | 
 | 1099 | 	return pte; | 
 | 1100 | } | 
 | 1101 |  | 
 | 1102 | #define __swp_type(entry)	(((entry).val >> 2) & 0x1f) | 
 | 1103 | #define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1)) | 
 | 1104 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | 
 | 1105 |  | 
 | 1106 | #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) }) | 
 | 1107 | #define __swp_entry_to_pte(x)	((pte_t) { (x).val }) | 
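 |  |  | 
 |  | /* | 
 |  |  * A worked sketch of the encoding above: a (type, offset) pair packed | 
 |  |  * with __swp_entry() decodes back to the same values; the numbers are | 
 |  |  * arbitrary and swp_entry_t comes from the generic swap headers. | 
 |  |  */ | 
 |  | #if 0	/* illustrative sketch, not built */ | 
 |  | static void swap_entry_sketch(void) | 
 |  | { | 
 |  | 	swp_entry_t entry = __swp_entry(3, 0x1234); | 
 |  | 	pte_t pte = __swp_entry_to_pte(entry); | 
 |  |  | 
 |  | 	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3); | 
 |  | 	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x1234); | 
 |  | } | 
 |  | #endif | 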
 | 1108 |  | 
 | 1109 | #ifndef __s390x__ | 
 | 1110 | # define PTE_FILE_MAX_BITS	26 | 
 | 1111 | #else /* __s390x__ */ | 
 | 1112 | # define PTE_FILE_MAX_BITS	59 | 
 | 1113 | #endif /* __s390x__ */ | 
 | 1114 |  | 
 | 1115 | #define pte_to_pgoff(__pte) \ | 
 | 1116 | 	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) | 
 | 1117 |  | 
 | 1118 | #define pgoff_to_pte(__off) \ | 
 | 1119 | 	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ | 
| Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1120 | 		   | _PAGE_TYPE_FILE }) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 |  | 
 | 1122 | #endif /* !__ASSEMBLY__ */ | 
 | 1123 |  | 
 | 1124 | #define kern_addr_valid(addr)   (1) | 
 | 1125 |  | 
| Heiko Carstens | 17f3458 | 2008-04-30 13:38:47 +0200 | [diff] [blame] | 1126 | extern int vmem_add_mapping(unsigned long start, unsigned long size); | 
 | 1127 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); | 
| Carsten Otte | 402b086 | 2008-03-25 18:47:10 +0100 | [diff] [blame] | 1128 | extern int s390_enable_sie(void); | 
| Heiko Carstens | f4eb07c | 2006-12-08 15:56:07 +0100 | [diff] [blame] | 1129 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | /* | 
 | 1131 |  * No page table caches to initialise | 
 | 1132 |  */ | 
 | 1133 | #define pgtable_cache_init()	do { } while (0) | 
 | 1134 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | #include <asm-generic/pgtable.h> | 
 | 1136 |  | 
 | 1137 | #endif /* _ASM_S390_PGTABLE_H */ |