/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()       do {} while (0)

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        0
# define PGD_ALLOC_ORDER        1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        2
# define PGD_ALLOC_ORDER        2
#endif /* __s390x__ */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

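/*
 * Allocate and clear a new page directory.  When noexec emulation is
 * active (s390_noexec), a second "shadow" pgd is allocated as well and
 * attached to the primary pgd's struct page via page->lru.next, which
 * is presumably where the get_shadow_pgd() helper finds it again.
 */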
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
        int i;

        if (!pgd)
                return NULL;
        if (s390_noexec) {
                pgd_t *shadow_pgd = (pgd_t *)
                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
                struct page *page = virt_to_page(pgd);

                if (!shadow_pgd) {
                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pgd;
        }
        for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
                pgd_clear(pgd + i);
#endif
        return pgd;
}

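/* Free a pgd together with the shadow pgd, if one was allocated for it. */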
static inline void pgd_free(pgd_t *pgd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);

        if (shadow_pgd)
                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
#define pgd_populate_kernel(mm, pmd, pte)       BUG()
#else /* __s390x__ */
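/*
 * Allocate and clear a pmd.  As in pgd_alloc(), a shadow pmd is
 * allocated for the noexec case and linked via page->lru.next.
 */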
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
        int i;

        if (!pmd)
                return NULL;
        if (s390_noexec) {
                pmd_t *shadow_pmd = (pmd_t *)
                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
                struct page *page = virt_to_page(pmd);

                if (!shadow_pmd) {
                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pmd;
        }
        for (i=0; i < PTRS_PER_PMD; i++)
                pmd_clear(pmd + i);
        return pmd;
}

static inline void pmd_free (pmd_t *pmd)
{
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pmd)
                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

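/*
 * Make a pgd entry point at a pmd.  pgd_populate() also updates the
 * shadow tables when both a shadow pgd and a shadow pmd exist, so the
 * noexec shadow hierarchy stays in sync with the primary one.
 */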
static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pgd && shadow_pmd)
                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
        pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

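/*
 * Make the pmd entries point at a pte page.  A 4 KB pte page holds more
 * than one 256-entry hardware page table: four of them with 4-byte
 * entries on 31 bit (hence pmd[0]..pmd[3]), and two with 8-byte entries
 * on 64 bit (hence both halves of the pmd entry are set).
 */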
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
        pte_t *pte = (pte_t *)page_to_phys(page);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
        pte_t *shadow_pte = get_shadow_pte(pte);

        pmd_populate_kernel(mm, pmd, pte);
        if (shadow_pmd && shadow_pte)
                pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
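/*
 * Allocate a pte page and clear every entry.  With noexec emulation a
 * shadow pte page is allocated as well and, as for pgd and pmd, linked
 * to the primary page via page->lru.next.
 */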
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
        int i;

        if (!pte)
                return NULL;
        if (s390_noexec) {
                pte_t *shadow_pte = (pte_t *)
                        __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                struct page *page = virt_to_page(pte);

                if (!shadow_pte) {
                        free_page((unsigned long) pte);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pte;
        }
        for (i=0; i < PTRS_PER_PTE; i++) {
                pte_clear(mm, vmaddr, pte + i);
                vmaddr += PAGE_SIZE;
        }
        return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
        if (pte)
                return virt_to_page(pte);
        return NULL;
}

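/*
 * Free a pte page; the shadow pte page allocated for the noexec case,
 * if any, is freed along with it.
 */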
static inline void pte_free_kernel(pte_t *pte)
{
        pte_t *shadow_pte = get_shadow_pte(pte);

        if (shadow_pte)
                free_page((unsigned long) shadow_pte);
        free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
        struct page *shadow_page = get_shadow_page(pte);

        if (shadow_page)
                __free_page(shadow_page);
        __free_page(pte);
}

#endif /* _S390_PGALLOC_H */