/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */
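
/*
 * A rough rationale for these values, assuming the usual s390 hardware
 * table sizes: the 31-bit segment table used as pgd is 8KB (order 1),
 * the 64-bit region and segment tables are 16KB each (order 2), and a
 * page table always fits within a single 4KB page (order 0).
 */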

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	int i;

	if (!pgd)
		return NULL;
	if (s390_noexec) {
		pgd_t *shadow_pgd = (pgd_t *)
			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
		struct page *page = virt_to_page(pgd);

		if (!shadow_pgd) {
			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pgd;
	}
	for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
		pgd_clear(pgd + i);
#endif
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);

	if (shadow_pgd)
		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

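/*
 * How the shadow tables are found again later: pgd_alloc() above stashes
 * the shadow table in page->lru.next of the struct page backing the
 * primary table.  get_shadow_pgd() (declared elsewhere, presumably in
 * <asm/pgtable.h>) is assumed to simply read that field back, roughly:
 *
 *	static inline pgd_t *get_shadow_pgd(pgd_t *pgd)
 *	{
 *		return s390_noexec ?
 *			(pgd_t *) virt_to_page(pgd)->lru.next : NULL;
 *	}
 *
 * This is an illustrative sketch of the convention used throughout this
 * file, not the real definition.
 */
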
#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
	int i;

	if (!pmd)
		return NULL;
	if (s390_noexec) {
		pmd_t *shadow_pmd = (pmd_t *)
			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
		struct page *page = virt_to_page(pmd);

		if (!shadow_pmd) {
			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pmd;
	}
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pmd)
		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

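/*
 * Why the 256-entry strides above (assuming the usual ESA/390 and
 * z/Architecture table formats): a hardware page table describes one
 * 1MB segment, i.e. 256 pages.  The 4KB page handed in as "pte" holds
 * 1024 4-byte entries on 31-bit, so it backs four consecutive segment
 * table entries (pte, pte+256, pte+512, pte+768); on 64-bit it holds
 * 512 8-byte entries, backing the two halves of a pmd_t via pmd_val
 * and pmd_val1.
 */
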
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

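/*
 * Note on the page_to_phys() use above: it is assumed here that the
 * kernel maps physical memory 1:1 (PAGE_OFFSET is 0 on s390), so the
 * physical address of the pte page is also a valid kernel virtual
 * address and can be used directly as a pte_t pointer.
 */
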
/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	int i;

	if (!pte)
		return NULL;
	if (s390_noexec) {
		pte_t *shadow_pte = (pte_t *)
			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		struct page *page = virt_to_page(pte);

		if (!shadow_pte) {
			free_page((unsigned long) pte);
			return NULL;
		}
		page->lru.next = (void *) shadow_pte;
	}
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_clear(mm, vmaddr, pte + i);
		vmaddr += PAGE_SIZE;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
	if (pte)
		return virt_to_page(pte);
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	pte_t *shadow_pte = get_shadow_pte(pte);

	if (shadow_pte)
		free_page((unsigned long) shadow_pte);
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	struct page *shadow_page = get_shadow_page(pte);

	if (shadow_page)
		__free_page(shadow_page);
	__free_page(pte);
}

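/*
 * Rough usage sketch, modeled on the generic fault path in mm/memory.c
 * rather than on anything in this header: a caller allocates a pte page,
 * then either installs it or discards it if another thread won the race.
 *
 *	struct page *new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (pmd_present(*pmd))
 *		pte_free(new);
 *	else
 *		pmd_populate(mm, pmd, new);
 *	spin_unlock(&mm->page_table_lock);
 *
 * Every table installed through pmd_populate() is eventually released
 * again with pte_free() when the address space is torn down.
 */
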
#endif /* _S390_PGALLOC_H */