/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

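/*
 * Allocation/free primitives for the two kinds of s390 page tables:
 * crst_table_alloc()/crst_table_free() handle the region/segment ("crst")
 * tables, page_table_alloc()/page_table_free() handle pte tables.  The int
 * argument (the callers below pass s390_noexec) presumably controls whether
 * a shadow table, as retrieved via get_shadow_table()/get_shadow_pte(), is
 * set up alongside the primary one.
 */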
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(unsigned long *);

unsigned long *page_table_alloc(int);
void page_table_free(unsigned long *);

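/*
 * Fill a table with 'val': the first entry is stored directly, the first
 * mvc replicates it across the initial 256-byte block (248 bytes on 64 bit,
 * 252 bytes on 31 bit, following the 8/4 byte seed), and the brct loop then
 * copies that block over the remaining (n / 256) - 1 blocks.
 */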
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n));
}

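/*
 * Initialize all 2048 entries of a region/segment table with the given
 * empty entry value; if a shadow table exists for it, initialize that one
 * the same way.
 */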
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}

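/*
 * On 31 bit the pmd level is folded into the pgd (the segment table), so
 * pmd_alloc_one()/pgd_populate() must never be reached and simply BUG().
 * On 64 bit the pgd is a region-third table and the pmd is a real segment
 * table allocated through crst_table_alloc().
 */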
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)

#define pgd_populate(mm, pmd, pte)		BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()

#else /* __s390x__ */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _REGION3_ENTRY_EMPTY;
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) crst;
}
#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)

static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_table(pgd);
	pmd_t *shadow_pmd = get_shadow_table(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

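/*
 * Allocate and initialize a top-level (pgd) table for mm; the entry type
 * depends on the mode (segment entries on 31 bit, region-third entries on
 * 64 bit).
 */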
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, pgd_entry_type(mm));
	return (pgd_t *) crst;
}
#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)

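/*
 * A pte allocation is carved into 256-entry chunks when it is wired into
 * the segment table: on 31 bit four consecutive pmd entries point at pte,
 * pte+256, pte+512 and pte+768; on 64 bit the two halves of the pmd entry
 * (pmd_val/pmd_val1) point at pte and pte+256.
 */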
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

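/*
 * pmd_populate() works on the struct page of the pte table; besides the
 * regular pmd it also populates the shadow pmd with the shadow pte table
 * when both exist.
 */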
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_table(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) \
	((pte_t *) page_table_alloc(s390_noexec))
#define pte_alloc_one(mm, vmaddr) \
	virt_to_page(page_table_alloc(s390_noexec))

#define pte_free_kernel(pte) \
	page_table_free((unsigned long *) pte)
#define pte_free(pte) \
	page_table_free((unsigned long *) page_to_phys((struct page *) pte))

#endif /* _S390_PGALLOC_H */