#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>

/*
 * Segment table
 */

#define STE_ESID_V		0x80
#define STE_ESID_KS		0x20
#define STE_ESID_KP		0x10
#define STE_ESID_N		0x08

#define STE_VSID_SHIFT		12

/* Location of cpu0's segment table */
#define STAB0_PAGE		0x8
#define STAB0_OFFSET		(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR		(STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* !__ASSEMBLY__ */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read, User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly into a slbmte "vsid" value
 *    penc  : is the HPTE encoding for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoded page number ("VPN") shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash from a VA we can ignore the page size bits of the VA,
 * and for the HPTE encoding we ignore up to 23 bits of the VA, so ignoring
 * the lower 12 bits works in all cases, including a 4k page size.
 */
#define VPN_SHIFT	12

#ifndef __ASSEMBLY__

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

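/*
 * Illustrative sketch only, not an API defined by this header: how the
 * VSID dword of an SLB entry is typically assembled from the SLB_VSID_*
 * bits above (compare the SLB miss and bolting paths).  The vsid and
 * flags arguments are assumed to be supplied by the caller, e.g. from
 * the VSID helpers below plus SLB_VSID_KERNEL/SLB_VSID_USER and an LLP
 * encoding.
 */
static inline unsigned long example_mk_vsid_data(unsigned long vsid,
						 unsigned long flags,
						 int ssize)
{
	int vsid_shift = (ssize == MMU_SEGSIZE_256M) ?
				SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T;

	/* VSID in the high bits, then the B (ssize) field and the flags */
	return (vsid << vsid_shift) | flags |
	       ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}
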
/*
 * The current system page and segment sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache-inhibited
 * pages, we have to be prepared to switch processes to use 4k pages when
 * they create cache-inhibited mappings.  If this is the case,
 * mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of an HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn,
					  int psize, int ssize)
{
	unsigned long v;

	v = hpte_encode_avpn(vpn, psize, ssize);
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size.  We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}

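/*
 * Illustrative sketch only, not an API defined by this header: the two
 * HPTE dwords are built by combining the encodings above.  The rflags
 * value (WIMG, PP and key bits) is assumed to be supplied by the
 * caller, and HPTE_V_VALID is just one example of a flag or'ed into
 * the first dword.
 */
static inline struct hash_pte example_make_hpte(unsigned long vpn,
						unsigned long pa,
						unsigned long rflags,
						int psize, int ssize)
{
	struct hash_pte hpte;

	/* First dword: AVPN, segment size and (for >4K pages) the L bit */
	hpte.v = hpte_encode_v(vpn, psize, ssize) | HPTE_V_VALID;
	/* Second dword: RPN/LP encoding plus the caller's WIMG/PP bits */
	hpte.r = hpte_encode_r(pa, psize) | rflags;
	return hpte;
}
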
/*
 * Build a VPN (the VA shifted right by VPN_SHIFT bits) from the VSID,
 * EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}

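/*
 * Illustrative sketch only, not an API defined by this header: how a
 * hash value from hpt_hash() is turned into a pointer to the primary
 * PTE group, using htab_hash_mask and HPTES_PER_GROUP from above.  The
 * secondary group is found the same way using ~hash.
 */
static inline struct hash_pte *example_primary_pteg(unsigned long hash)
{
	/* Mask the hash to the table size, then scale by the group size */
	return htab_address + (hash & htab_hash_mask) * HPTES_PER_GROUP;
}
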
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 38-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID | 1 << 37; for user addresses it is:
 *	(context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1))
 *
 * This splits the proto-VSID into the ranges below:
 *	0 to (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
 *	2^(CONTEXT_BITS + USER_ESID_BITS) to (2^VSID_BITS - 1) : Kernel proto-VSID range
 *
 * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1; that is,
 * we assign half of the space to user processes and half to the kernel.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 * - We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 * - We allow for USER_ESID_BITS significant bits of ESID and
 * CONTEXT_BITS bits of context for user addresses,
 * i.e. 64T (46 bits) of address space for up to half a million contexts.
 *
 * - The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits.  It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		38
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		26
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS		19
#define USER_ESID_BITS		18
#define USER_ESID_BITS_1T	6

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^VSID_BITS-1), and lies below		\
	 * 2*(2^VSID_BITS-1).  That in particular means that if	\
	 * rt >= 2^VSID_BITS-1, then rt+1 has the 2^VSID_BITS bit set.	\
	 * So, if rt+1 has the bit clear, rt already has the answer we	\
	 * want; if it doesn't, the answer is the low VSID_BITS bits	\
	 * of rt+1.  So in all cases the answer is the low VSID_BITS	\
	 * bits of (rt + ((rt+1) >> VSID_BITS)). */			\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

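/*
 * Illustrative sketch only, modelled on the kernel's subpage-protection
 * lookup: how the tree above is walked for a given address.  This
 * helper is an example, not part of this header's API, and assumes the
 * caller has already checked addr < spt->maxaddr.
 */
static inline unsigned int example_subpage_prot_lookup(
	struct subpage_prot_table *spt, unsigned long addr)
{
	unsigned int **sbpm, *sbpp;

	if (addr < 0x100000000UL) {
		/* below 4GB the top level is skipped via low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[addr >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(addr >> SBP_L2_SHIFT) & (SBP_L1_COUNT - 1)];
	if (!sbpp)
		return 0;
	return sbpp[(addr >> PAGE_SHIFT) & (SBP_L2_COUNT - 1)];
}
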
extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;	/* mask of enabled coprocessor types */
	unsigned int cop_pid;	/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/*
 * This is only valid for addresses >= PAGE_OFFSET.
 * The proto-VSID space is divided into two classes:
 *	User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
 *	Kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^VSID_BITS - 1
 *
 * With KERNEL_START at 0xc000000000000000, the proto-VSID for
 * the kernel ends up as 0xc00000000 (36 bits).  With 64TB
 * support we need to have the kernel proto-VSID in the
 * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long proto_vsid;
	/*
	 * We need to make sure the proto_vsid for the kernel is
	 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
	 */
	if (ssize == MMU_SEGSIZE_256M) {
		proto_vsid = ea >> SID_SHIFT;
		proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
		return vsid_scramble(proto_vsid, 256M);
	}
	proto_vsid = ea >> SID_SHIFT_1T;
	proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
	return vsid_scramble(proto_vsid, 1T);
}

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << USER_ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << USER_ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

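/*
 * Illustrative sketch only, not an API defined by this header: the
 * steps a fault path conceptually performs to get from a user
 * effective address to a hash-table hash, tying together the helpers
 * above.  Error handling and the real per-address page-size lookup
 * are elided; psize is assumed to be known by the caller.
 */
static inline unsigned long example_ea_to_hash(unsigned long context,
					       unsigned long ea, int psize)
{
	int ssize = user_segment_size(ea);
	unsigned long vsid = get_vsid(context, ea, ssize);
	unsigned long vpn = hpt_vpn(ea, vsid, ssize);

	return hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
}
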
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */