#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(ASM_CONST(1) << PGTABLE_EADDR_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif
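
/*
 * Illustrative sizing, assuming the 4K backend's index sizes
 * (PTE 9, PMD 7, PUD 9, PGD 9, plus PAGE_SHIFT 12): PGTABLE_EADDR_SIZE
 * works out to 46 bits, so PGTABLE_RANGE covers 2^46 = 64TB of
 * effective address space per page table.
 */
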
/*
 * Define the address range of the kernel non-linear virtual area
 */

#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START	ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

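/*
 * Resulting layout (a sketch derived from the constants above, with
 * KERN_VIRT_SIZE = 2^44):
 *
 * hash (Book3S):  0xD000000000000000  vmalloc      (half)
 *                 KERN_IO_START       IO mappings  (half)
 *
 * Book3E:         0x8000000000000000  vmalloc      (quarter)
 *                 VMALLOC_END         vmemmap      (quarter)
 *                 KERN_IO_START       IO mappings  (half)
 */
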
/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_VIRT_START + KERN_VIRT_SIZE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)


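/*
 * Worked example on hash (Book3S), straight from the constants above:
 *
 *	KERN_IO_START = 0xD000080000000000	(start + 2^43)
 *	ISA_IO_END    = KERN_IO_START + 64K
 *	PHB_IO_END    = KERN_IO_START + 2G	(FULL_IO_SIZE)
 *	IOREMAP_END   = 0xD000100000000000
 */
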
/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL) /* Server only */
#define USER_REGION_ID		(0UL)

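/*
 * For instance, on hash: REGION_ID(0xD000000000000000) = 0xd, so
 * VMALLOC_REGION_ID is 0xd; with PAGE_OFFSET at 0xC000000000000000,
 * KERNEL_REGION_ID is 0xc; user addresses fall in region 0, and the
 * vmemmap (below) claims region 0xf.
 */
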
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)


/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors; it's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */


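/*
 * Illustrative use of the iterator pair above (the real callers live
 * in the hash MMU code):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		invalidate the hash slot for (va, index) using shift
 *	} pte_iterate_hashed_end();
 *
 * With this default implementation the body runs exactly once with
 * index = 0; the Book3S 64K sub-page variant replaces it with a real
 * loop over the 4K sub-pages.
 */
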
/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address)	(((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	/* Spin while _PAGE_BUSY is set, then atomically clear 'clr' */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old & ~clr);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}
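
/*
 * Illustrative callers (all below in this file): ptep_set_wrprotect()
 * passes clr = _PAGE_RW to strip write permission, while pte_clear()
 * and ptep_get_and_clear() pass clr = ~0UL to tear the PTE down
 * entirely; the returned old value tells them what was mapped.
 */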

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0);
}


/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	/* Spin while _PAGE_BUSY is set, then atomically OR in the new bits */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)

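/*
 * Layout sketch of the swap encoding above: the type lives in bits
 * 1-6 of the swp_entry_t value (up to 64 swap types) and the offset
 * starts at bit 8, so e.g. __swp_entry(2, 100) yields
 * val = (2 << 1) | (100 << 8).  The whole value is then shifted up by
 * PTE_RPN_SHIFT when stored in a PTE, keeping it clear of the low
 * PTE status bits.
 */
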
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}
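
/*
 * Illustrative (hypothetical) use, looking up the kernel PTE backing
 * a vmalloc address; the caller must hold the appropriate locks and
 * handle a NULL result:
 *
 *	pte_t *ptep = find_linux_pte(init_mm.pgd, addr);
 */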

#ifdef CONFIG_HUGETLB_PAGE
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
#else
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (shift)
		*shift = 0;
	return find_linux_pte(pgdir, ea);
}
#endif /* !CONFIG_HUGETLB_PAGE */
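
/*
 * A sketch of the calling convention (inferred from the stub above):
 * on return *shift holds the page-size shift of a huge mapping, or 0
 * for a normal PAGE_SIZE mapping:
 *
 *	unsigned shift;
 *	pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
 *	if (ptep && !shift)
 *		... normal-sized pte ...
 */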

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */