/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non-MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE	__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_SHARED	__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_COPY	__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_READONLY	__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_KERNEL	__pgprot(0) /* these mean nothing to non-MMU */

#define pgprot_noncached(x)	(x)

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir		((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
 * of 32MB. */
#define VMALLOC_START	(CONFIG_KERNEL_START + \
				max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END	ioremap_bot
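
/*
 * Worked example (illustrative values only): with CONFIG_KERNEL_START at
 * 0xc0000000 and 64MB of RAM (memory_size = 0x04000000 > 32MB), the
 * vmalloc area starts at 0xc4000000, just above the pinned kernel lowmem
 * mapping, and extends up to ioremap_bot.
 */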

#endif /* __ASSEMBLY__ */

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
				_PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE))

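/*
 * Usage sketch: a mapping of device registers would typically be created
 * with pgprot_noncached(PAGE_KERNEL): the cache-control bits are masked
 * out and _PAGE_NO_CACHE | _PAGE_GUARDED set instead, so accesses bypass
 * the cache and are not prefetched. pgprot_noncached_wc() leaves
 * _PAGE_GUARDED clear for write-combining-style mappings.
 */
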
/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4- or 8-entry,
 * fully-associative TLB which serves as a first level to the shared TLB.
 * These two TLBs are known as the UTLB and ITLB, respectively (see "mmu.h"
 * for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)
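
/*
 * Worked example: assuming 4k pages (PAGE_SHIFT = 12) and PTE_SHIFT = 10,
 * PMD_SHIFT = PGDIR_SHIFT = 22, so one pgd entry maps a 4MB region, and
 * PTRS_PER_PTE = PTRS_PER_PGD = 1024, i.e. the pgd and each PTE table
 * occupy exactly one page.
 */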

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here. The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits. Because 4xx doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits. We actually use bits 21, 24, 25, and 30
 *   respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: Writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK
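
/*
 * Worked example: a resident, writable, referenced and dirtied user page
 * carries _PAGE_PRESENT | _PAGE_USER | _PAGE_RW | _PAGE_DIRTY |
 * _PAGE_ACCESSED = 0x4d2 in its low bits; the physical page number lives
 * in the bits above (see pfn_pte() below).
 */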

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X
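
/*
 * Example: the three digits are the exec/write/read permission bits, so a
 * private PROT_READ | PROT_WRITE mapping selects __P011, i.e. PAGE_COPY_X:
 * _PAGE_RW is deliberately absent, so the first write faults and the page
 * is copied (copy-on-write).
 */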

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))

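/*
 * Worked example: since PFN_SHIFT_OFFSET == PAGE_SHIFT, a PTE carries the
 * page's physical address in its upper bits. pfn_pte(0x48000, PAGE_KERNEL)
 * yields a pte_val() of 0x48000000 plus the protection bits, and pte_pfn()
 * recovers 0x48000 by shifting them back off.
 */
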
#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({ \
	pte_t pte; \
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot); \
	pte; \
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
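
/*
 * Usage sketch: pte_modify(pte, PAGE_READONLY) keeps the physical page
 * number and the accessed/dirty bits (everything in _PAGE_CHG_MASK) and
 * replaces the remaining protection bits, as happens when a mapping's
 * protection is downgraded.
 */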

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long old, tmp, msr;

	/*
	 * Interrupt-safe read-modify-write: msrclr saves the MSR in 'msr'
	 * and clears MSR[IE] (mask 0x2) to disable interrupts; lw/andn/or/sw
	 * then perform *p = (*p & ~clr) | set; mts restores the saved MSR.
	 * The nops cover the hazard after each MSR access.
	 */
	__asm__ __volatile__("\
	msrclr	%2, 0x2\n\
	nop\n\
	lw	%0, %4, r0\n\
	andn	%1, %0, %5\n\
	or	%1, %1, %6\n\
	sw	%1, %4, r0\n\
	mts	rmsr, %2\n\
	nop"
	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
	: "cc");

	return old;
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline int ptep_test_and_clear_young(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
			(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is an effective address of pte table */
/* returns effective address of the pmd entry */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* returns struct *page of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr) \
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
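
/*
 * Illustrative sketch (not part of the original interface): how the
 * accessors above compose into a manual walk of the two-level tree for
 * a kernel virtual address. The helper name is hypothetical.
 */
static inline pte_t *example_pte_walk_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* top-level entry */
	pmd_t *pmd = pmd_offset(pgd, address);	/* folded: same entry */

	if (pmd_none(*pmd))	/* no PTE table mapped at this address */
		return NULL;
	return pte_offset_kernel(pmd, address);
}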

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
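
/*
 * Example: pgoff_to_pte(off) stores the file offset in bits 3..31 (hence
 * PTE_FILE_MAX_BITS of 29) and sets _PAGE_FILE in bit 0 while _PAGE_PRESENT
 * stays clear: pte_present() fails, pte_file() identifies the entry, and
 * pte_to_pgoff() recovers the offset.
 */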

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used). -- paulus
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
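
/*
 * Worked example: __swp_entry(3, 0x10) packs the swap type into bits 0-5
 * and the offset above them, giving a val of 0x403; __swp_entry_to_pte()
 * shifts that left by 2, so the stored PTE (0x100c) keeps bits 0-1 clear
 * and can never be mistaken for a present (_PAGE_PRESENT) or nonlinear
 * (_PAGE_FILE) entry.
 */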

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk(unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
/* extern unsigned long mm_ptov(unsigned long addr) \
	__attribute__ ((const)); TBD */

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
				unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		unsigned long error_code);

void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
		unsigned int size, int flags);

void __init adjust_total_lowmem(void);
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
		size_t size, int direction);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */