/*
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
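
/*
 * Worked example (editorial note, not part of the original header): with
 * NEFF == 32, NEFF_SIGN is bit 31 and NEFF_MASK covers bits 63..32, so a
 * kernel P1SEG address such as 0x8c000000 extends to 0xffffffff8c000000,
 * while a value with bit 31 clear is returned unchanged.
 */
#if 0	/* illustrative only; the addresses below are hypothetical */
static inline void neff_sign_extend_example(void)
{
	unsigned long long hi = neff_sign_extend(0x8c000000UL);
	unsigned long long lo = neff_sign_extend(0x00400000UL);

	/* hi == 0xffffffff8c000000ULL, lo == 0x0000000000400000ULL */
	(void)hi;
	(void)lo;
}
#endif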

#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))

#define FIRST_USER_ADDRESS	0

#define PHYS_ADDR_MASK29	0x1fffffff
#define PHYS_ADDR_MASK32	0xffffffff

#ifdef CONFIG_PMB
static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}
#elif defined(CONFIG_32BIT)
static inline unsigned long phys_addr_mask(void)
{
	return PHYS_ADDR_MASK32;
}
#else
static inline unsigned long phys_addr_mask(void)
{
	return PHYS_ADDR_MASK29;
}
#endif

#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
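
/*
 * Illustrative sketch (editorial note, not part of the original header): on
 * a 29-bit part with 4 KiB pages, phys_addr_mask() returns 0x1fffffff and
 * PAGE_MASK is 0xfffff000, so PTE_PHYS_MASK works out to 0x1ffff000 and
 * masks a PTE value down to its physical page frame base.
 */
#if 0	/* hypothetical helper, assuming pte_val() from the page definitions */
static inline unsigned long example_pte_phys(pte_t pte)
{
	return pte_val(pte) & PTE_PHYS_MASK;	/* physical page base address */
}
#endif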

#ifdef CONFIG_SUPERH32
#define VMALLOC_START	(P3SEG)
#else
#define VMALLOC_START	(0xf0000000)
#endif
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and consider it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get..
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
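
/*
 * Example (editorial note, not part of the original header): the digits are
 * in "xwr" order, so a private PROT_READ|PROT_WRITE mapping resolves through
 * __P011 to PAGE_COPY (write faults take the copy-on-write path), while the
 * equivalent MAP_SHARED mapping resolves through __S011 to PAGE_SHARED.
 */
#if 0	/* illustrative only */
static inline pgprot_t example_private_rw_prot(void)
{
	return __P011;	/* PAGE_COPY */
}
#endif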

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

/*
 * Initialise the page table caches
 */
extern void pgtable_cache_init(void);

struct vm_area_struct;

extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}
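
/*
 * Usage note (editorial, not part of the original header): the generic MM
 * code invokes update_mmu_cache() after a PTE has been installed or changed,
 * roughly as sketched below, so that cache alias handling and the TLB stay
 * coherent with the new translation.
 */
#if 0	/* hypothetical caller, assuming set_pte() and an established fault path */
	set_pte(ptep, pte);
	update_mmu_cache(vma, address, ptep);
#endif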

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH_PGTABLE_H */