/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/system.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd_t are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true).  So we use them for the list structure.
 * The x86 code we are modelled on uses the page->private/index fields
 * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
 * our pgds are so much smaller than a page, it seems a waste to
 * spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
  ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
  ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
  ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
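
/*
 * Illustrative sketch (not part of this header): how a pgd would
 * typically be put on and taken off the global list with the macros
 * above.  These helper names are hypothetical; the real list
 * management lives in the arch's pgtable.c.
 */
static inline void example_pgd_list_add(pgd_t *pgd)
{
	spin_lock(&pgd_lock);
	list_add(pgd_to_list(pgd), &pgd_list);
	spin_unlock(&pgd_lock);
}

static inline void example_pgd_list_del(pgd_t *pgd)
{
	spin_lock(&pgd_lock);
	list_del(pgd_to_list(pgd));
	spin_unlock(&pgd_lock);
}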

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS	0

#define _PAGE_PRESENT           HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE         HV_PTE_PAGE
#define _PAGE_READABLE          HV_PTE_READABLE
#define _PAGE_WRITABLE          HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED          HV_PTE_ACCESSED
#define _PAGE_DIRTY             HV_PTE_DIRTY
#define _PAGE_GLOBAL            HV_PTE_GLOBAL
#define _PAGE_USER              HV_PTE_USER

/*
 * All the "standard" bits.  Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
  _PAGE_PRESENT | \
  _PAGE_HUGE_PAGE | \
  _PAGE_READABLE | \
  _PAGE_WRITABLE | \
  _PAGE_EXECUTABLE | \
  _PAGE_ACCESSED | \
  _PAGE_DIRTY | \
  _PAGE_GLOBAL | \
  _PAGE_USER \
)

#define PAGE_NONE \
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
 (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
 (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC       (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY      /* this is write-only, which we won't support */
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except
 * _PAGE_PRESENT and _PAGE_HUGE_PAGE, which must be one and zero,
 * respectively.  We set the ignored bits to zero.
 */
#define _PAGE_TABLE     _PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
  (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pfn((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	ptep->val = 0;
#else
	u32 *tmp = (u32 *)ptep;
	tmp[0] = 0;
	barrier();
	tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Behaviour is undefined if it is not.
 */
#define pte_present hv_pte_get_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has _PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
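
/*
 * Illustrative round trip (a sketch, not real kernel code), assuming
 * "pte" is a present, non-migrating PTE:
 *
 *	pte_t m = pte_mkmigrate(pte);
 *	pte_t p = pte_donemigrate(m);
 *
 * Here pte_present(m) is false and pte_migrating(m) is true, while
 * "p" is bit-for-bit identical to the original "pte".
 */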

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
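
/*
 * Sketched use (hypothetical; "vma", "page", "addr" and "ptep" are
 * assumed from a fault-handler context): install a fresh, young PTE
 * for a page with the vma's protections, using mk_pte() from below.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, pte_mkyoung(pte));
 */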

#define pte_page(x)		pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return hv_pte_get_pfn(pte);
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return hv_pte_set_pfn(prot, pfn);
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE        HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte)     (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
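
/*
 * Worked example (hypothetical offset): encoding file offset 0x1234
 * round-trips cleanly:
 *
 *	pte_t pte = pgoff_to_pte(0x1234);
 *
 * The result has CLIENT1 set, CLIENT0 and PRESENT clear, and 0x1234
 * in the high 32 bits, so pte_file(pte) is true and
 * pte_to_pgoff(pte) == 0x1234.
 */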

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })
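
/*
 * Worked example with hypothetical values, swap type 3 and offset 100:
 *
 *	swp_entry_t swp = __swp_entry(3, 100);
 *
 * gives swp.val == 3 | (100 << 5) == 3203, so __swp_type(swp) == 3
 * and __swp_offset(swp) == 100; __swp_entry_to_pte(swp) then shifts
 * that value into the high 32 bits of an otherwise-clear PTE.
 */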

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return pfn_pte(hv_pte_get_pfn(pte), newprot);
}
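
/*
 * Sketch of an mprotect()-style update that also keeps the old PTE's
 * cache-control bits via pgprot_modify() above ("old" and "vma" are
 * assumed from context):
 *
 *	pgprot_t prot = pgprot_modify(pte_pgprot(old), vma->vm_page_prot);
 *	pte_t new = pte_modify(old, prot);
 */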

/*
 * The pgd page can be thought of as an array like this:
 * pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *).
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#if defined(CONFIG_HIGHPTE)
extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
#endif

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check the low word on 32-bit platforms, since it might
	 * be out of sync with the upper half.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
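
/*
 * For example, assuming 64 KB pages (PAGE_SHIFT == 16),
 * pages_to_mb(64) == 64 >> 4 == 4, i.e. 64 pages span 4 MB.
 */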

/*
 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
	return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_huge_page(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_HUGE_PAGE;
}
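
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * walking from a kernel VA down to its pte_t with the helpers above.
 * Real lookups handle huge pages rather than just bailing out; the
 * folded pud/pmd levels come from the asm-generic headers pulled in
 * via <asm/pgtable_32.h> or <asm/pgtable_64.h>.
 */
static inline pte_t *example_kernel_pte_lookup(unsigned long va)
{
	pgd_t *pgd = pgd_offset_k(va);
	pud_t *pud = pud_offset(pgd, va);
	pmd_t *pmd = pmd_offset(pud, va);

	if (pmd_none(*pmd) || pmd_huge_page(*pmd))
		return NULL;	/* unmapped, or mapped by a huge page */
	return pte_offset_kernel(pmd, va);
}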

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */