#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>

/*
 * Some cores need a 3-level page table layout, for example when using
 * 64-bit PTEs and 4K pages.
 */
#define PAGETABLE_LEVELS	3

/* log2(sizeof(pte_t)): 2^3 = 8 bytes, i.e. 64-bit PTEs on SH-X2 TLB */
#define PTE_MAGNITUDE		3	/* 64-bit PTEs on SH-X2 TLB */

/* PGD bits */
#define PGDIR_SHIFT		30

/*
 * 4 PGD entries of 1GB (1 << PGDIR_SHIFT) each cover the full 32-bit
 * address space; the lower 2 entries (2GB) are userspace.
 */
#define PTRS_PER_PGD		4
#define USER_PTRS_PER_PGD	2
|  | 19 |  | 
/* PMD bits */
/*
 * One PMD page holds PAGE_SIZE / sizeof(pte_t) = 2^(PAGE_SHIFT -
 * PTE_MAGNITUDE) PTE pointers, so each PMD entry maps that many pages.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* Number of PMD entries needed to span one PGD entry's 1GB range. */
#define PTRS_PER_PMD	((1 << PGDIR_SHIFT) / PMD_SIZE)

/* Report a corrupt pmd entry; %016llx matches the 64-bit pmd value. */
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

/* 64-bit PMD entry, with the usual kernel value/constructor accessors. */
typedef struct { unsigned long long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) } )
|  | 33 |  | 
|  | 34 | static inline unsigned long pud_page_vaddr(pud_t pud) | 
|  | 35 | { | 
|  | 36 | return pud_val(pud); | 
|  | 37 | } | 
|  | 38 |  | 
|  | 39 | #define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) | 
|  | 40 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) | 
|  | 41 | { | 
|  | 42 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); | 
|  | 43 | } | 
|  | 44 |  | 
/*
 * pud entry predicates: an entry is absent when zero, present when
 * non-zero, and bad if any bits outside the page-aligned address are set.
 */
#define pud_none(x)	(!pud_val(x))
#define pud_present(x)	(pud_val(x))
#define pud_clear(xp)	do { set_pud(xp, __pud(0)); } while (0)
#define	pud_bad(x)	(pud_val(x) & ~PAGE_MASK)

/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)

#endif /* __ASM_SH_PGTABLE_3LEVEL_H */