#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H
#define _ASM_POWERPC_PGTABLE_PPC64_4K_H
/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  9
#define PGD_INDEX_SIZE  9
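
/*
 * Worked arithmetic (editorial sketch, assuming the standard 4K base
 * page, i.e. PAGE_SHIFT == 12): the four levels above resolve
 * 12 + 9 + 7 + 9 + 9 = 46 bits of virtual address, i.e. a 64TB space.
 */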

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
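
/*
 * Resulting fan-out per level (editorial sketch): PTRS_PER_PTE = 512,
 * PTRS_PER_PMD = 128, PTRS_PER_PUD = 512, PTRS_PER_PGD = 512.
 */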

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
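
/*
 * Worked value (editorial sketch, PAGE_SHIFT == 12): PMD_SHIFT is
 * 12 + 9 = 21, so one PMD entry maps PMD_SIZE = 2MB.
 */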

/* With 4k base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT	PMD_SHIFT

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
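
/*
 * Worked value (editorial sketch): PUD_SHIFT is 21 + 7 = 28, so one
 * PUD entry maps PUD_SIZE = 256MB.
 */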

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
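
/*
 * Worked value (editorial sketch): PGDIR_SHIFT is 28 + 9 = 37, so one
 * PGD entry maps PGDIR_SIZE = 128GB, and 512 of them cover 64TB.
 */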

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0
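
/*
 * Editorial note: per the comment at the top of this file, each
 * upper-level entry is assumed page aligned and carries no status
 * bits on this configuration, so there is nothing to mask out.
 */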


/*
 * 4-level page tables related bits
 */

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
#define pgd_present(pgd)	(pgd_val(pgd) != 0)
#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
#define pgd_page_vaddr(pgd)	(pgd_val(pgd) & ~PGD_MASKED_BITS)
#define pgd_page(pgd)		virt_to_page(pgd_page_vaddr(pgd))

#define pud_offset(pgdp, addr)	\
  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
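
/*
 * Usage sketch (editorial, hypothetical walk; pgd_offset() comes from
 * the common powerpc pgtable headers and is assumed here):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud;
 *
 *	if (!pgd_none(*pgd))
 *		pud = pud_offset(pgd, addr);
 *
 * On this 4K config the index is bits addr[36:28], i.e.
 * ((addr >> 28) & 511).
 */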

#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))

/*
 * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
 */
#define remap_4k_pfn(vma, addr, pfn, prot)	\
	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
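
/*
 * Usage sketch (editorial; foo_mmap and pfn are hypothetical): a driver
 * mmap handler mapping one 4K page into userspace. On 4K base pages
 * this is just a one-page remap_pfn_range(); the 64K variant of this
 * header handles the 4K-subpage case differently.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_4k_pfn(vma, vma->vm_start, pfn,
 *				    vma->vm_page_prot);
 *	}
 */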

#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */