/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_PGALLOC_H
#define _ASM_TILE_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <hv/hypervisor.h>

/* Bits for the size of the second-level page table. */
#define L2_KERNEL_PGTABLE_SHIFT \
  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)

/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
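/*
 * Illustrative arithmetic, with assumed values not guaranteed by this
 * header: given 64 KB small pages (HV_LOG2_PAGE_SIZE_SMALL == 16),
 * 16 MB huge pages (HV_LOG2_PAGE_SIZE_LARGE == 24), and 8-byte PTEs
 * (HV_LOG2_PTE_SIZE == 3), L2_KERNEL_PGTABLE_SHIFT == 24 - 16 + 3 == 11,
 * i.e. a 2 KB kernel L2 table.  Since 11 < 16, a user L2 table rounds
 * up to a full 64 KB page.
 */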

/* How many pages do we need, as an "order", for a user L2 page table? */
#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)

/* How big is a kernel L2 page table? */
#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)

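/*
 * Install a pmd entry.  On 64-bit kernels a pmd shares the pte
 * representation, so set_pte() can store it directly; on 32-bit
 * kernels the pud and pmd levels are folded, so we unwrap the pmd
 * down to the underlying pgd entry (which likewise shares the pte
 * representation) before storing it.
 */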
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_64BIT
	set_pte(pmdp, pmd);
#else
	set_pte(&pmdp->pud.pgd, pmd.pud.pgd);
#endif
}

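/*
 * Point a pmd at a kernel L2 page table: convert the pte page's
 * physical address to a page-table frame number (a "ptfn", in units
 * of the hypervisor's page table alignment) and install it with the
 * present bit set.
 */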
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *ptep)
{
	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
			      __pgprot(_PAGE_PRESENT)));
}

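/*
 * As above, but for a user L2 page table, which is tracked by its
 * struct page: convert the page to a pfn and then to a ptfn.
 */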
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t page)
{
	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
			      __pgprot(_PAGE_PRESENT)));
}

/*
 * Allocate and free page tables.
 */

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pte_free(struct mm_struct *mm, struct page *pte);

#define pmd_pgtable(pmd) pmd_page(pmd)

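/*
 * Kernel L2 page tables come from the same page-based allocator as
 * user ones; we just hand back the kernel virtual address of the
 * allocated page rather than its struct page.
 */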
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page = pte_alloc_one(mm, address);
	if (!page)	/* don't dereference a failed allocation */
		return NULL;
	return pfn_to_kaddr(page_to_pfn(page));
}

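/*
 * Free a kernel L2 page table allocated above; the BUG_ON catches
 * callers passing a pointer that is not page-aligned.
 */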
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	pte_free(mm, virt_to_page(pte));
}

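/*
 * Free a pte page as part of an mmu_gather batch, so the page is not
 * reused until the stale TLB entries that reference it are flushed.
 */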
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
			   unsigned long address);

#define check_pgt_cache()	do { } while (0)

/*
 * Get the small-page pte_t lowmem entry for a given pfn.
 * This may or may not be in use, depending on whether the initial
 * huge-page entry for the page has already been shattered.
 */
pte_t *get_prealloc_pte(unsigned long pfn);

/* During init, we can shatter kernel huge pages if needed. */
void shatter_pmd(pmd_t *pmd);

/* After init, a more complex technique is required. */
void shatter_huge_page(unsigned long addr);

#ifdef __tilegx__
/* We share a single page allocator for both L1 and L2 page tables. */
#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif
#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
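/*
 * Since L1 and L2 page tables are the same size (checked above), pmd
 * allocation can reuse the pte-page allocator directly, casting
 * between the identically-represented table types.
 */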
#define pud_populate(mm, pud, pmd) \
  pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
#define pmd_alloc_one(mm, addr) \
  ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
#define pmd_free(mm, pmdp) \
  pte_free((mm), virt_to_page(pmdp))
#define __pmd_free_tlb(tlb, pmdp, address) \
  __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
#endif

#endif /* _ASM_TILE_PGALLOC_H */