#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/fixmap.h>
#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>

/*
 * Point a page-table entry at the next-lower-level table.  Each macro
 * takes the kernel-virtual address of the lower table, converts it to a
 * physical address with __pa(), and sets the entry with _PAGE_TABLE
 * permission bits.  The mm argument is unused on x86-64 but kept for
 * the generic pgalloc interface.
 */
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
 | 15 |  | 
/*
 * Install a user pte page into a pmd entry.  Unlike the _kernel variant
 * above, the pte page arrives as a struct page, so the physical address
 * is built from its pfn rather than via __pa().
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
 | 20 |  | 
| Adrian Bunk | 9c0aa0f | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 21 | static inline pmd_t *get_pmd(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 22 | { | 
 | 23 | 	return (pmd_t *)get_zeroed_page(GFP_KERNEL); | 
 | 24 | } | 
 | 25 |  | 
| Adrian Bunk | 9c0aa0f | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 26 | static inline void pmd_free(pmd_t *pmd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 27 | { | 
 | 28 | 	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); | 
 | 29 | 	free_page((unsigned long)pmd); | 
 | 30 | } | 
 | 31 |  | 
 | 32 | static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr) | 
 | 33 | { | 
 | 34 | 	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 
 | 35 | } | 
 | 36 |  | 
 | 37 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 
 | 38 | { | 
 | 39 | 	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 
 | 40 | } | 
 | 41 |  | 
 | 42 | static inline void pud_free (pud_t *pud) | 
 | 43 | { | 
 | 44 | 	BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | 
 | 45 | 	free_page((unsigned long)pud); | 
 | 46 | } | 
 | 47 |  | 
/*
 * Link a pgd onto the global pgd_list, guarded by pgd_lock.  The list
 * is threaded through fields of the struct page backing the pgd:
 * page->index holds the next page in the list, and page->private points
 * back at whatever pointer references this page (either &pgd_list or
 * the predecessor's &page->index), so pgd_list_del() can unlink in O(1)
 * without walking the list.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	page->index = (pgoff_t)pgd_list;	/* next = old head */
	if (pgd_list)
		pgd_list->private = (unsigned long)&page->index;
	pgd_list = page;			/* new head */
	page->private = (unsigned long)&pgd_list;
	spin_unlock(&pgd_lock);
}
 | 60 |  | 
/*
 * Unlink a pgd from the global pgd_list in O(1).  page->private is the
 * address of the pointer that references this page (set up by
 * pgd_list_add()), so redirecting *pprev to our successor removes us
 * whether we are the head or mid-list.
 */
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	next = (struct page *)page->index;
	pprev = (struct page **)page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long)pprev;	/* back-pointer now skips us */
	spin_unlock(&pgd_lock);
}
 | 73 |  | 
/*
 * Allocate a pgd (top-level page table) for a new mm: zero the user
 * half and copy the kernel half from init_level4_pgt, then register it
 * on pgd_list so kernel-mapping updates can be propagated to all pgds.
 *
 * NOTE(review): the pgd is published on pgd_list before its entries are
 * initialized below; anything walking pgd_list concurrently would see a
 * pgd with stale page contents — presumably benign here, but worth
 * confirming against the pgd_list walkers.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);	/* first kernel-space slot */
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
 | 93 |  | 
 | 94 | static inline void pgd_free(pgd_t *pgd) | 
 | 95 | { | 
 | 96 | 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); | 
| Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 97 | 	pgd_list_del(pgd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 98 | 	free_page((unsigned long)pgd); | 
 | 99 | } | 
 | 100 |  | 
 | 101 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 
 | 102 | { | 
 | 103 | 	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 
 | 104 | } | 
 | 105 |  | 
 | 106 | static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | 
 | 107 | { | 
 | 108 | 	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 
 | 109 | 	if (!p) | 
 | 110 | 		return NULL; | 
 | 111 | 	return virt_to_page(p); | 
 | 112 | } | 
 | 113 |  | 
/*
 * Should really implement gc for free page table pages.  This could be
 * done with a reference count in struct page.
 */
 | 116 |  | 
| Adrian Bunk | 9c0aa0f | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 117 | static inline void pte_free_kernel(pte_t *pte) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 118 | { | 
 | 119 | 	BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | 
 | 120 | 	free_page((unsigned long)pte);  | 
 | 121 | } | 
 | 122 |  | 
/* Free a user pte page (handed around as a struct page). */
static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
 | 127 |  | 
/*
 * Free page-table pages through the mmu_gather batching machinery so the
 * pages are not handed back to the allocator before the corresponding
 * TLB flush.  pte pages are already struct page; pmd/pud arrive as
 * kernel-virtual pointers and need virt_to_page().
 */
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))

#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
 | 132 |  | 
#endif /* _X86_64_PGALLOC_H */