| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
 | 2 |  * This file is subject to the terms and conditions of the GNU General Public | 
 | 3 |  * License.  See the file "COPYING" in the main directory of this archive | 
 | 4 |  * for more details. | 
 | 5 |  * | 
 | 6 |  * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle | 
 | 7 |  * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | 
 | 8 |  */ | 
 | 9 | #ifndef _ASM_PGALLOC_H | 
 | 10 | #define _ASM_PGALLOC_H | 
 | 11 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | #include <linux/highmem.h> | 
 | 13 | #include <linux/mm.h> | 
| Alexey Dobriyan | e8edc6e | 2007-05-21 01:22:52 +0400 | [diff] [blame] | 14 | #include <linux/sched.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 |  | 
 | 16 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, | 
 | 17 | 	pte_t *pte) | 
 | 18 | { | 
 | 19 | 	set_pmd(pmd, __pmd((unsigned long)pte)); | 
 | 20 | } | 
 | 21 |  | 
 | 22 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | 
 | 23 | 	struct page *pte) | 
 | 24 | { | 
 | 25 | 	set_pmd(pmd, __pmd((unsigned long)page_address(pte))); | 
 | 26 | } | 
 | 27 |  | 
/*
 * Initialize a new pmd table with invalid pointers.
 * @page: address of the pmd table to initialize.
 * @pagetable: presumably the invalid pte table every entry is pointed
 *	at — confirm against the arch mm definition of pmd_init().
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);
 | 32 |  | 
 | 33 | #ifdef CONFIG_64BIT | 
 | 34 |  | 
 | 35 | static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | 
 | 36 | { | 
 | 37 | 	set_pud(pud, __pud((unsigned long)pmd)); | 
 | 38 | } | 
 | 39 | #endif | 
 | 40 |  | 
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 * @page: address of the table to initialize.
 */
extern void pgd_init(unsigned long page);
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 |  | 
 | 46 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 
 | 47 | { | 
 | 48 | 	pgd_t *ret, *init; | 
 | 49 |  | 
 | 50 | 	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); | 
 | 51 | 	if (ret) { | 
| Ralf Baechle | 242954b | 2006-10-24 02:29:01 +0100 | [diff] [blame] | 52 | 		init = pgd_offset(&init_mm, 0UL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 53 | 		pgd_init((unsigned long)ret); | 
 | 54 | 		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, | 
 | 55 | 		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | 
 | 56 | 	} | 
 | 57 |  | 
 | 58 | 	return ret; | 
 | 59 | } | 
 | 60 |  | 
 | 61 | static inline void pgd_free(pgd_t *pgd) | 
 | 62 | { | 
 | 63 | 	free_pages((unsigned long)pgd, PGD_ORDER); | 
 | 64 | } | 
 | 65 |  | 
 | 66 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 
 | 67 | 	unsigned long address) | 
 | 68 | { | 
 | 69 | 	pte_t *pte; | 
 | 70 |  | 
 | 71 | 	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); | 
 | 72 |  | 
 | 73 | 	return pte; | 
 | 74 | } | 
 | 75 |  | 
 | 76 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | 
 | 77 | 	unsigned long address) | 
 | 78 | { | 
 | 79 | 	struct page *pte; | 
 | 80 |  | 
 | 81 | 	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | 
 | 82 | 	if (pte) | 
 | 83 | 		clear_highpage(pte); | 
 | 84 |  | 
 | 85 | 	return pte; | 
 | 86 | } | 
 | 87 |  | 
 | 88 | static inline void pte_free_kernel(pte_t *pte) | 
 | 89 | { | 
 | 90 | 	free_pages((unsigned long)pte, PTE_ORDER); | 
 | 91 | } | 
 | 92 |  | 
/* Free a user pte table obtained from pte_alloc_one(). */
static inline void pte_free(struct page *pte)
{
	__free_pages(pte, PTE_ORDER);
}
 | 97 |  | 
/* Defer freeing a pte page to the mmu_gather batching (tlb_remove_page). */
#define __pte_free_tlb(tlb,pte)		tlb_remove_page((tlb),(pte))
 | 99 |  | 
#ifdef CONFIG_32BIT

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 * Both operations therefore compile to nothing.
 */
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)

#endif
 | 110 |  | 
| Ralf Baechle | 875d43e | 2005-09-03 15:56:16 -0700 | [diff] [blame] | 111 | #ifdef CONFIG_64BIT | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 112 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 113 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 
 | 114 | { | 
 | 115 | 	pmd_t *pmd; | 
 | 116 |  | 
 | 117 | 	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); | 
 | 118 | 	if (pmd) | 
 | 119 | 		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); | 
 | 120 | 	return pmd; | 
 | 121 | } | 
 | 122 |  | 
 | 123 | static inline void pmd_free(pmd_t *pmd) | 
 | 124 | { | 
 | 125 | 	free_pages((unsigned long)pmd, PMD_ORDER); | 
 | 126 | } | 
 | 127 |  | 
/* pmd pages are freed immediately; no TLB-batched teardown needed here. */
#define __pmd_free_tlb(tlb,x)	pmd_free(x)
 | 129 |  | 
 | 130 | #endif | 
 | 131 |  | 
/* No page-table quicklist cache to trim on this architecture: a no-op. */
#define check_pgt_cache()	do { } while (0)
 | 133 |  | 
/* Page table setup entry point — defined in the arch mm code; presumably
 * run during early memory initialization (TODO confirm against caller). */
extern void pagetable_init(void);
 | 135 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 136 | #endif /* _ASM_PGALLOC_H */ |