#include <linux/mm.h>
#include <linux/slab.h>

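/* Page table allocations are zero-filled and retry harder under memory pressure. */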
#define PGALLOC_GFP (GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO)

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

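/*
 * Cache constructor: copy the kernel entries of swapper_pg_dir into each
 * new PGD so that kernel mappings are shared by every address space.
 */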
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

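/*
 * Create the slab caches for page directories (and for PMDs on
 * three-level configurations). Each object holds a full table:
 * number of entries times the per-entry size (1 << PTE_MAGNITUDE),
 * aligned to PAGE_SIZE.
 */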
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

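/* Allocate and free PGDs from the dedicated slab cache. */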
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

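/*
 * Three-level configurations: PMDs come from their own cache, and
 * pud_populate() links a newly allocated PMD table into its PUD entry.
 */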
#if PAGETABLE_LEVELS > 2
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
}

void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */