/*
 * linux/include/asm-xtensa/pgalloc.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001-2005 Tensilica Inc.
 */

#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/threads.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>

/* Cache aliasing:
 *
 * If the cache size of one way is greater than the page size, we have to
 * deal with cache aliasing: the cache index field is wider than the page
 * offset:
 *
 *      |cache |
 * |pgnum |page|	virtual address
 * |xxxxxX|zzzz|
 * |      |    |
 *   \  / |    |
 *  trans.|    |
 *   /  \ |    |
 * |yyyyyY|zzzz|	physical address
 *
 * When the virtual page number is translated to a physical page number, its
 * lowest bit(s) (X), which are also part of the cache index, are translated
 * too (Y). If the translation changes such a bit (X), the cache index
 * changes with it, so the data ends up in a different cache line than
 * before. The kernel does not provide a mechanism to ensure that the page
 * color (represented by these bits) stays the same across allocations or
 * when pages are remapped. When user pages are mapped into kernel space,
 * the color of the page might change as well.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color; a worked example
 * follows the macros below.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define PAGE_COLOR_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE-1))
# define PAGE_COLOR(a)		\
	(((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
# define PAGE_COLOR_EQ(a,b)	\
	((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
# define PAGE_COLOR_MAP0(v)	\
	(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
# define PAGE_COLOR_MAP1(v)	\
	(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
#endif
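
/*
 * Worked example -- a sketch only, assuming a two-color configuration with
 * DCACHE_WAY_SIZE = 0x2000 (8 KiB per way) and PAGE_SIZE = 0x1000 (4 KiB,
 * PAGE_SHIFT = 12); these concrete values are illustrative, not mandated:
 *
 *   PAGE_COLOR_MASK    = 0xfffff000 & 0x1fff     = 0x1000
 *   PAGE_COLOR(0x2000) = (0x2000 & 0x1000) >> 12 = 0
 *   PAGE_COLOR(0x3000) = (0x3000 & 0x1000) >> 12 = 1
 *   PAGE_COLOR_EQ(0x2000, 0x4000)	-> true  (both color 0)
 *   PAGE_COLOR_EQ(0x2000, 0x3000)	-> false (colors 0 and 1)
 *
 * PAGE_COLOR_MAP0(v) and PAGE_COLOR_MAP1(v) pick the page in the first and
 * second DCACHE_WAY_SIZE-sized window above VMALLOC_END whose index bits
 * match those of v, so a temporary mapping placed there aliases to the same
 * cache lines (same color) as v.
 */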

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pgd_free(pgd)	free_page((unsigned long)(pgd))

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
{
	pmd_val(*(pmdp)) = (unsigned long)(pte);

	/* Write the new entry back to memory: memw orders the store above,
	 * dhwb writes back the cache line holding *pmdp, and dsync waits
	 * for the writeback to complete.
	 */
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
{
	pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}

#else

# define pmd_populate_kernel(mm, pmdp, pte)				     \
	(pmd_val(*(pmdp)) = (unsigned long)(pte))
# define pmd_populate(mm, pmdp, page)					     \
	(pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))

#endif
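
/*
 * Usage sketch (illustrative only; the pmdp shown is assumed to come from
 * the caller's page-table walk): a pte allocation path would obtain a pte
 * page and hook it into the pmd, e.g.
 *
 *	pte_t *pte = pte_alloc_one_kernel(mm, addr);
 *	if (pte)
 *		pmd_populate_kernel(mm, pmdp, pte);
 *
 * With a write-back data cache, the inline variant above also pushes the
 * updated pmd entry out to memory; with a write-through cache, or when
 * there is no aliasing, the plain macro assignment is sufficient.
 */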

static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);

	if (likely(pgd != NULL))
		__flush_dcache_page((unsigned long)pgd);

	return pgd;
}
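
/*
 * Note: the __flush_dcache_page() above appears intended to push the
 * freshly zeroed pgd out of the data cache so the cleared entries are
 * visible in memory before the page table is installed. A pgd obtained
 * here is later released with pgd_free(); a minimal (hypothetical) pairing:
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	if (pgd) {
 *		...
 *		pgd_free(pgd);
 *	}
 */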

extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr);
extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr);

#define pte_free_kernel(pte) free_page((unsigned long)pte)
#define pte_free(pte) __free_page(pte)

#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */