#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
 * Pagetable related stuff.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/intrinsics.h>
#include <asm/types.h>

/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
#define RGN_BITS	(RGN_BASE(-1))

#define RGN_KERNEL	7	/* Identity mapped region */
#define RGN_UNCACHED	6	/* Identity mapped I/O region */
#define RGN_GATE	5	/* Gate page, Kernel text, etc */
#define RGN_HPAGE	4	/* For Huge TLB pages */

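/*
 * Illustrative note (not part of the original header): with RGN_SHIFT of 61,
 * RGN_BASE() simply places the region number in the top three bits of a
 * virtual address, e.g.
 *
 *	RGN_BASE(RGN_KERNEL)   == 0xe000000000000000	(identity-mapped kernel)
 *	RGN_BASE(RGN_UNCACHED) == 0xc000000000000000	(uncached I/O)
 *	RGN_BASE(RGN_HPAGE)    == 0x8000000000000000	(huge-page region)
 *
 * and RGN_BITS == 0xe000000000000000 masks out everything except the region
 * number.
 */
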
/*
 * PAGE_SHIFT determines the actual kernel page size.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
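
/*
 * Illustrative note (not part of the original header): with 16KB pages
 * (PAGE_SHIFT == 14), for example, PAGE_SIZE is 0x4000 and PAGE_MASK is
 * 0xffffffffffffc000, so "addr & PAGE_MASK" rounds an address down to its
 * page boundary.
 */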

#define PERCPU_PAGE_SHIFT	16	/* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)


#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
# define HPAGE_SHIFT		hpage_shift
# define HPAGE_SHIFT_DEFAULT	28	/* check ia64 SDM for architecture supported size */
# define HPAGE_SIZE		(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK		(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
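
/*
 * Illustrative note (not part of the original header): the huge page size is
 * not fixed at compile time (HPAGE_SHIFT expands to the variable
 * hpage_shift); the default shift of 28 corresponds to 256MB huge pages.
 */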

#ifdef __ASSEMBLY__
# define __pa(x)		((x) - PAGE_OFFSET)
# define __va(x)		((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY */
#  define STRICT_MM_TYPECHECKS

extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)


#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr)		\
({									\
	struct page *page = alloc_page_vma(				\
		GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr);	\
	if (page)							\
		flush_dcache_page(page);				\
	page;								\
})

#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#else
# define ia64_pfn_valid(pfn) 1
#endif

#ifdef CONFIG_VIRTUAL_MEM_MAP
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
# define page_to_pfn(page)	((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn)	(vmem_map + (pfn))
#else
# include <asm-generic/memory_model.h>
#endif
#else
# include <asm-generic/memory_model.h>
#endif

#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn)		(((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn)		(((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})

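/*
 * Illustrative note (not part of the original header): because the region
 * number occupies the top three bits, __pa() simply clears them (region-7
 * addresses are identity mapped) and __va() sets them, e.g.
 *
 *	__va(0x4000)               == (void *) 0xe000000000004000
 *	__pa(0xe000000000004000)   == 0x4000
 *	REGION_NUMBER(PAGE_OFFSET) == RGN_KERNEL
 */
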
#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)			\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;
#endif

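/*
 * Illustrative note (not part of the original header): get_order() below
 * returns the smallest page order whose size covers 'size'.  It does so by
 * loading size-1 into a floating-point register and reading back the biased
 * exponent with getf.exp (the register-format exponent bias is 0xffff),
 * i.e. floor(log2(size-1)), instead of looping over shifts.  For example,
 * get_order(PAGE_SIZE) is 0 and get_order(PAGE_SIZE + 1) is 1.
 */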
static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - PAGE_SHIFT - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}

#endif /* !__ASSEMBLY__ */

#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#ifdef CONFIG_PGTABLE_4
  typedef struct { unsigned long pud; } pud_t;
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#ifdef CONFIG_PGTABLE_4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
    typedef unsigned long pte_t;
    typedef unsigned long pmd_t;
    typedef unsigned long pgd_t;
    typedef unsigned long pgprot_t;
    typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */
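
/*
 * Illustrative note (not part of the original header): with
 * STRICT_MM_TYPECHECKS the wrapper structs make pte_t, pmd_t, pgd_t and
 * pgprot_t distinct types, so accidentally passing, say, a pgprot_t where a
 * pte_t is expected is a compile error; in the !STRICT_MM_TYPECHECKS case
 * they all collapse to unsigned long and such mix-ups compile silently.
 */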

#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)

#define VM_DATA_DEFAULT_FLAGS		(VM_READ | VM_WRITE |					\
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |		\
					 (((current->personality & READ_IMPLIES_EXEC) != 0)	\
					  ? VM_EXEC : 0))

#endif /* _ASM_IA64_PAGE_H */