/*
 *  linux/include/asm-arm/page.h
 *
 *  Copyright (C) 1995-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H

#ifdef __KERNEL__

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
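/*
 * For illustration: with the 4K page size above, PAGE_ALIGN() rounds an
 * address up to the next page boundary (addresses below are examples
 * only), e.g.:
 *
 *	PAGE_ALIGN(0x1000) == 0x1000	(already aligned)
 *	PAGE_ALIGN(0x1001) == 0x2000	(rounded up to the next page)
 *	PAGE_ALIGN(0x1fff) == 0x2000
 */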

#ifndef __ASSEMBLY__

#ifndef CONFIG_MMU

#include "page-nommu.h"

#else

#include <asm/glue.h>

/*
 *	User Space Model
 *	================
 *
 *	This section selects the correct set of functions for dealing with
 *	page-based copying and clearing for user space for the particular
 *	processor(s) we're building for.
 *
 *	We have the following to choose from:
 *	  v3		- ARMv3
 *	  v4wt		- ARMv4 with writethrough cache, without minicache
 *	  v4wb		- ARMv4 with writeback cache, without minicache
 *	  v4_mc		- ARMv4 with minicache
 *	  xscale	- Xscale
 *	  xsc3		- XScalev3
 */
#undef _USER
#undef MULTI_USER

#ifdef CONFIG_CPU_COPY_V3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v3
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wt
# endif
#endif

#ifdef CONFIG_CPU_COPY_V4WB
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4wb
# endif
#endif

#ifdef CONFIG_CPU_SA1100
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER v4_mc
# endif
#endif

#ifdef CONFIG_CPU_XSCALE
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xscale_mc
# endif
#endif

#ifdef CONFIG_CPU_XSC3
# ifdef _USER
#  define MULTI_USER 1
# else
#  define _USER xsc3_mc
# endif
#endif

#ifdef CONFIG_CPU_COPY_V6
# define MULTI_USER 1
#endif

#if !defined(_USER) && !defined(MULTI_USER)
#error Unknown user operations model
#endif

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_page)(void *to, const void *from,
				   unsigned long user);
};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page

#else

#define __cpu_clear_user_page	__glue(_USER,_clear_user_page)
#define __cpu_copy_user_page	__glue(_USER,_copy_user_page)

extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_page(void *to, const void *from,
				 unsigned long user);
#endif
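/*
 * For illustration: in the single-model case, __glue() simply pastes the
 * selected _USER token onto the function suffix, so with _USER == v4wb
 * the macros above resolve to the assembly routines
 * v4wb_clear_user_page() and v4wb_copy_user_page().  In the MULTI_USER
 * case the same calls are dispatched through the cpu_user function
 * pointer table, which the processor setup code is expected to fill in
 * at boot, roughly along these lines (sketch only):
 *
 *	cpu_user = *list->user;				- per-CPU fns
 *	__cpu_copy_user_page(kto, kfrom, vaddr);	- indirect call
 */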

#define clear_user_page(addr,vaddr,pg)	 __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)

#define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, const void *from);

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)	((x).pgd[0])
#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;

#define pte_val(x)      (x)
#define pmd_val(x)      (x)
#define pgd_val(x)	((x)[0])
#define pgprot_val(x)   (x)

#define __pte(x)        (x)
#define __pmd(x)        (x)
#define __pgprot(x)     (x)

#endif /* STRICT_MM_TYPECHECKS */
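/*
 * For illustration: the point of the STRICT_MM_TYPECHECKS variant is that
 * the single-member structs make pte_t, pmd_t, pgd_t and pgprot_t distinct
 * types, so mixing them with plain integers is caught at compile time
 * (sketch only):
 *
 *	pte_t pte = __pte(0x1000 | 0x3);
 *	unsigned long raw;
 *
 *	raw = pte;		- compile error with the structs,
 *				  silently accepted without them
 *	raw = pte_val(pte);	- correct under either model
 */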

#endif /* CONFIG_MMU */

#include <asm/memory.h>

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif

#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif /* _ASMARM_PAGE_H */