#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif
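
/*
 * Note: per the ia64 software conventions r13 is the thread pointer (TP);
 * the kernel keeps the current task reachable through it, which is why the
 * _IA64_REG_TP case of ia64_getreg() below simply reads this global
 * register variable instead of issuing an asm.
 */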

#define ia64_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	    case _IA64_REG_PSR_L:						\
		    asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");	\
		    break;							\
	    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		    asm volatile ("mov ar%0=%1" ::				\
		    			  "i" (regnum - _IA64_REG_AR_KR0),	\
					  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:			\
		    asm volatile ("mov cr%0=%1" ::				\
				          "i" (regnum - _IA64_REG_CR_DCR),	\
					  "r"(val): "memory" );			\
		    break;							\
	    case _IA64_REG_SP:							\
		    asm volatile ("mov r12=%0" ::				\
			    		  "r"(val): "memory");			\
		    break;							\
	    case _IA64_REG_GP:							\
		    asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	    default:								\
		    ia64_bad_param_for_setreg();				\
		    break;							\
	}									\
})
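
/*
 * Illustrative usage: regnum must be a compile-time _IA64_REG_* constant,
 * since it is consumed as an "i" asm operand and selects the case above,
 * e.g.
 *
 *	ia64_setreg(_IA64_REG_CR_DCR, val);
 */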

#define ia64_getreg(regnum)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
				      : "i"(regnum - _IA64_REG_AR_KR0));	\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
				      : "i" (regnum - _IA64_REG_CR_DCR));	\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
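
/*
 * Illustrative usage: ia64_getreg() is a statement expression that yields
 * the register value, e.g.
 *
 *	unsigned long sp = ia64_getreg(_IA64_REG_SP);
 *	unsigned long psr = ia64_getreg(_IA64_REG_PSR);
 */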

#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
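
/*
 * hint @pause tells the CPU that the code is busy-waiting;
 * ia64_hint(ia64_hint_pause) is what cpu_relax()-style spin loops are
 * expected to use.
 */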


/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt  10
#define ia64_mux1_rev  11

#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
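
/*
 * mux1 permutes the eight bytes of x according to the hint: @brcst
 * broadcasts byte 0 to all byte positions, @rev reverses the byte order,
 * and @mix/@shuf/@alt perform the other fixed permutations.  For example,
 * a 64-bit byte swap can be written as
 *
 *	__u64 swapped = ia64_mux1(x, ia64_mux1_rev);
 */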

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)		__builtin_popcountl(x)
#else
# define ia64_popcnt(x)						\
  ({								\
	__u64 ia64_intri_res;					\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));	\
								\
	ia64_intri_res;						\
  })
#endif
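
/*
 * Both variants count the bits set in a 64-bit value; gcc >= 3.4 can emit
 * popcnt for __builtin_popcountl() itself, e.g.
 *
 *	unsigned long nbits = ia64_popcnt(mask);
 */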

#define ia64_getf_exp(x)					\
({								\
	long ia64_intri_res;					\
								\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));	\
								\
	ia64_intri_res;						\
})

#define ia64_shrp(a, b, count)								\
({											\
	__u64 ia64_intri_res;								\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count));	\
	ia64_intri_res;									\
})
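
/*
 * shrp treats a:b as a 128-bit value and shifts it right by count (a
 * compile-time constant in 0..63), returning the low 64 bits.  Passing the
 * same value twice gives a rotate right, e.g.
 *
 *	__u64 rotated = ia64_shrp(x, x, 13);
 */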

#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})
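
/*
 * The ldf* macros load the memory at x directly into floating-point
 * register f<regnum>: ldfs/ldfd/ldfe for single/double/extended precision,
 * ldf8 for a raw 8-byte integer, and ldf.fill for the spilled register
 * image written by stf.spill.  regnum must be a literal because it is
 * pasted into the register name.
 */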

#define ia64_st4_rel_nta(m, val)					\
({									\
	asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)						\
({										\
	register double __f__ asm ("f"#regnum);					\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})
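
/*
 * The stf* macros are the store-side counterparts: they write f<regnum>
 * to memory at x in the chosen format.  stf.spill and ia64_ldf_fill()
 * above form the save/restore pair used for FP register context, since
 * they preserve the full register contents (including NaT).
 */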

#define ia64_fetchadd4_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd4_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd4.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_acq(p, inc)						\
({										\
										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.acq %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})

#define ia64_fetchadd8_rel(p, inc)						\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("fetchadd8.rel %0=[%1],%2"				\
				: "=r"(ia64_intri_res) : "r"(p), "i" (inc)	\
				: "memory");					\
										\
	ia64_intri_res;								\
})
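
/*
 * fetchadd atomically adds inc to the 4- or 8-byte value at p and returns
 * the old value, with acquire (.acq) or release (.rel) ordering.  The
 * instruction only accepts the immediates -16, -8, -4, -1, 1, 4, 8 and 16
 * as the increment, e.g.
 *
 *	old = ia64_fetchadd4_acq(&counter, 1);
 */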

#define ia64_xchg1(ptr,x)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("xchg1 %0=[%1],%2"					\
		      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory");	\
	ia64_intri_res;								\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
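
/*
 * xchg1/2/4/8 unconditionally swap x into the 1-, 2-, 4- or 8-byte
 * location at ptr and return the previous contents; the instruction
 * always has acquire semantics, e.g.
 *
 *	old = ia64_xchg4(&lock, 1);
 */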

#define ia64_cmpxchg1_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg1_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg2_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg4_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_acq(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})

#define ia64_cmpxchg8_rel(ptr, new, old)						\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));					\
											\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":					\
			      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory");	\
	ia64_intri_res;									\
})
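
/*
 * cmpxchg works through the ar.ccv application register: the expected old
 * value is loaded into ar.ccv first, then cmpxchgN stores new at ptr only
 * if the current memory contents equal ar.ccv, and in either case returns
 * what was in memory.  Success is therefore checked by comparing the
 * return value against old, e.g.
 *
 *	if (ia64_cmpxchg8_acq(&word, new, old) == old)
 *		...update succeeded...
 */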

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala() asm volatile ("invala" ::: "memory")

#define ia64_thash(addr)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr));	\
	ia64_intri_res;								\
})
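
/*
 * mf is the ordinary memory fence, mf.a additionally waits for outstanding
 * accesses to be accepted by the platform (used for uncached/MMIO
 * ordering), invala invalidates the ALAT, and thash returns the VHPT hash
 * address the hardware walker would use for the given virtual address.
 */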

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")
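
/*
 * srlz.d/srlz.i force data/instruction serialization so that earlier
 * writes to control registers, PSR bits, translation registers etc. are
 * observed by subsequent data references or instruction fetches.
 */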

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0"::"i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")


#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"				\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr)								\
({										\
	__u64 ia64_pa;								\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory");	\
	ia64_pa;								\
})
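
/*
 * itc.i/itc.d insert a translation into the instruction/data translation
 * cache and itr.i/itr.d pin one into the numbered translation register;
 * the virtual address and page size come from the IFA/ITIR control
 * registers set up beforehand, while the register operand supplies the
 * physical page and attributes.  tpa translates a mapped virtual address
 * to its physical address, e.g.
 *
 *	phys = ia64_tpa(virt);
 */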

#define __ia64_set_dbr(index, val)						\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)						\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)						\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)						\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)						\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val)							\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index)								\
({											\
	__u64 ia64_intri_res;								\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index));	\
	ia64_intri_res;									\
})
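
/*
 * The set macros write the indexed data/instruction breakpoint (dbr/ibr),
 * protection key (pkr), performance monitor (pmc/pmd) and region (rr)
 * registers; index and val are ordinary run-time values here.
 * ia64_get_cpuid() reads the indexed CPUID register, e.g. the vendor
 * string starts in cpuid[0]:
 *
 *	__u64 vendor_lo = ia64_get_cpuid(0);
 */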

#define __ia64_get_dbr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_ibr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pkr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_pmc(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})


#define ia64_get_pmd(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index));	\
	ia64_intri_res;								\
})

#define ia64_get_rr(index)							\
({										\
	__u64 ia64_intri_res;							\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index));	\
	ia64_intri_res;								\
})

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")


#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size)							\
do {										\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)

#define ia64_ptcl(addr, size)							\
do {										\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory");	\
	ia64_dv_serialize_data();						\
} while (0)
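
/*
 * ptc.e purges translation cache entries wholesale, ptc.l purges a
 * virtual range from the local TLB, and ptc.ga does the same globally
 * (also invalidating matching ALAT entries on other CPUs); the size
 * operand encodes the page size of the range in the format the
 * instruction expects (see the callers in the TLB flush code).
 */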

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none   0
#define ia64_lfhint_nt1    1
#define ia64_lfhint_nt2    2
#define ia64_lfhint_nta    3
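
/*
 * The lfetch macros below prefetch the line containing y: the hint selects
 * the expected temporal locality (.nt1/.nt2/.nta mark the data as
 * non-temporal at successive cache levels), the .excl forms request the
 * line in an exclusive (writeable) state, and the .fault forms may raise
 * faults instead of being silently dropped.  For example:
 *
 *	ia64_lfetch(ia64_lfhint_none, ptr);
 */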

#define ia64_lfetch(lfhint, y)					\
({								\
        switch (lfhint) {					\
        case ia64_lfhint_none:					\
                asm volatile ("lfetch [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt1:					\
                asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nt2:					\
                asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
                break;						\
        case ia64_lfhint_nta:					\
                asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
                break;						\
        }							\
})

#define ia64_lfetch_excl(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault(lfhint, y)					\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
                break;							\
        }								\
})

#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
        switch (lfhint) {						\
        case ia64_lfhint_none:						\
                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt1:						\
                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nt2:						\
                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
                break;							\
        case ia64_lfhint_nta:						\
                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
                break;							\
        }								\
})

#define ia64_intrin_local_irq_restore(x)			\
do {								\
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;"		\
		      "(p6) ssm psr.i;"				\
		      "(p7) rsm psr.i;;"			\
		      "(p6) srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
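
/*
 * local_irq_restore() helper: compares x against zero and, depending on
 * the result, either sets or clears PSR.i (the interrupt-enable bit),
 * serializing when interrupts are being re-enabled.  The predicate
 * registers p6/p7 are clobbered, hence the explicit clobber list.
 */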

#endif /* _ASM_IA64_GCC_INTRIN_H */