/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
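/* Example: MASK(3) == 0x7, i.e. the three lowest bits set. */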

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
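/* (defining "inline" away keeps these helpers out-of-line for debugging) */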
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
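/*
 * (Bit 63 of cr.ifs is the "valid" bit: it is set when the frame was
 * saved on interruption but clear on syscall entry, so a non-negative
 * value identifies a syscall frame.)
 */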

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
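/*
 * Worked example for GET_BITS (illustrative numbers): if the UNaT bit
 * for pt->r16 happens to live at bit 5 of scratch_unat (bit = 5,
 * first = 16), then dist = 64 + 5 - 16 = 53, and rotating right by 53
 * moves bit 5 to bit (5 - 53) mod 64 = 16, where the mask picks it up.
 */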

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6
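/*
 * Background: IA-64 instructions come in 16-byte bundles of three
 * slots, and psr.ri selects which slot execution resumes at.  In an
 * MLX bundle (template 0x2), slots 1 and 2 together hold a single
 * long instruction such as movl, so slot 2 is never a valid restart
 * point.
 */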

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 *					| slot62 |
 * +- - - - +				+--------+
 *					|  rnat	 |
 * +- - - - +				+--------+
 *   vrnat				| slot00 |
 * +- - - - +				+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
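/*
 * (Recall that the RSE places an RNaT collection word at every address
 * whose bits 3..8 are all ones, i.e. (addr & 0x1f8) == 0x1f8, giving
 * one collection word per 63 stacked registers.)
 */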
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
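	/*
	 * pt->loadrs holds the ar.rsc.loadrs-formatted count of dirty
	 * bytes, i.e. the byte count shifted left by 16, so shifting
	 * right by 19 yields the number of dirty 8-byte slots.
	 */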
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), the debugger might change the
 * thread's user stack by writing its memory directly, and we must not
 * let the RSE state held in the kernel overwrite that user stack (the
 * user-space RSE contents are then newer than the kernel's).  To work
 * around this, we copy the kernel RSE to the user RSE before the task
 * stops, so the user RSE holds up-to-date data.  We then copy the user
 * RSE back to the kernel after the task resumes from the traced stop,
 * and the kernel uses that newer RSE state to return to user mode.
 * TIF_RESTORE_RSE flags that the user RSE still needs to be
 * synchronized back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1197 | case PTRACE_PEEKUSR: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | /* read the word at addr in the USER area */ | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1199 | if (access_uarea(child, addr, &data, 0) < 0) | 
|  | 1200 | return -EIO; | 
|  | 1201 | /* ensure return value is not mistaken for error code */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | force_successful_syscall_return(); | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1203 | return data; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1205 | case PTRACE_POKEUSR: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | /* write the word at addr in the USER area */ | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1207 | if (access_uarea(child, addr, &data, 1) < 0) | 
|  | 1208 | return -EIO; | 
|  | 1209 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1211 | case PTRACE_OLD_GETSIGINFO: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | /* for backwards-compatibility */ | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1213 | return ptrace_request(child, PTRACE_GETSIGINFO, addr, data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1215 | case PTRACE_OLD_SETSIGINFO: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | /* for backwards-compatibility */ | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1217 | return ptrace_request(child, PTRACE_SETSIGINFO, addr, data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1219 | case PTRACE_GETREGS: | 
|  | 1220 | return ptrace_getregs(child, | 
|  | 1221 | (struct pt_all_user_regs __user *) data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1223 | case PTRACE_SETREGS: | 
|  | 1224 | return ptrace_setregs(child, | 
|  | 1225 | (struct pt_all_user_regs __user *) data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 |  | 
| Petr Tesarik | aa17f6f | 2008-02-26 12:03:28 +0100 | [diff] [blame] | 1227 | default: | 
|  | 1228 | return ptrace_request(child, request, addr, data); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | } | 
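|  |  |  | 
|  |  | /* | 
|  |  |  * Usage sketch (hypothetical tracer code, assuming PTRACE_ATTACH has | 
|  |  |  * already stopped the target): because the PEEK requests above return | 
|  |  |  * the fetched word itself, a negative word is indistinguishable from | 
|  |  |  * an error unless errno is checked: | 
|  |  |  * | 
|  |  |  *	errno = 0; | 
|  |  |  *	val = ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0); | 
|  |  |  *	if (val == -1 && errno != 0) | 
|  |  |  *		perror("ptrace"); | 
|  |  |  * | 
|  |  |  * force_successful_syscall_return() above is what keeps the kernel | 
|  |  |  * exit path from turning such a data word into a spurious error. | 
|  |  |  */ | 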
|  | 1231 |  | 
|  | 1232 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 | /* "asmlinkage" so the input arguments are preserved... */ | 
|  | 1234 |  | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1235 | asmlinkage long | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | 
|  | 1237 | long arg4, long arg5, long arg6, long arg7, | 
|  | 1238 | struct pt_regs regs) | 
|  | 1239 | { | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1240 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 
|  | 1241 | if (tracehook_report_syscall_entry(&regs)) | 
|  | 1242 | return -ENOSYS; | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1243 |  | 
| Petr Tesarik | 3b2ce0b | 2007-12-12 15:23:34 +0100 | [diff] [blame] | 1244 | /* copy user rbs to kernel rbs */ | 
|  | 1245 | if (test_thread_flag(TIF_RESTORE_RSE)) | 
|  | 1246 | ia64_sync_krbs(); | 
|  | 1247 |  | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1248 | if (unlikely(current->audit_context)) { | 
|  | 1249 | long syscall; | 
|  | 1250 | int arch; | 
|  | 1251 |  | 
| Tony Luck | 32974ad | 2010-02-08 10:42:17 -0800 | [diff] [blame] | 1252 | syscall = regs.r15; | 
|  | 1253 | arch = AUDIT_ARCH_IA64; | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1254 |  | 
| Al Viro | 5411be5 | 2006-03-29 20:23:36 -0500 | [diff] [blame] | 1255 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1256 | } | 
|  | 1257 |  | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1258 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | } | 
|  | 1260 |  | 
|  | 1261 | /* "asmlinkage" so the input arguments are preserved... */ | 
|  | 1262 |  | 
|  | 1263 | asmlinkage void | 
|  | 1264 | syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | 
|  | 1265 | long arg4, long arg5, long arg6, long arg7, | 
|  | 1266 | struct pt_regs regs) | 
|  | 1267 | { | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1268 | int step; | 
|  | 1269 |  | 
| David Woodhouse | ee436dc | 2005-11-18 14:43:54 +0000 | [diff] [blame] | 1270 | if (unlikely(current->audit_context)) { | 
|  | 1271 | int success = AUDITSC_RESULT(regs.r10); | 
|  | 1272 | long result = regs.r8; | 
|  | 1273 |  | 
|  | 1274 | if (success != AUDITSC_SUCCESS) | 
|  | 1275 | result = -result; | 
| Al Viro | 5411be5 | 2006-03-29 20:23:36 -0500 | [diff] [blame] | 1276 | audit_syscall_exit(success, result); | 
| David Woodhouse | ee436dc | 2005-11-18 14:43:54 +0000 | [diff] [blame] | 1277 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 |  | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1279 | step = test_thread_flag(TIF_SINGLESTEP); | 
|  | 1280 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 
|  | 1281 | tracehook_report_syscall_exit(&regs, step); | 
| Petr Tesarik | 3b2ce0b | 2007-12-12 15:23:34 +0100 | [diff] [blame] | 1282 |  | 
|  | 1283 | /* copy user rbs to kernel rbs */ | 
|  | 1284 | if (test_thread_flag(TIF_RESTORE_RSE)) | 
|  | 1285 | ia64_sync_krbs(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | } | 
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 1287 |  | 
|  | 1288 | /* Utrace implementation starts here */ | 
|  | 1289 | struct regset_get { | 
|  | 1290 | void *kbuf; | 
|  | 1291 | void __user *ubuf; | 
|  | 1292 | }; | 
|  | 1293 |  | 
|  | 1294 | struct regset_set { | 
|  | 1295 | const void *kbuf; | 
|  | 1296 | const void __user *ubuf; | 
|  | 1297 | }; | 
|  | 1298 |  | 
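|  |  | /* | 
|  |  |  * Bookkeeping threaded through the unwinder callbacks below: the | 
|  |  |  * do_*_get/set workers consume (pos, count) and leave their status | 
|  |  |  * in ->ret, since a callback run by unw_init_running() returns void. | 
|  |  |  */ | 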
|  | 1299 | struct regset_getset { | 
|  | 1300 | struct task_struct *target; | 
|  | 1301 | const struct user_regset *regset; | 
|  | 1302 | union { | 
|  | 1303 | struct regset_get get; | 
|  | 1304 | struct regset_set set; | 
|  | 1305 | } u; | 
|  | 1306 | unsigned int pos; | 
|  | 1307 | unsigned int count; | 
|  | 1308 | int ret; | 
|  | 1309 | }; | 
|  | 1310 |  | 
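|  |  | /* | 
|  |  |  * General registers: the scratch registers (r1-r3, r8-r15) live in | 
|  |  |  * pt_regs, while the preserved registers r4-r7 must go through the | 
|  |  |  * unwinder, which also tracks their NaT bits. | 
|  |  |  */ | 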
|  | 1311 | static int | 
|  | 1312 | access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1313 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1314 | { | 
|  | 1315 | struct pt_regs *pt; | 
|  | 1316 | unsigned long *ptr = NULL; | 
|  | 1317 | int ret; | 
|  | 1318 | char nat = 0; | 
|  | 1319 |  | 
|  | 1320 | pt = task_pt_regs(target); | 
|  | 1321 | switch (addr) { | 
|  | 1322 | case ELF_GR_OFFSET(1): | 
|  | 1323 | ptr = &pt->r1; | 
|  | 1324 | break; | 
|  | 1325 | case ELF_GR_OFFSET(2): | 
|  | 1326 | case ELF_GR_OFFSET(3): | 
|  | 1327 | ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2)); | 
|  | 1328 | break; | 
|  | 1329 | case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7): | 
|  | 1330 | if (write_access) { | 
|  | 1331 | /* read NaT bit first: */ | 
|  | 1332 | unsigned long dummy; | 
|  | 1333 |  | 
|  | 1334 | ret = unw_get_gr(info, addr/8, &dummy, &nat); | 
|  | 1335 | if (ret < 0) | 
|  | 1336 | return ret; | 
|  | 1337 | } | 
|  | 1338 | return unw_access_gr(info, addr/8, data, &nat, write_access); | 
|  | 1339 | case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11): | 
|  | 1340 | ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8); | 
|  | 1341 | break; | 
|  | 1342 | case ELF_GR_OFFSET(12): | 
|  | 1343 | case ELF_GR_OFFSET(13): | 
|  | 1344 | ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12); | 
|  | 1345 | break; | 
|  | 1346 | case ELF_GR_OFFSET(14): | 
|  | 1347 | ptr = &pt->r14; | 
|  | 1348 | break; | 
|  | 1349 | case ELF_GR_OFFSET(15): | 
|  | 1350 | ptr = &pt->r15; | 
|  | 1351 | } | 
|  | 1352 | if (write_access) | 
|  | 1353 | *ptr = *data; | 
|  | 1354 | else | 
|  | 1355 | *data = *ptr; | 
|  | 1356 | return 0; | 
|  | 1357 | } | 
|  | 1358 |  | 
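|  |  | /* | 
|  |  |  * Branch registers: b0, b6 and b7 are scratch and live in pt_regs; | 
|  |  |  * the preserved registers b1-b5 are accessed through the unwinder. | 
|  |  |  */ | 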
|  | 1359 | static int | 
|  | 1360 | access_elf_breg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1361 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1362 | { | 
|  | 1363 | struct pt_regs *pt; | 
|  | 1364 | unsigned long *ptr = NULL; | 
|  | 1365 |  | 
|  | 1366 | pt = task_pt_regs(target); | 
|  | 1367 | switch (addr) { | 
|  | 1368 | case ELF_BR_OFFSET(0): | 
|  | 1369 | ptr = &pt->b0; | 
|  | 1370 | break; | 
|  | 1371 | case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5): | 
|  | 1372 | return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8, | 
|  | 1373 | data, write_access); | 
|  | 1374 | case ELF_BR_OFFSET(6): | 
|  | 1375 | ptr = &pt->b6; | 
|  | 1376 | break; | 
|  | 1377 | case ELF_BR_OFFSET(7): | 
|  | 1378 | ptr = &pt->b7; | 
|  | 1379 | } | 
|  | 1380 | if (write_access) | 
|  | 1381 | *ptr = *data; | 
|  | 1382 | else | 
|  | 1383 | *data = *ptr; | 
|  | 1384 | return 0; | 
|  | 1385 | } | 
|  | 1386 |  | 
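|  |  | /* | 
|  |  |  * Application and control registers.  Some values are synthesized: | 
|  |  |  * AR_BSP reports the end of the user backing store, and writing | 
|  |  |  * AR_BSP or CFM may switch the task to the non-syscall exit path | 
|  |  |  * (see the comments inside). | 
|  |  |  */ | 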
|  | 1387 | static int | 
|  | 1388 | access_elf_areg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1389 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1390 | { | 
|  | 1391 | struct pt_regs *pt; | 
|  | 1392 | unsigned long cfm, urbs_end; | 
|  | 1393 | unsigned long *ptr = NULL; | 
|  | 1394 |  | 
|  | 1395 | pt = task_pt_regs(target); | 
|  | 1396 | if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) { | 
|  | 1397 | switch (addr) { | 
|  | 1398 | case ELF_AR_RSC_OFFSET: | 
|  | 1399 | /* force PL3 */ | 
|  | 1400 | if (write_access) | 
|  | 1401 | pt->ar_rsc = *data | (3 << 2); | 
|  | 1402 | else | 
|  | 1403 | *data = pt->ar_rsc; | 
|  | 1404 | return 0; | 
|  | 1405 | case ELF_AR_BSP_OFFSET: | 
|  | 1406 | /* | 
|  | 1407 | * By convention, we use PT_AR_BSP to refer to | 
|  | 1408 | * the end of the user-level backing store. | 
|  | 1409 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | 
|  | 1410 | * to get the real value of ar.bsp at the time | 
|  | 1411 | * the kernel was entered. | 
|  | 1412 | * | 
|  | 1413 | * Furthermore, when changing the contents of | 
|  | 1414 | * PT_AR_BSP (or PT_CFM) while the task is | 
|  | 1415 | * blocked in a system call, convert the state | 
|  | 1416 | * so that the non-system-call exit | 
|  | 1417 | * path is used.  This ensures that the proper | 
|  | 1418 | * state will be picked up when resuming | 
|  | 1419 | * execution.  However, it *also* means that | 
|  | 1420 | * once we write PT_AR_BSP/PT_CFM, it won't be | 
|  | 1421 | * possible to modify the syscall arguments of | 
|  | 1422 | * the pending system call any longer.  This | 
|  | 1423 | * shouldn't be an issue because modifying | 
|  | 1424 | * PT_AR_BSP/PT_CFM generally implies that | 
|  | 1425 | * we're either abandoning the pending system | 
|  | 1426 | * call or that we defer its re-execution | 
|  | 1427 | * (e.g., due to GDB doing an inferior | 
|  | 1428 | * function call). | 
|  | 1429 | */ | 
|  | 1430 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | 
|  | 1431 | if (write_access) { | 
|  | 1432 | if (*data != urbs_end) { | 
|  | 1433 | if (in_syscall(pt)) | 
|  | 1434 | convert_to_non_syscall(target, | 
|  | 1435 | pt, | 
|  | 1436 | cfm); | 
|  | 1437 | /* | 
|  | 1438 | * Simulate user-level write | 
|  | 1439 | * of ar.bsp: | 
|  | 1440 | */ | 
|  | 1441 | pt->loadrs = 0; | 
|  | 1442 | pt->ar_bspstore = *data; | 
|  | 1443 | } | 
|  | 1444 | } else | 
|  | 1445 | *data = urbs_end; | 
|  | 1446 | return 0; | 
|  | 1447 | case ELF_AR_BSPSTORE_OFFSET: | 
|  | 1448 | ptr = &pt->ar_bspstore; | 
|  | 1449 | break; | 
|  | 1450 | case ELF_AR_RNAT_OFFSET: | 
|  | 1451 | ptr = &pt->ar_rnat; | 
|  | 1452 | break; | 
|  | 1453 | case ELF_AR_CCV_OFFSET: | 
|  | 1454 | ptr = &pt->ar_ccv; | 
|  | 1455 | break; | 
|  | 1456 | case ELF_AR_UNAT_OFFSET: | 
|  | 1457 | ptr = &pt->ar_unat; | 
|  | 1458 | break; | 
|  | 1459 | case ELF_AR_FPSR_OFFSET: | 
|  | 1460 | ptr = &pt->ar_fpsr; | 
|  | 1461 | break; | 
|  | 1462 | case ELF_AR_PFS_OFFSET: | 
|  | 1463 | ptr = &pt->ar_pfs; | 
|  | 1464 | break; | 
|  | 1465 | case ELF_AR_LC_OFFSET: | 
|  | 1466 | return unw_access_ar(info, UNW_AR_LC, data, | 
|  | 1467 | write_access); | 
|  | 1468 | case ELF_AR_EC_OFFSET: | 
|  | 1469 | return unw_access_ar(info, UNW_AR_EC, data, | 
|  | 1470 | write_access); | 
|  | 1471 | case ELF_AR_CSD_OFFSET: | 
|  | 1472 | ptr = &pt->ar_csd; | 
|  | 1473 | break; | 
|  | 1474 | case ELF_AR_SSD_OFFSET: | 
|  | 1475 | ptr = &pt->ar_ssd; | 
|  | 1476 | } | 
|  | 1477 | } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) { | 
|  | 1478 | switch (addr) { | 
|  | 1479 | case ELF_CR_IIP_OFFSET: | 
|  | 1480 | ptr = &pt->cr_iip; | 
|  | 1481 | break; | 
|  | 1482 | case ELF_CFM_OFFSET: | 
|  | 1483 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | 
|  | 1484 | if (write_access) { | 
|  | 1485 | if (((cfm ^ *data) & PFM_MASK) != 0) { | 
|  | 1486 | if (in_syscall(pt)) | 
|  | 1487 | convert_to_non_syscall(target, | 
|  | 1488 | pt, | 
|  | 1489 | cfm); | 
|  | 1490 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | 
|  | 1491 | | (*data & PFM_MASK)); | 
|  | 1492 | } | 
|  | 1493 | } else | 
|  | 1494 | *data = cfm; | 
|  | 1495 | return 0; | 
|  | 1496 | case ELF_CR_IPSR_OFFSET: | 
|  | 1497 | if (write_access) { | 
|  | 1498 | unsigned long tmp = *data; | 
|  | 1499 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | 
|  | 1500 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | 
|  | 1501 | tmp &= ~IA64_PSR_RI; | 
|  | 1502 | pt->cr_ipsr = ((tmp & IPSR_MASK) | 
|  | 1503 | | (pt->cr_ipsr & ~IPSR_MASK)); | 
|  | 1504 | } else | 
|  | 1505 | *data = (pt->cr_ipsr & IPSR_MASK); | 
|  | 1506 | return 0; | 
|  | 1507 | } | 
|  | 1508 | } else if (addr == ELF_NAT_OFFSET) | 
|  | 1509 | return access_nat_bits(target, pt, info, | 
|  | 1510 | data, write_access); | 
|  | 1511 | else if (addr == ELF_PR_OFFSET) | 
|  | 1512 | ptr = &pt->pr; | 
|  | 1513 | else | 
|  | 1514 | return -1; | 
|  | 1515 |  | 
|  | 1516 | if (write_access) | 
|  | 1517 | *ptr = *data; | 
|  | 1518 | else | 
|  | 1519 | *data = *ptr; | 
|  | 1520 |  | 
|  | 1521 | return 0; | 
|  | 1522 | } | 
|  | 1523 |  | 
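|  |  | /* Dispatch on the ELF core-dump offset of the requested register. */ | 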
|  | 1524 | static int | 
|  | 1525 | access_elf_reg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1526 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1527 | { | 
|  | 1528 | if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15)) | 
|  | 1529 | return access_elf_gpreg(target, info, addr, data, write_access); | 
|  | 1530 | else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) | 
|  | 1531 | return access_elf_breg(target, info, addr, data, write_access); | 
|  | 1532 | else | 
|  | 1533 | return access_elf_areg(target, info, addr, data, write_access); | 
|  | 1534 | } | 
|  | 1535 |  | 
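|  |  | /* | 
|  |  |  * Unwinder callback: INFO starts on the target's kernel stack and is | 
|  |  |  * first unwound back to the user-level frame; if that fails, the | 
|  |  |  * callback simply returns. | 
|  |  |  */ | 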
|  | 1536 | void do_gpregs_get(struct unw_frame_info *info, void *arg) | 
|  | 1537 | { | 
|  | 1538 | struct pt_regs *pt; | 
|  | 1539 | struct regset_getset *dst = arg; | 
|  | 1540 | elf_greg_t tmp[16]; | 
|  | 1541 | unsigned int i, index, min_copy; | 
|  | 1542 |  | 
|  | 1543 | if (unw_unwind_to_user(info) < 0) | 
|  | 1544 | return; | 
|  | 1545 |  | 
|  | 1546 | /* | 
|  | 1547 | * coredump format: | 
|  | 1548 | *      r0-r31 | 
|  | 1549 | *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) | 
|  | 1550 | *      predicate registers (p0-p63) | 
|  | 1551 | *      b0-b7 | 
|  | 1552 | *      ip cfm user-mask | 
|  | 1553 | *      ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1554 | *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec | 
|  | 1555 | */ | 
|  | 1556 |  | 
|  | 1557 |  | 
|  | 1558 | /* Skip r0 */ | 
|  | 1559 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | 
|  | 1560 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | 
|  | 1561 | &dst->u.get.kbuf, | 
|  | 1562 | &dst->u.get.ubuf, | 
|  | 1563 | 0, ELF_GR_OFFSET(1)); | 
|  | 1564 | if (dst->ret || dst->count == 0) | 
|  | 1565 | return; | 
|  | 1566 | } | 
|  | 1567 |  | 
|  | 1568 | /* gr1 - gr15 */ | 
|  | 1569 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | 
|  | 1570 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | 
|  | 1571 | min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ? | 
|  | 1572 | (dst->pos + dst->count) : ELF_GR_OFFSET(16); | 
|  | 1573 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1574 | index++) | 
|  | 1575 | if (access_elf_reg(dst->target, info, i, | 
|  | 1576 | &tmp[index], 0) < 0) { | 
|  | 1577 | dst->ret = -EIO; | 
|  | 1578 | return; | 
|  | 1579 | } | 
|  | 1580 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1581 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1582 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | 
|  | 1583 | if (dst->ret || dst->count == 0) | 
|  | 1584 | return; | 
|  | 1585 | } | 
|  | 1586 |  | 
|  | 1587 | /* r16-r31 */ | 
|  | 1588 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | 
|  | 1589 | pt = task_pt_regs(dst->target); | 
|  | 1590 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1591 | &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16, | 
|  | 1592 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | 
|  | 1593 | if (dst->ret || dst->count == 0) | 
|  | 1594 | return; | 
|  | 1595 | } | 
|  | 1596 |  | 
|  | 1597 | /* nat, pr, b0 - b7 */ | 
|  | 1598 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | 
|  | 1599 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | 
|  | 1600 | min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ? | 
|  | 1601 | (dst->pos + dst->count) : ELF_CR_IIP_OFFSET; | 
|  | 1602 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1603 | index++) | 
|  | 1604 | if (access_elf_reg(dst->target, info, i, | 
|  | 1605 | &tmp[index], 0) < 0) { | 
|  | 1606 | dst->ret = -EIO; | 
|  | 1607 | return; | 
|  | 1608 | } | 
|  | 1609 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1610 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1611 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | 
|  | 1612 | if (dst->ret || dst->count == 0) | 
|  | 1613 | return; | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1617 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | 
|  | 1618 | */ | 
|  | 1619 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | 
|  | 1620 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | 
|  | 1621 | min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ? | 
|  | 1622 | (dst->pos + dst->count) : ELF_AR_END_OFFSET; | 
|  | 1623 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1624 | index++) | 
|  | 1625 | if (access_elf_reg(dst->target, info, i, | 
|  | 1626 | &tmp[index], 0) < 0) { | 
|  | 1627 | dst->ret = -EIO; | 
|  | 1628 | return; | 
|  | 1629 | } | 
|  | 1630 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1631 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1632 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | 
|  | 1633 | } | 
|  | 1634 | } | 
|  | 1635 |  | 
|  | 1636 | void do_gpregs_set(struct unw_frame_info *info, void *arg) | 
|  | 1637 | { | 
|  | 1638 | struct pt_regs *pt; | 
|  | 1639 | struct regset_getset *dst = arg; | 
|  | 1640 | elf_greg_t tmp[16]; | 
|  | 1641 | unsigned int i, index; | 
|  | 1642 |  | 
|  | 1643 | if (unw_unwind_to_user(info) < 0) | 
|  | 1644 | return; | 
|  | 1645 |  | 
|  | 1646 | /* Skip r0 */ | 
|  | 1647 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | 
|  | 1648 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | 
|  | 1649 | &dst->u.set.kbuf, | 
|  | 1650 | &dst->u.set.ubuf, | 
|  | 1651 | 0, ELF_GR_OFFSET(1)); | 
|  | 1652 | if (dst->ret || dst->count == 0) | 
|  | 1653 | return; | 
|  | 1654 | } | 
|  | 1655 |  | 
|  | 1656 | /* gr1-gr15 */ | 
|  | 1657 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | 
|  | 1658 | i = dst->pos; | 
|  | 1659 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | 
|  | 1660 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1661 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1662 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | 
|  | 1663 | if (dst->ret) | 
|  | 1664 | return; | 
|  | 1665 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1666 | if (access_elf_reg(dst->target, info, i, | 
|  | 1667 | &tmp[index], 1) < 0) { | 
|  | 1668 | dst->ret = -EIO; | 
|  | 1669 | return; | 
|  | 1670 | } | 
|  | 1671 | if (dst->count == 0) | 
|  | 1672 | return; | 
|  | 1673 | } | 
|  | 1674 |  | 
|  | 1675 | /* gr16-gr31 */ | 
|  | 1676 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | 
|  | 1677 | pt = task_pt_regs(dst->target); | 
|  | 1678 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1679 | &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16, | 
|  | 1680 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | 
|  | 1681 | if (dst->ret || dst->count == 0) | 
|  | 1682 | return; | 
|  | 1683 | } | 
|  | 1684 |  | 
|  | 1685 | /* nat, pr, b0 - b7 */ | 
|  | 1686 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | 
|  | 1687 | i = dst->pos; | 
|  | 1688 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | 
|  | 1689 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1690 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1691 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | 
|  | 1692 | if (dst->ret) | 
|  | 1693 | return; | 
|  | 1694 | for (; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1695 | if (access_elf_reg(dst->target, info, i, | 
|  | 1696 | &tmp[index], 1) < 0) { | 
|  | 1697 | dst->ret = -EIO; | 
|  | 1698 | return; | 
|  | 1699 | } | 
|  | 1700 | if (dst->count == 0) | 
|  | 1701 | return; | 
|  | 1702 | } | 
|  | 1703 |  | 
|  | 1704 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1705 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | 
|  | 1706 | */ | 
|  | 1707 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | 
|  | 1708 | i = dst->pos; | 
|  | 1709 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | 
|  | 1710 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1711 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1712 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | 
|  | 1713 | if (dst->ret) | 
|  | 1714 | return; | 
|  | 1715 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1716 | if (access_elf_reg(dst->target, info, i, | 
|  | 1717 | &tmp[index], 1) < 0) { | 
|  | 1718 | dst->ret = -EIO; | 
|  | 1719 | return; | 
|  | 1720 | } | 
|  | 1721 | } | 
|  | 1722 | } | 
|  | 1723 |  | 
|  | 1724 | #define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t)) | 
|  | 1725 |  | 
|  | 1726 | void do_fpregs_get(struct unw_frame_info *info, void *arg) | 
|  | 1727 | { | 
|  | 1728 | struct regset_getset *dst = arg; | 
|  | 1729 | struct task_struct *task = dst->target; | 
|  | 1730 | elf_fpreg_t tmp[30]; | 
|  | 1731 | int index, min_copy, i; | 
|  | 1732 |  | 
|  | 1733 | if (unw_unwind_to_user(info) < 0) | 
|  | 1734 | return; | 
|  | 1735 |  | 
|  | 1736 | /* Skip pos 0 and 1 */ | 
|  | 1737 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | 
|  | 1738 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | 
|  | 1739 | &dst->u.get.kbuf, | 
|  | 1740 | &dst->u.get.ubuf, | 
|  | 1741 | 0, ELF_FP_OFFSET(2)); | 
|  | 1742 | if (dst->count == 0 || dst->ret) | 
|  | 1743 | return; | 
|  | 1744 | } | 
|  | 1745 |  | 
|  | 1746 | /* fr2-fr31 */ | 
|  | 1747 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | 
|  | 1748 | index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t); | 
|  | 1749 |  | 
|  | 1750 | min_copy = min(((unsigned int)ELF_FP_OFFSET(32)), | 
|  | 1751 | dst->pos + dst->count); | 
|  | 1752 | for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), | 
|  | 1753 | index++) | 
|  | 1754 | if (unw_get_fr(info, i / sizeof(elf_fpreg_t), | 
|  | 1755 | &tmp[index])) { | 
|  | 1756 | dst->ret = -EIO; | 
|  | 1757 | return; | 
|  | 1758 | } | 
|  | 1759 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1760 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1761 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | 
|  | 1762 | if (dst->count == 0 || dst->ret) | 
|  | 1763 | return; | 
|  | 1764 | } | 
|  | 1765 |  | 
|  | 1766 | /* fph */ | 
|  | 1767 | if (dst->count > 0) { | 
|  | 1768 | ia64_flush_fph(dst->target); | 
|  | 1769 | if (task->thread.flags & IA64_THREAD_FPH_VALID) | 
|  | 1770 | dst->ret = user_regset_copyout( | 
|  | 1771 | &dst->pos, &dst->count, | 
|  | 1772 | &dst->u.get.kbuf, &dst->u.get.ubuf, | 
|  | 1773 | &dst->target->thread.fph, | 
|  | 1774 | ELF_FP_OFFSET(32), -1); | 
|  | 1775 | else | 
|  | 1776 | /* Zero fill instead.  */ | 
|  | 1777 | dst->ret = user_regset_copyout_zero( | 
|  | 1778 | &dst->pos, &dst->count, | 
|  | 1779 | &dst->u.get.kbuf, &dst->u.get.ubuf, | 
|  | 1780 | ELF_FP_OFFSET(32), -1); | 
|  | 1781 | } | 
|  | 1782 | } | 
|  | 1783 |  | 
|  | 1784 | void do_fpregs_set(struct unw_frame_info *info, void *arg) | 
|  | 1785 | { | 
|  | 1786 | struct regset_getset *dst = arg; | 
|  | 1787 | elf_fpreg_t fpreg, tmp[30]; | 
|  | 1788 | int index, start, end; | 
|  | 1789 |  | 
|  | 1790 | if (unw_unwind_to_user(info) < 0) | 
|  | 1791 | return; | 
|  | 1792 |  | 
|  | 1793 | /* Skip pos 0 and 1 */ | 
|  | 1794 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | 
|  | 1795 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | 
|  | 1796 | &dst->u.set.kbuf, | 
|  | 1797 | &dst->u.set.ubuf, | 
|  | 1798 | 0, ELF_FP_OFFSET(2)); | 
|  | 1799 | if (dst->count == 0 || dst->ret) | 
|  | 1800 | return; | 
|  | 1801 | } | 
|  | 1802 |  | 
|  | 1803 | /* fr2-fr31 */ | 
|  | 1804 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | 
|  | 1805 | start = dst->pos; | 
|  | 1806 | end = min(((unsigned int)ELF_FP_OFFSET(32)), | 
|  | 1807 | dst->pos + dst->count); | 
|  | 1808 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1809 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1810 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | 
|  | 1811 | if (dst->ret) | 
|  | 1812 | return; | 
|  | 1813 |  | 
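|  |  | /* | 
|  |  |  * Each elf_fpreg_t is 16 bytes, so a copy that starts or ends in | 
|  |  |  * the middle of a register must merge the half it does not cover | 
|  |  |  * from the current register value before the whole register is | 
|  |  |  * written back below. | 
|  |  |  */ | 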
|  | 1814 | if (start & 0xF) { /* only write high part */ | 
|  | 1815 | if (unw_get_fr(info, start / sizeof(elf_fpreg_t), | 
|  | 1816 | &fpreg)) { | 
|  | 1817 | dst->ret = -EIO; | 
|  | 1818 | return; | 
|  | 1819 | } | 
|  | 1820 | tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] | 
|  | 1821 | = fpreg.u.bits[0]; | 
|  | 1822 | start &= ~0xFUL; | 
|  | 1823 | } | 
|  | 1824 | if (end & 0xF) { /* only write low part */ | 
|  | 1825 | if (unw_get_fr(info, end / sizeof(elf_fpreg_t), | 
|  | 1826 | &fpreg)) { | 
|  | 1827 | dst->ret = -EIO; | 
|  | 1828 | return; | 
|  | 1829 | } | 
|  | 1830 | tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1] | 
|  | 1831 | = fpreg.u.bits[1]; | 
|  | 1832 | end = (end + 0xF) & ~0xFUL; | 
|  | 1833 | } | 
|  | 1834 |  | 
|  | 1835 | for ( ;	start < end ; start += sizeof(elf_fpreg_t)) { | 
|  | 1836 | index = start / sizeof(elf_fpreg_t); | 
|  | 1837 | if (unw_set_fr(info, index, tmp[index - 2])) { | 
|  | 1838 | dst->ret = -EIO; | 
|  | 1839 | return; | 
|  | 1840 | } | 
|  | 1841 | } | 
|  | 1842 | if (dst->ret || dst->count == 0) | 
|  | 1843 | return; | 
|  | 1844 | } | 
|  | 1845 |  | 
|  | 1846 | /* fph */ | 
|  | 1847 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) { | 
|  | 1848 | ia64_sync_fph(dst->target); | 
|  | 1849 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1850 | &dst->u.set.kbuf, | 
|  | 1851 | &dst->u.set.ubuf, | 
|  | 1852 | &dst->target->thread.fph, | 
|  | 1853 | ELF_FP_OFFSET(32), -1); | 
|  | 1854 | } | 
|  | 1855 | } | 
|  | 1856 |  | 
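|  |  | /* | 
|  |  |  * Run CALL with a valid unwind frame for TARGET: directly via | 
|  |  |  * unw_init_running() when operating on the current task, otherwise | 
|  |  |  * starting from the blocked task's saved kernel state. | 
|  |  |  */ | 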
|  | 1857 | static int | 
|  | 1858 | do_regset_call(void (*call)(struct unw_frame_info *, void *), | 
|  | 1859 | struct task_struct *target, | 
|  | 1860 | const struct user_regset *regset, | 
|  | 1861 | unsigned int pos, unsigned int count, | 
|  | 1862 | const void *kbuf, const void __user *ubuf) | 
|  | 1863 | { | 
|  | 1864 | struct regset_getset info = { .target = target, .regset = regset, | 
|  | 1865 | .pos = pos, .count = count, | 
|  | 1866 | .u.set = { .kbuf = kbuf, .ubuf = ubuf }, | 
|  | 1867 | .ret = 0 }; | 
|  | 1868 |  | 
|  | 1869 | if (target == current) | 
|  | 1870 | unw_init_running(call, &info); | 
|  | 1871 | else { | 
|  | 1872 | struct unw_frame_info ufi; | 
|  | 1873 | memset(&ufi, 0, sizeof(ufi)); | 
|  | 1874 | unw_init_from_blocked_task(&ufi, target); | 
|  | 1875 | (*call)(&ufi, &info); | 
|  | 1876 | } | 
|  | 1877 |  | 
|  | 1878 | return info.ret; | 
|  | 1879 | } | 
|  | 1880 |  | 
|  | 1881 | static int | 
|  | 1882 | gpregs_get(struct task_struct *target, | 
|  | 1883 | const struct user_regset *regset, | 
|  | 1884 | unsigned int pos, unsigned int count, | 
|  | 1885 | void *kbuf, void __user *ubuf) | 
|  | 1886 | { | 
|  | 1887 | return do_regset_call(do_gpregs_get, target, regset, pos, count, | 
|  | 1888 | kbuf, ubuf); | 
|  | 1889 | } | 
|  | 1890 |  | 
|  | 1891 | static int gpregs_set(struct task_struct *target, | 
|  | 1892 | const struct user_regset *regset, | 
|  | 1893 | unsigned int pos, unsigned int count, | 
|  | 1894 | const void *kbuf, const void __user *ubuf) | 
|  | 1895 | { | 
|  | 1896 | return do_regset_call(do_gpregs_set, target, regset, pos, count, | 
|  | 1897 | kbuf, ubuf); | 
|  | 1898 | } | 
|  | 1899 |  | 
|  | 1900 | static void do_gpregs_writeback(struct unw_frame_info *info, void *arg) | 
|  | 1901 | { | 
|  | 1902 | do_sync_rbs(info, ia64_sync_user_rbs); | 
|  | 1903 | } | 
|  | 1904 |  | 
|  | 1905 | /* | 
|  | 1906 | * This is called to write back the register backing store. | 
|  | 1907 | * ptrace does this before it stops, so that a tracer reading the user | 
|  | 1908 | * memory after the thread stops will get the current register data. | 
|  | 1909 | */ | 
|  | 1910 | static int | 
|  | 1911 | gpregs_writeback(struct task_struct *target, | 
|  | 1912 | const struct user_regset *regset, | 
|  | 1913 | int now) | 
|  | 1914 | { | 
|  | 1915 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) | 
|  | 1916 | return 0; | 
| Shaohua Li | f14488c | 2008-10-06 10:43:06 -0700 | [diff] [blame] | 1917 | set_notify_resume(target); | 
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 1918 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, | 
|  | 1919 | NULL, NULL); | 
|  | 1920 | } | 
|  | 1921 |  | 
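|  |  | /* | 
|  |  |  * Report how many f-registers hold live state: all 128 once the | 
|  |  |  * high floating-point partition (f32-f127) is in use, otherwise | 
|  |  |  * just f0-f31. | 
|  |  |  */ | 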
|  | 1922 | static int | 
|  | 1923 | fpregs_active(struct task_struct *target, const struct user_regset *regset) | 
|  | 1924 | { | 
|  | 1925 | return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32; | 
|  | 1926 | } | 
|  | 1927 |  | 
|  | 1928 | static int fpregs_get(struct task_struct *target, | 
|  | 1929 | const struct user_regset *regset, | 
|  | 1930 | unsigned int pos, unsigned int count, | 
|  | 1931 | void *kbuf, void __user *ubuf) | 
|  | 1932 | { | 
|  | 1933 | return do_regset_call(do_fpregs_get, target, regset, pos, count, | 
|  | 1934 | kbuf, ubuf); | 
|  | 1935 | } | 
|  | 1936 |  | 
|  | 1937 | static int fpregs_set(struct task_struct *target, | 
|  | 1938 | const struct user_regset *regset, | 
|  | 1939 | unsigned int pos, unsigned int count, | 
|  | 1940 | const void *kbuf, const void __user *ubuf) | 
|  | 1941 | { | 
|  | 1942 | return do_regset_call(do_fpregs_set, target, regset, pos, count, | 
|  | 1943 | kbuf, ubuf); | 
|  | 1944 | } | 
|  | 1945 |  | 
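|  |  | /* | 
|  |  |  * Translate a legacy PT_* user-area offset into a regset position | 
|  |  |  * and forward to the regset accessors above; only the debug | 
|  |  |  * registers are still handled directly here. | 
|  |  |  */ | 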
| Shaohua Li | 4cd8dc8 | 2008-02-28 16:09:42 +0800 | [diff] [blame] | 1946 | static int | 
|  | 1947 | access_uarea(struct task_struct *child, unsigned long addr, | 
|  | 1948 | unsigned long *data, int write_access) | 
|  | 1949 | { | 
|  | 1950 | unsigned int pos = -1; /* an invalid value */ | 
|  | 1951 | int ret; | 
|  | 1952 | unsigned long *ptr, regnum; | 
|  | 1953 |  | 
|  | 1954 | if ((addr & 0x7) != 0) { | 
|  | 1955 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | 
|  | 1956 | return -1; | 
|  | 1957 | } | 
|  | 1958 | if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) || | 
|  | 1959 | (addr >= PT_R7 + 8 && addr < PT_B1) || | 
|  | 1960 | (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) || | 
|  | 1961 | (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) { | 
|  | 1962 | dprintk("ptrace: rejecting access to register " | 
|  | 1963 | "address 0x%lx\n", addr); | 
|  | 1964 | return -1; | 
|  | 1965 | } | 
|  | 1966 |  | 
|  | 1967 | switch (addr) { | 
|  | 1968 | case PT_F32 ... (PT_F127 + 15): | 
|  | 1969 | pos = addr - PT_F32 + ELF_FP_OFFSET(32); | 
|  | 1970 | break; | 
|  | 1971 | case PT_F2 ... (PT_F5 + 15): | 
|  | 1972 | pos = addr - PT_F2 + ELF_FP_OFFSET(2); | 
|  | 1973 | break; | 
|  | 1974 | case PT_F10 ... (PT_F31 + 15): | 
|  | 1975 | pos = addr - PT_F10 + ELF_FP_OFFSET(10); | 
|  | 1976 | break; | 
|  | 1977 | case PT_F6 ... (PT_F9 + 15): | 
|  | 1978 | pos = addr - PT_F6 + ELF_FP_OFFSET(6); | 
|  | 1979 | break; | 
|  | 1980 | } | 
|  | 1981 |  | 
|  | 1982 | if (pos != -1) { | 
|  | 1983 | if (write_access) | 
|  | 1984 | ret = fpregs_set(child, NULL, pos, | 
|  | 1985 | sizeof(unsigned long), data, NULL); | 
|  | 1986 | else | 
|  | 1987 | ret = fpregs_get(child, NULL, pos, | 
|  | 1988 | sizeof(unsigned long), data, NULL); | 
|  | 1989 | if (ret != 0) | 
|  | 1990 | return -1; | 
|  | 1991 | return 0; | 
|  | 1992 | } | 
|  | 1993 |  | 
|  | 1994 | switch (addr) { | 
|  | 1995 | case PT_NAT_BITS: | 
|  | 1996 | pos = ELF_NAT_OFFSET; | 
|  | 1997 | break; | 
|  | 1998 | case PT_R4 ... PT_R7: | 
|  | 1999 | pos = addr - PT_R4 + ELF_GR_OFFSET(4); | 
|  | 2000 | break; | 
|  | 2001 | case PT_B1 ... PT_B5: | 
|  | 2002 | pos = addr - PT_B1 + ELF_BR_OFFSET(1); | 
|  | 2003 | break; | 
|  | 2004 | case PT_AR_EC: | 
|  | 2005 | pos = ELF_AR_EC_OFFSET; | 
|  | 2006 | break; | 
|  | 2007 | case PT_AR_LC: | 
|  | 2008 | pos = ELF_AR_LC_OFFSET; | 
|  | 2009 | break; | 
|  | 2010 | case PT_CR_IPSR: | 
|  | 2011 | pos = ELF_CR_IPSR_OFFSET; | 
|  | 2012 | break; | 
|  | 2013 | case PT_CR_IIP: | 
|  | 2014 | pos = ELF_CR_IIP_OFFSET; | 
|  | 2015 | break; | 
|  | 2016 | case PT_CFM: | 
|  | 2017 | pos = ELF_CFM_OFFSET; | 
|  | 2018 | break; | 
|  | 2019 | case PT_AR_UNAT: | 
|  | 2020 | pos = ELF_AR_UNAT_OFFSET; | 
|  | 2021 | break; | 
|  | 2022 | case PT_AR_PFS: | 
|  | 2023 | pos = ELF_AR_PFS_OFFSET; | 
|  | 2024 | break; | 
|  | 2025 | case PT_AR_RSC: | 
|  | 2026 | pos = ELF_AR_RSC_OFFSET; | 
|  | 2027 | break; | 
|  | 2028 | case PT_AR_RNAT: | 
|  | 2029 | pos = ELF_AR_RNAT_OFFSET; | 
|  | 2030 | break; | 
|  | 2031 | case PT_AR_BSPSTORE: | 
|  | 2032 | pos = ELF_AR_BSPSTORE_OFFSET; | 
|  | 2033 | break; | 
|  | 2034 | case PT_PR: | 
|  | 2035 | pos = ELF_PR_OFFSET; | 
|  | 2036 | break; | 
|  | 2037 | case PT_B6: | 
|  | 2038 | pos = ELF_BR_OFFSET(6); | 
|  | 2039 | break; | 
|  | 2040 | case PT_AR_BSP: | 
|  | 2041 | pos = ELF_AR_BSP_OFFSET; | 
|  | 2042 | break; | 
|  | 2043 | case PT_R1 ... PT_R3: | 
|  | 2044 | pos = addr - PT_R1 + ELF_GR_OFFSET(1); | 
|  | 2045 | break; | 
|  | 2046 | case PT_R12 ... PT_R15: | 
|  | 2047 | pos = addr - PT_R12 + ELF_GR_OFFSET(12); | 
|  | 2048 | break; | 
|  | 2049 | case PT_R8 ... PT_R11: | 
|  | 2050 | pos = addr - PT_R8 + ELF_GR_OFFSET(8); | 
|  | 2051 | break; | 
|  | 2052 | case PT_R16 ... PT_R31: | 
|  | 2053 | pos = addr - PT_R16 + ELF_GR_OFFSET(16); | 
|  | 2054 | break; | 
|  | 2055 | case PT_AR_CCV: | 
|  | 2056 | pos = ELF_AR_CCV_OFFSET; | 
|  | 2057 | break; | 
|  | 2058 | case PT_AR_FPSR: | 
|  | 2059 | pos = ELF_AR_FPSR_OFFSET; | 
|  | 2060 | break; | 
|  | 2061 | case PT_B0: | 
|  | 2062 | pos = ELF_BR_OFFSET(0); | 
|  | 2063 | break; | 
|  | 2064 | case PT_B7: | 
|  | 2065 | pos = ELF_BR_OFFSET(7); | 
|  | 2066 | break; | 
|  | 2067 | case PT_AR_CSD: | 
|  | 2068 | pos = ELF_AR_CSD_OFFSET; | 
|  | 2069 | break; | 
|  | 2070 | case PT_AR_SSD: | 
|  | 2071 | pos = ELF_AR_SSD_OFFSET; | 
|  | 2072 | break; | 
|  | 2073 | } | 
|  | 2074 |  | 
|  | 2075 | if (pos != -1) { | 
|  | 2076 | if (write_access) | 
|  | 2077 | ret = gpregs_set(child, NULL, pos, | 
|  | 2078 | sizeof(unsigned long), data, NULL); | 
|  | 2079 | else | 
|  | 2080 | ret = gpregs_get(child, NULL, pos, | 
|  | 2081 | sizeof(unsigned long), data, NULL); | 
|  | 2082 | if (ret != 0) | 
|  | 2083 | return -1; | 
|  | 2084 | return 0; | 
|  | 2085 | } | 
|  | 2086 |  | 
|  | 2087 | /* access debug registers */ | 
|  | 2088 | if (addr >= PT_IBR) { | 
|  | 2089 | regnum = (addr - PT_IBR) >> 3; | 
|  | 2090 | ptr = &child->thread.ibr[0]; | 
|  | 2091 | } else { | 
|  | 2092 | regnum = (addr - PT_DBR) >> 3; | 
|  | 2093 | ptr = &child->thread.dbr[0]; | 
|  | 2094 | } | 
|  | 2095 |  | 
|  | 2096 | if (regnum >= 8) { | 
|  | 2097 | dprintk("ptrace: rejecting access to register " | 
|  | 2098 | "address 0x%lx\n", addr); | 
|  | 2099 | return -1; | 
|  | 2100 | } | 
|  | 2101 | #ifdef CONFIG_PERFMON | 
|  | 2102 | /* | 
|  | 2103 | * Check if debug registers are used by perfmon. This | 
|  | 2104 | * test must be done once we know that we can do the | 
|  | 2105 | * operation, i.e. the arguments are all valid, but | 
|  | 2106 | * before we start modifying the state. | 
|  | 2107 | * | 
|  | 2108 | * Perfmon needs to keep a count of how many processes | 
|  | 2109 | * are trying to modify the debug registers for system | 
|  | 2110 | * wide monitoring sessions. | 
|  | 2111 | * | 
|  | 2112 | * We also include read accesses here, because they may | 
|  | 2113 | * cause the PMU-installed debug register state | 
|  | 2114 | * (dbr[], ibr[]) to be reset. The two arrays are also | 
|  | 2115 | * used by perfmon, but we do not use | 
|  | 2116 | * IA64_THREAD_DBG_VALID. The registers are restored | 
|  | 2117 | * by the PMU context switch code. | 
|  | 2118 | */ | 
|  | 2119 | if (pfm_use_debug_registers(child)) | 
|  | 2120 | return -1; | 
|  | 2121 | #endif | 
|  | 2122 |  | 
|  | 2123 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | 
|  | 2124 | child->thread.flags |= IA64_THREAD_DBG_VALID; | 
|  | 2125 | memset(child->thread.dbr, 0, | 
|  | 2126 | sizeof(child->thread.dbr)); | 
|  | 2127 | memset(child->thread.ibr, 0, | 
|  | 2128 | sizeof(child->thread.ibr)); | 
|  | 2129 | } | 
|  | 2130 |  | 
|  | 2131 | ptr += regnum; | 
|  | 2132 |  | 
|  | 2133 | if ((regnum & 1) && write_access) { | 
|  | 2134 | /* don't let the user set kernel-level breakpoints: */ | 
|  | 2135 | *ptr = *data & ~(7UL << 56); | 
|  | 2136 | return 0; | 
|  | 2137 | } | 
|  | 2138 | if (write_access) | 
|  | 2139 | *ptr = *data; | 
|  | 2140 | else | 
|  | 2141 | *data = *ptr; | 
|  | 2142 | return 0; | 
|  | 2143 | } | 
|  | 2144 |  | 
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 2145 | static const struct user_regset native_regsets[] = { | 
|  | 2146 | { | 
|  | 2147 | .core_note_type = NT_PRSTATUS, | 
|  | 2148 | .n = ELF_NGREG, | 
|  | 2149 | .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), | 
|  | 2150 | .get = gpregs_get, .set = gpregs_set, | 
|  | 2151 | .writeback = gpregs_writeback | 
|  | 2152 | }, | 
|  | 2153 | { | 
|  | 2154 | .core_note_type = NT_PRFPREG, | 
|  | 2155 | .n = ELF_NFPREG, | 
|  | 2156 | .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), | 
|  | 2157 | .get = fpregs_get, .set = fpregs_set, .active = fpregs_active | 
|  | 2158 | }, | 
|  | 2159 | }; | 
|  | 2160 |  | 
|  | 2161 | static const struct user_regset_view user_ia64_view = { | 
|  | 2162 | .name = "ia64", | 
|  | 2163 | .e_machine = EM_IA_64, | 
|  | 2164 | .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) | 
|  | 2165 | }; | 
|  | 2166 |  | 
|  | 2167 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | 
|  | 2168 | { | 
|  | 2169 | return &user_ia64_view; | 
|  | 2170 | } | 
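|  |  |  | 
|  |  | /* | 
|  |  |  * The view above is what the generic regset code uses to lay out the | 
|  |  |  * NT_PRSTATUS and NT_PRFPREG notes in ELF core dumps (presumably via | 
|  |  |  * fill_thread_core_info() in fs/binfmt_elf.c). | 
|  |  |  */ | 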
| Shaohua Li | cfb361f | 2008-09-18 15:49:14 +0800 | [diff] [blame] | 2171 |  | 
|  | 2172 | struct syscall_get_set_args { | 
|  | 2173 | unsigned int i; | 
|  | 2174 | unsigned int n; | 
|  | 2175 | unsigned long *args; | 
|  | 2176 | struct pt_regs *regs; | 
|  | 2177 | int rw; | 
|  | 2178 | }; | 
|  | 2179 |  | 
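|  |  | /* | 
|  |  |  * The syscall arguments live in the dirty partition of the kernel | 
|  |  |  * register backing store.  pt->loadrs holds the ar.rsc.loadrs field | 
|  |  |  * (the dirty-partition size in bytes, shifted left by 16), so the | 
|  |  |  * ">> 19" below converts it to a count of 8-byte slots. | 
|  |  |  */ | 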
|  | 2180 | static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data) | 
|  | 2181 | { | 
|  | 2182 | struct syscall_get_set_args *args = data; | 
|  | 2183 | struct pt_regs *pt = args->regs; | 
|  | 2184 | unsigned long *krbs, cfm, ndirty; | 
|  | 2185 | int i, count; | 
|  | 2186 |  | 
|  | 2187 | if (unw_unwind_to_user(info) < 0) | 
|  | 2188 | return; | 
|  | 2189 |  | 
|  | 2190 | cfm = pt->cr_ifs; | 
|  | 2191 | krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; | 
|  | 2192 | ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); | 
|  | 2193 |  | 
|  | 2194 | count = 0; | 
|  | 2195 | if (in_syscall(pt)) | 
|  | 2196 | count = min_t(int, args->n, cfm & 0x7f); | 
|  | 2197 |  | 
|  | 2198 | for (i = 0; i < count; i++) { | 
|  | 2199 | if (args->rw) | 
|  | 2200 | *ia64_rse_skip_regs(krbs, ndirty + i + args->i) = | 
|  | 2201 | args->args[i]; | 
|  | 2202 | else | 
|  | 2203 | args->args[i] = *ia64_rse_skip_regs(krbs, | 
|  | 2204 | ndirty + i + args->i); | 
|  | 2205 | } | 
|  | 2206 |  | 
|  | 2207 | if (!args->rw) { | 
|  | 2208 | while (i < args->n) { | 
|  | 2209 | args->args[i] = 0; | 
|  | 2210 | i++; | 
|  | 2211 | } | 
|  | 2212 | } | 
|  | 2213 | } | 
|  | 2214 |  | 
|  | 2215 | void ia64_syscall_get_set_arguments(struct task_struct *task, | 
|  | 2216 | struct pt_regs *regs, unsigned int i, unsigned int n, | 
|  | 2217 | unsigned long *args, int rw) | 
|  | 2218 | { | 
|  | 2219 | struct syscall_get_set_args data = { | 
|  | 2220 | .i = i, | 
|  | 2221 | .n = n, | 
|  | 2222 | .args = args, | 
|  | 2223 | .regs = regs, | 
|  | 2224 | .rw = rw, | 
|  | 2225 | }; | 
|  | 2226 |  | 
|  | 2227 | if (task == current) | 
|  | 2228 | unw_init_running(syscall_get_set_args_cb, &data); | 
|  | 2229 | else { | 
|  | 2230 | struct unw_frame_info ufi; | 
|  | 2231 | memset(&ufi, 0, sizeof(ufi)); | 
|  | 2232 | unw_init_from_blocked_task(&ufi, task); | 
|  | 2233 | syscall_get_set_args_cb(&ufi, &data); | 
|  | 2234 | } | 
|  | 2235 | } |