/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
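
/*
 * Note on the test above: on kernel entry via interruption, cr.ifs is
 * saved with its valid bit (bit 63) set, so the stored value is
 * negative when viewed as a signed long, whereas the syscall path
 * leaves bit 63 clear.  convert_to_non_syscall() further down relies
 * on the same convention when it sets pt->cr_ifs = (1UL << 63) | cfm.
 */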

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
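
/*
 * PUT_BITS applies the inverse rotation of GET_BITS, so for the
 * scratch registers the two helpers round-trip:
 *
 *	ia64_get_scratch_nat_bits(pt, ia64_put_scratch_nat_bits(pt, nat))
 *
 * yields NAT again, restricted to the scratch-register bits handled
 * above.
 */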

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
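
/*
 * For reference: an IA-64 instruction bundle is 16 bytes and holds
 * three instruction slots; psr.ri (0..2) selects the slot within the
 * bundle at cr.iip, so stepping forward walks ri through 0 -> 1 -> 2
 * and then wraps to slot 0 of the next bundle.  In an MLX bundle the
 * movl operand spans slots 1 and 2, which is why slot 2 is skipped
 * above and below.
 */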

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 | \
 *	+--------+ |
 *	| slot01 | > child_regs->ar_rnat
 *	+--------+ |
 *	| slot02 | /				kernel rbs
 *	+--------+				+--------+
 *		    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +				+--------+
 *						| slot62 |
 *	+- - - - +				+--------+
 *						|  rnat	 |
 *	+- - - - +				+--------+
 *	  vrnat					| slot00 |
 *	+- - - - +				+--------+
 *						=	 =
 *						+--------+
 *						| slot00 | \
 *						+--------+ |
 *						| slot01 | > child_stack->ar_rnat
 *						+--------+ |
 *						| slot02 | /
 *						+--------+
 *							  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
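
/*
 * Reminder of the RSE convention that get_rnat()/put_rnat() rely on:
 * one RNaT collection word is interleaved after every 63 register
 * slots on a backing store (at addresses whose bits 3..8 are all ones,
 * hence the 0x....1f8 in the diagram above), and ia64_rse_rnat_addr()
 * returns the collection word covering a given slot.
 */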

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
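
/*
 * That is: an address is considered "on the kernel RBS" when it falls
 * between the child's user-level bspstore and the RNaT collection slot
 * covering urbs_end, inclusive -- everything in that window still
 * lives in the kernel register backing store rather than in user
 * memory.
 */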

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
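
/*
 * ia64_peek() and ia64_poke() are the word-granular primitives used by
 * ia64_sync_user_rbs() and ia64_sync_kernel_rbs() below; anything that
 * is not backed by the kernel RBS simply goes through
 * access_process_vm().
 */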

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
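
/*
 * A note on the ndirty computation above: as saved by the kernel entry
 * path, pt->loadrs holds the dirty-partition size in ar.rsc.loadrs
 * format, i.e. the byte count shifted left by 16, so "pt->loadrs >> 19"
 * (16 + 3) converts it to a count of 64-bit slots.  On the syscall
 * path the current frame was never covered, so its registers
 * (cfm & 0x7f) are counted in on top.
 */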

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW points to the switch_stack
 * structure of CHILD; USER_RBS_START and USER_RBS_END delimit the
 * user-level backing store to copy.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		      unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val), 0)
		    != sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (by modifying its memory directly), and we must
 * keep the RSE state stored in the kernel from overwriting the user
 * stack (the user-space copy is the newer one in that case).  To work
 * around this, we copy the kernel RBS to the user RBS before the task
 * stops, so the user RBS holds up-to-date data; after the task resumes
 * from the traced stop we copy the user RBS back to the kernel, and
 * the kernel uses the newer state to return to user-land.
 * TIF_RESTORE_RSE is the flag indicating that the user RSE still needs
 * to be synchronized back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	tsk_set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
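
/*
 * ia64_ptrace_stop() is invoked from the arch_ptrace_stop hook on each
 * tracing stop (see also ptrace_attach_sync_user_rbs() below for the
 * attach-while-job-control-stopped case); ia64_sync_krbs() is its
 * counterpart on the resume side.
 */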

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
	tsk_clear_notify_resume(current);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			tsk_set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->signal) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
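
/*
 * Typical use of this pair within this file: ptrace_getregs() calls
 * ia64_flush_fph() before copying fr32-fr127 out of thread.fph, while
 * ptrace_setregs() calls ia64_sync_fph() before overwriting thread.fph
 * so the task picks the new values up on its next activation.
 */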

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}
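
/*
 * Note the split above: the NaT bits of the scratch registers are
 * reconstructed from ar.unat via ia64_get/put_scratch_nat_bits(),
 * while r4-r7 are preserved registers, so their NaT bits have to be
 * fetched and stored through the unwinder (unw_get_gr()/unw_set_gr()).
 */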

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}
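
/*
 * psr.ss raises a Single Step trap after every instruction, while
 * psr.tb raises a Taken Branch trap on branches; that is what makes
 * the block-step variant above stop at the next taken branch rather
 * than at the next instruction.
 */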

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
|  | 1233 |  | 
|  | 1234 |  | 
| bibo,mao | 90f9d70 | 2007-01-31 17:50:31 +0800 | [diff] [blame] | 1235 | static void | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 | syscall_trace (void) | 
|  | 1237 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | /* | 
|  | 1239 | * The 0x80 provides a way for the tracing parent to | 
|  | 1240 | * distinguish between a syscall stop and SIGTRAP delivery. | 
|  | 1241 | */ | 
|  | 1242 | ptrace_notify(SIGTRAP | 
|  | 1243 | | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | 
|  | 1244 |  | 
|  | 1245 | /* | 
|  | 1246 | * This isn't the same as continuing with a signal, but it | 
|  | 1247 | * will do for normal use.  strace only continues with a | 
|  | 1248 | * signal if the stopping signal is not SIGTRAP.  -brl | 
|  | 1249 | */ | 
|  | 1250 | if (current->exit_code) { | 
|  | 1251 | send_sig(current->exit_code, current, 1); | 
|  | 1252 | current->exit_code = 0; | 
|  | 1253 | } | 
|  | 1254 | } | 
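|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative tracer-side sketch (not part of this file): with | 
|  |  | * PTRACE_O_TRACESYSGOOD set, the 0x80 above appears in the wait | 
|  |  | * status, letting the tracer tell syscall stops apart from real | 
|  |  | * SIGTRAP deliveries. | 
|  |  | * | 
|  |  | *	#include <signal.h> | 
|  |  | *	#include <sys/ptrace.h> | 
|  |  | *	#include <sys/wait.h> | 
|  |  | * | 
|  |  | *	int status; | 
|  |  | *	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD); | 
|  |  | *	ptrace(PTRACE_SYSCALL, pid, 0, 0); | 
|  |  | *	waitpid(pid, &status, 0); | 
|  |  | *	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80)) | 
|  |  | *		...handle a syscall stop, not an ordinary SIGTRAP... | 
|  |  | */ | 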
|  | 1255 |  | 
|  | 1256 | /* "asmlinkage" so the input arguments are preserved... */ | 
|  | 1257 |  | 
|  | 1258 | asmlinkage void | 
|  | 1259 | syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | 
|  | 1260 | long arg4, long arg5, long arg6, long arg7, | 
|  | 1261 | struct pt_regs regs) | 
|  | 1262 | { | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1263 | if (test_thread_flag(TIF_SYSCALL_TRACE) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | && (current->ptrace & PT_PTRACED)) | 
|  | 1265 | syscall_trace(); | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1266 |  | 
| Petr Tesarik | 3b2ce0b | 2007-12-12 15:23:34 +0100 | [diff] [blame] | 1267 | /* copy user rbs to kernel rbs */ | 
|  | 1268 | if (test_thread_flag(TIF_RESTORE_RSE)) | 
|  | 1269 | ia64_sync_krbs(); | 
|  | 1270 |  | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1271 | if (unlikely(current->audit_context)) { | 
|  | 1272 | long syscall; | 
|  | 1273 | int arch; | 
|  | 1274 |  | 
|  | 1275 | if (IS_IA32_PROCESS(&regs)) { | 
|  | 1276 | syscall = regs.r1; | 
|  | 1277 | arch = AUDIT_ARCH_I386; | 
|  | 1278 | } else { | 
|  | 1279 | syscall = regs.r15; | 
|  | 1280 | arch = AUDIT_ARCH_IA64; | 
|  | 1281 | } | 
|  | 1282 |  | 
| Al Viro | 5411be5 | 2006-03-29 20:23:36 -0500 | [diff] [blame] | 1283 | audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); | 
|  | 2fd6f58 | 2005-04-29 16:08:28 +0100 | [diff] [blame] | 1284 | } | 
|  | 1285 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | } | 
|  | 1287 |  | 
|  | 1288 | /* "asmlinkage" so the input arguments are preserved... */ | 
|  | 1289 |  | 
|  | 1290 | asmlinkage void | 
|  | 1291 | syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | 
|  | 1292 | long arg4, long arg5, long arg6, long arg7, | 
|  | 1293 | struct pt_regs regs) | 
|  | 1294 | { | 
| David Woodhouse | ee436dc | 2005-11-18 14:43:54 +0000 | [diff] [blame] | 1295 | if (unlikely(current->audit_context)) { | 
|  | 1296 | int success = AUDITSC_RESULT(regs.r10); | 
|  | 1297 | long result = regs.r8; | 
|  | 1298 |  | 
|  | 1299 | if (success != AUDITSC_SUCCESS) | 
|  | 1300 | result = -result; | 
| Al Viro | 5411be5 | 2006-03-29 20:23:36 -0500 | [diff] [blame] | 1301 | audit_syscall_exit(success, result); | 
| David Woodhouse | ee436dc | 2005-11-18 14:43:54 +0000 | [diff] [blame] | 1302 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 |  | 
| bibo,mao | 90f9d70 | 2007-01-31 17:50:31 +0800 | [diff] [blame] | 1304 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | 
|  | 1305 | || test_thread_flag(TIF_SINGLESTEP)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | && (current->ptrace & PT_PTRACED)) | 
|  | 1307 | syscall_trace(); | 
| Petr Tesarik | 3b2ce0b | 2007-12-12 15:23:34 +0100 | [diff] [blame] | 1308 |  | 
|  | 1309 | /* copy user rbs to kernel rbs */ | 
|  | 1310 | if (test_thread_flag(TIF_RESTORE_RSE)) | 
|  | 1311 | ia64_sync_krbs(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | } | 
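|  |  |  | 
|  |  | /* | 
|  |  | * Note: the TIF_RESTORE_RSE syncs above refresh the kernel's copy of | 
|  |  | * the register backing store from user memory once a tracer is | 
|  |  | * involved; gpregs_writeback() below does the reverse, flushing the | 
|  |  | * kernel copy out to user memory before the task stops. | 
|  |  | */ | 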
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 1313 |  | 
|  | 1314 | /* Utrace implementation starts here */ | 
|  | 1315 | struct regset_get { | 
|  | 1316 | void *kbuf; | 
|  | 1317 | void __user *ubuf; | 
|  | 1318 | }; | 
|  | 1319 |  | 
|  | 1320 | struct regset_set { | 
|  | 1321 | const void *kbuf; | 
|  | 1322 | const void __user *ubuf; | 
|  | 1323 | }; | 
|  | 1324 |  | 
|  | 1325 | struct regset_getset { | 
|  | 1326 | struct task_struct *target; | 
|  | 1327 | const struct user_regset *regset; | 
|  | 1328 | union { | 
|  | 1329 | struct regset_get get; | 
|  | 1330 | struct regset_set set; | 
|  | 1331 | } u; | 
|  | 1332 | unsigned int pos; | 
|  | 1333 | unsigned int count; | 
|  | 1334 | int ret; | 
|  | 1335 | }; | 
|  | 1336 |  | 
|  | 1337 | static int | 
|  | 1338 | access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1339 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1340 | { | 
|  | 1341 | struct pt_regs *pt; | 
|  | 1342 | unsigned long *ptr = NULL; | 
|  | 1343 | int ret; | 
|  | 1344 | char nat = 0; | 
|  | 1345 |  | 
|  | 1346 | pt = task_pt_regs(target); | 
|  | 1347 | switch (addr) { | 
|  | 1348 | case ELF_GR_OFFSET(1): | 
|  | 1349 | ptr = &pt->r1; | 
|  | 1350 | break; | 
|  | 1351 | case ELF_GR_OFFSET(2): | 
|  | 1352 | case ELF_GR_OFFSET(3): | 
|  | 1353 | ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2)); | 
|  | 1354 | break; | 
|  | 1355 | case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7): | 
|  | 1356 | if (write_access) { | 
|  | 1357 | /* read NaT bit first: */ | 
|  | 1358 | unsigned long dummy; | 
|  | 1359 |  | 
|  | 1360 | ret = unw_get_gr(info, addr/8, &dummy, &nat); | 
|  | 1361 | if (ret < 0) | 
|  | 1362 | return ret; | 
|  | 1363 | } | 
|  | 1364 | return unw_access_gr(info, addr/8, data, &nat, write_access); | 
|  | 1365 | case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11): | 
|  | 1366 | ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8); | 
|  | 1367 | break; | 
|  | 1368 | case ELF_GR_OFFSET(12): | 
|  | 1369 | case ELF_GR_OFFSET(13): | 
|  | 1370 | ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12); | 
|  | 1371 | break; | 
|  | 1372 | case ELF_GR_OFFSET(14): | 
|  | 1373 | ptr = &pt->r14; | 
|  | 1374 | break; | 
|  | 1375 | case ELF_GR_OFFSET(15): | 
|  | 1376 | ptr = &pt->r15; | 
|  | 1377 | } | 
|  | 1378 | if (write_access) | 
|  | 1379 | *ptr = *data; | 
|  | 1380 | else | 
|  | 1381 | *data = *ptr; | 
|  | 1382 | return 0; | 
|  | 1383 | } | 
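|  |  |  | 
|  |  | /* | 
|  |  | * Note: r1-r3 and r8-r15 are scratch registers, saved in pt_regs on | 
|  |  | * kernel entry, so they can be accessed there directly.  r4-r7 are | 
|  |  | * preserved (callee-saved) registers; their current values live | 
|  |  | * wherever the unwinder finds them, hence the unw_access_gr() path | 
|  |  | * above, which also maintains the corresponding NaT bit. | 
|  |  | */ | 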
|  | 1384 |  | 
|  | 1385 | static int | 
|  | 1386 | access_elf_breg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1387 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1388 | { | 
|  | 1389 | struct pt_regs *pt; | 
|  | 1390 | unsigned long *ptr = NULL; | 
|  | 1391 |  | 
|  | 1392 | pt = task_pt_regs(target); | 
|  | 1393 | switch (addr) { | 
|  | 1394 | case ELF_BR_OFFSET(0): | 
|  | 1395 | ptr = &pt->b0; | 
|  | 1396 | break; | 
|  | 1397 | case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5): | 
|  | 1398 | return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8, | 
|  | 1399 | data, write_access); | 
|  | 1400 | case ELF_BR_OFFSET(6): | 
|  | 1401 | ptr = &pt->b6; | 
|  | 1402 | break; | 
|  | 1403 | case ELF_BR_OFFSET(7): | 
|  | 1404 | ptr = &pt->b7; | 
|  | 1405 | } | 
|  | 1406 | if (write_access) | 
|  | 1407 | *ptr = *data; | 
|  | 1408 | else | 
|  | 1409 | *data = *ptr; | 
|  | 1410 | return 0; | 
|  | 1411 | } | 
|  | 1412 |  | 
|  | 1413 | static int | 
|  | 1414 | access_elf_areg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1415 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1416 | { | 
|  | 1417 | struct pt_regs *pt; | 
|  | 1418 | unsigned long cfm, urbs_end; | 
|  | 1419 | unsigned long *ptr = NULL; | 
|  | 1420 |  | 
|  | 1421 | pt = task_pt_regs(target); | 
|  | 1422 | if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) { | 
|  | 1423 | switch (addr) { | 
|  | 1424 | case ELF_AR_RSC_OFFSET: | 
|  | 1425 | /* force PL3 */ | 
|  | 1426 | if (write_access) | 
|  | 1427 | pt->ar_rsc = *data | (3 << 2); | 
|  | 1428 | else | 
|  | 1429 | *data = pt->ar_rsc; | 
|  | 1430 | return 0; | 
|  | 1431 | case ELF_AR_BSP_OFFSET: | 
|  | 1432 | /* | 
|  | 1433 | * By convention, we use PT_AR_BSP to refer to | 
|  | 1434 | * the end of the user-level backing store. | 
|  | 1435 | * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) | 
|  | 1436 | * to get the real value of ar.bsp at the time | 
|  | 1437 | * the kernel was entered. | 
|  | 1438 | * | 
|  | 1439 | * Furthermore, when changing the contents of | 
|  | 1440 | * PT_AR_BSP (or PT_CFM) while the task is | 
|  | 1441 | * blocked in a system call, convert the state | 
|  | 1442 | * so that the non-system-call exit | 
|  | 1443 | * path is used.  This ensures that the proper | 
|  | 1444 | * state will be picked up when resuming | 
|  | 1445 | * execution.  However, it *also* means that | 
|  | 1446 | * once we write PT_AR_BSP/PT_CFM, it won't be | 
|  | 1447 | * possible to modify the syscall arguments of | 
|  | 1448 | * the pending system call any longer.  This | 
|  | 1449 | * shouldn't be an issue because modifying | 
|  | 1450 | * PT_AR_BSP/PT_CFM generally implies that | 
|  | 1451 | * we're either abandoning the pending system | 
|  | 1452 | * call or that we defer its re-execution | 
|  | 1453 | * (e.g., due to GDB doing an inferior | 
|  | 1454 | * function call).  See the sketch after this function. | 
|  | 1455 | */ | 
|  | 1456 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | 
|  | 1457 | if (write_access) { | 
|  | 1458 | if (*data != urbs_end) { | 
|  | 1459 | if (in_syscall(pt)) | 
|  | 1460 | convert_to_non_syscall(target, | 
|  | 1461 | pt, | 
|  | 1462 | cfm); | 
|  | 1463 | /* | 
|  | 1464 | * Simulate user-level write | 
|  | 1465 | * of ar.bsp: | 
|  | 1466 | */ | 
|  | 1467 | pt->loadrs = 0; | 
|  | 1468 | pt->ar_bspstore = *data; | 
|  | 1469 | } | 
|  | 1470 | } else | 
|  | 1471 | *data = urbs_end; | 
|  | 1472 | return 0; | 
|  | 1473 | case ELF_AR_BSPSTORE_OFFSET: | 
|  | 1474 | ptr = &pt->ar_bspstore; | 
|  | 1475 | break; | 
|  | 1476 | case ELF_AR_RNAT_OFFSET: | 
|  | 1477 | ptr = &pt->ar_rnat; | 
|  | 1478 | break; | 
|  | 1479 | case ELF_AR_CCV_OFFSET: | 
|  | 1480 | ptr = &pt->ar_ccv; | 
|  | 1481 | break; | 
|  | 1482 | case ELF_AR_UNAT_OFFSET: | 
|  | 1483 | ptr = &pt->ar_unat; | 
|  | 1484 | break; | 
|  | 1485 | case ELF_AR_FPSR_OFFSET: | 
|  | 1486 | ptr = &pt->ar_fpsr; | 
|  | 1487 | break; | 
|  | 1488 | case ELF_AR_PFS_OFFSET: | 
|  | 1489 | ptr = &pt->ar_pfs; | 
|  | 1490 | break; | 
|  | 1491 | case ELF_AR_LC_OFFSET: | 
|  | 1492 | return unw_access_ar(info, UNW_AR_LC, data, | 
|  | 1493 | write_access); | 
|  | 1494 | case ELF_AR_EC_OFFSET: | 
|  | 1495 | return unw_access_ar(info, UNW_AR_EC, data, | 
|  | 1496 | write_access); | 
|  | 1497 | case ELF_AR_CSD_OFFSET: | 
|  | 1498 | ptr = &pt->ar_csd; | 
|  | 1499 | break; | 
|  | 1500 | case ELF_AR_SSD_OFFSET: | 
|  | 1501 | ptr = &pt->ar_ssd; | 
|  | 1502 | } | 
|  | 1503 | } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) { | 
|  | 1504 | switch (addr) { | 
|  | 1505 | case ELF_CR_IIP_OFFSET: | 
|  | 1506 | ptr = &pt->cr_iip; | 
|  | 1507 | break; | 
|  | 1508 | case ELF_CFM_OFFSET: | 
|  | 1509 | urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); | 
|  | 1510 | if (write_access) { | 
|  | 1511 | if (((cfm ^ *data) & PFM_MASK) != 0) { | 
|  | 1512 | if (in_syscall(pt)) | 
|  | 1513 | convert_to_non_syscall(target, | 
|  | 1514 | pt, | 
|  | 1515 | cfm); | 
|  | 1516 | pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | 
|  | 1517 | | (*data & PFM_MASK)); | 
|  | 1518 | } | 
|  | 1519 | } else | 
|  | 1520 | *data = cfm; | 
|  | 1521 | return 0; | 
|  | 1522 | case ELF_CR_IPSR_OFFSET: | 
|  | 1523 | if (write_access) { | 
|  | 1524 | unsigned long tmp = *data; | 
|  | 1525 | /* psr.ri==3 is a reserved value: SDM 2:25 */ | 
|  | 1526 | if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) | 
|  | 1527 | tmp &= ~IA64_PSR_RI; | 
|  | 1528 | pt->cr_ipsr = ((tmp & IPSR_MASK) | 
|  | 1529 | | (pt->cr_ipsr & ~IPSR_MASK)); | 
|  | 1530 | } else | 
|  | 1531 | *data = (pt->cr_ipsr & IPSR_MASK); | 
|  | 1532 | return 0; | 
|  | 1533 | } | 
|  | 1534 | } else if (addr == ELF_NAT_OFFSET) | 
|  | 1535 | return access_nat_bits(target, pt, info, | 
|  | 1536 | data, write_access); | 
|  | 1537 | else if (addr == ELF_PR_OFFSET) | 
|  | 1538 | ptr = &pt->pr; | 
|  | 1539 | else | 
|  | 1540 | return -1; | 
|  | 1541 |  | 
|  | 1542 | if (write_access) | 
|  | 1543 | *ptr = *data; | 
|  | 1544 | else | 
|  | 1545 | *data = *ptr; | 
|  | 1546 |  | 
|  | 1547 | return 0; | 
|  | 1548 | } | 
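|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not part of this file): per the PT_AR_BSP | 
|  |  | * convention above, a debugger that wants the value ar.bsp had on | 
|  |  | * kernel entry backs out the current frame's register count: | 
|  |  | * | 
|  |  | *	unsigned long cfm, bsp, sof; | 
|  |  | *	...read ELF_CFM_OFFSET into cfm, ELF_AR_BSP_OFFSET into bsp... | 
|  |  | *	sof = cfm & 0x7f;	...CFM bits 0-6: size of frame... | 
|  |  | *	bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) bsp, | 
|  |  | *						 -(long) sof); | 
|  |  | */ | 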
|  | 1549 |  | 
|  | 1550 | static int | 
|  | 1551 | access_elf_reg(struct task_struct *target, struct unw_frame_info *info, | 
|  | 1552 | unsigned long addr, unsigned long *data, int write_access) | 
|  | 1553 | { | 
|  | 1554 | if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15)) | 
|  | 1555 | return access_elf_gpreg(target, info, addr, data, write_access); | 
|  | 1556 | else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) | 
|  | 1557 | return access_elf_breg(target, info, addr, data, write_access); | 
|  | 1558 | else | 
|  | 1559 | return access_elf_areg(target, info, addr, data, write_access); | 
|  | 1560 | } | 
|  | 1561 |  | 
|  | 1562 | void do_gpregs_get(struct unw_frame_info *info, void *arg) | 
|  | 1563 | { | 
|  | 1564 | struct pt_regs *pt; | 
|  | 1565 | struct regset_getset *dst = arg; | 
|  | 1566 | elf_greg_t tmp[16]; | 
|  | 1567 | unsigned int i, index, min_copy; | 
|  | 1568 |  | 
|  | 1569 | if (unw_unwind_to_user(info) < 0) | 
|  | 1570 | return; | 
|  | 1571 |  | 
|  | 1572 | /* | 
|  | 1573 | * coredump format: | 
|  | 1574 | *      r0-r31 | 
|  | 1575 | *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) | 
|  | 1576 | *      predicate registers (p0-p63) | 
|  | 1577 | *      b0-b7 | 
|  | 1578 | *      ip cfm user-mask | 
|  | 1579 | *      ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1580 | *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec | 
|  | 1581 | */ | 
|  | 1582 |  | 
|  | 1583 |  | 
|  | 1584 | /* Skip r0 */ | 
|  | 1585 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | 
|  | 1586 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | 
|  | 1587 | &dst->u.get.kbuf, | 
|  | 1588 | &dst->u.get.ubuf, | 
|  | 1589 | 0, ELF_GR_OFFSET(1)); | 
|  | 1590 | if (dst->ret || dst->count == 0) | 
|  | 1591 | return; | 
|  | 1592 | } | 
|  | 1593 |  | 
|  | 1594 | /* gr1 - gr15 */ | 
|  | 1595 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | 
|  | 1596 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | 
|  | 1597 | min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ? | 
|  | 1598 | (dst->pos + dst->count) : ELF_GR_OFFSET(16); | 
|  | 1599 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1600 | index++) | 
|  | 1601 | if (access_elf_reg(dst->target, info, i, | 
|  | 1602 | &tmp[index], 0) < 0) { | 
|  | 1603 | dst->ret = -EIO; | 
|  | 1604 | return; | 
|  | 1605 | } | 
|  | 1606 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1607 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1608 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | 
|  | 1609 | if (dst->ret || dst->count == 0) | 
|  | 1610 | return; | 
|  | 1611 | } | 
|  | 1612 |  | 
|  | 1613 | /* r16-r31 */ | 
|  | 1614 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | 
|  | 1615 | pt = task_pt_regs(dst->target); | 
|  | 1616 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1617 | &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16, | 
|  | 1618 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | 
|  | 1619 | if (dst->ret || dst->count == 0) | 
|  | 1620 | return; | 
|  | 1621 | } | 
|  | 1622 |  | 
|  | 1623 | /* nat, pr, b0 - b7 */ | 
|  | 1624 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | 
|  | 1625 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | 
|  | 1626 | min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ? | 
|  | 1627 | (dst->pos + dst->count) : ELF_CR_IIP_OFFSET; | 
|  | 1628 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1629 | index++) | 
|  | 1630 | if (access_elf_reg(dst->target, info, i, | 
|  | 1631 | &tmp[index], 0) < 0) { | 
|  | 1632 | dst->ret = -EIO; | 
|  | 1633 | return; | 
|  | 1634 | } | 
|  | 1635 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1636 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1637 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | 
|  | 1638 | if (dst->ret || dst->count == 0) | 
|  | 1639 | return; | 
|  | 1640 | } | 
|  | 1641 |  | 
|  | 1642 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1643 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | 
|  | 1644 | */ | 
|  | 1645 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | 
|  | 1646 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | 
|  | 1647 | min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ? | 
|  | 1648 | (dst->pos + dst->count) : ELF_AR_END_OFFSET; | 
|  | 1649 | for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t), | 
|  | 1650 | index++) | 
|  | 1651 | if (access_elf_reg(dst->target, info, i, | 
|  | 1652 | &tmp[index], 0) < 0) { | 
|  | 1653 | dst->ret = -EIO; | 
|  | 1654 | return; | 
|  | 1655 | } | 
|  | 1656 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1657 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1658 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | 
|  | 1659 | } | 
|  | 1660 | } | 
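|  |  |  | 
|  |  | /* | 
|  |  | * For reference, the layout produced above follows the ELF_*_OFFSET | 
|  |  | * definitions earlier in this file, one 8-byte elf_greg_t per slot: | 
|  |  | * r0-r31 in slots 0-31, NaT bits in 32, predicates in 33, b0-b7 in | 
|  |  | * 34-41, then ip, cfm, psr and ar.rsc through ar.ssd in slots 42-56. | 
|  |  | */ | 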
|  | 1661 |  | 
|  | 1662 | void do_gpregs_set(struct unw_frame_info *info, void *arg) | 
|  | 1663 | { | 
|  | 1664 | struct pt_regs *pt; | 
|  | 1665 | struct regset_getset *dst = arg; | 
|  | 1666 | elf_greg_t tmp[16]; | 
|  | 1667 | unsigned int i, index; | 
|  | 1668 |  | 
|  | 1669 | if (unw_unwind_to_user(info) < 0) | 
|  | 1670 | return; | 
|  | 1671 |  | 
|  | 1672 | /* Skip r0 */ | 
|  | 1673 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) { | 
|  | 1674 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | 
|  | 1675 | &dst->u.set.kbuf, | 
|  | 1676 | &dst->u.set.ubuf, | 
|  | 1677 | 0, ELF_GR_OFFSET(1)); | 
|  | 1678 | if (dst->ret || dst->count == 0) | 
|  | 1679 | return; | 
|  | 1680 | } | 
|  | 1681 |  | 
|  | 1682 | /* gr1-gr15 */ | 
|  | 1683 | if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) { | 
|  | 1684 | i = dst->pos; | 
|  | 1685 | index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t); | 
|  | 1686 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1687 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1688 | ELF_GR_OFFSET(1), ELF_GR_OFFSET(16)); | 
|  | 1689 | if (dst->ret) | 
|  | 1690 | return; | 
|  | 1691 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1692 | if (access_elf_reg(dst->target, info, i, | 
|  | 1693 | &tmp[index], 1) < 0) { | 
|  | 1694 | dst->ret = -EIO; | 
|  | 1695 | return; | 
|  | 1696 | } | 
|  | 1697 | if (dst->count == 0) | 
|  | 1698 | return; | 
|  | 1699 | } | 
|  | 1700 |  | 
|  | 1701 | /* gr16-gr31 */ | 
|  | 1702 | if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) { | 
|  | 1703 | pt = task_pt_regs(dst->target); | 
|  | 1704 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1705 | &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16, | 
|  | 1706 | ELF_GR_OFFSET(16), ELF_NAT_OFFSET); | 
|  | 1707 | if (dst->ret || dst->count == 0) | 
|  | 1708 | return; | 
|  | 1709 | } | 
|  | 1710 |  | 
|  | 1711 | /* nat, pr, b0 - b7 */ | 
|  | 1712 | if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) { | 
|  | 1713 | i = dst->pos; | 
|  | 1714 | index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t); | 
|  | 1715 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1716 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1717 | ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET); | 
|  | 1718 | if (dst->ret) | 
|  | 1719 | return; | 
|  | 1720 | for (; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1721 | if (access_elf_reg(dst->target, info, i, | 
|  | 1722 | &tmp[index], 1) < 0) { | 
|  | 1723 | dst->ret = -EIO; | 
|  | 1724 | return; | 
|  | 1725 | } | 
|  | 1726 | if (dst->count == 0) | 
|  | 1727 | return; | 
|  | 1728 | } | 
|  | 1729 |  | 
|  | 1730 | /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat | 
|  | 1731 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd | 
|  | 1732 | */ | 
|  | 1733 | if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) { | 
|  | 1734 | i = dst->pos; | 
|  | 1735 | index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t); | 
|  | 1736 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1737 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1738 | ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET); | 
|  | 1739 | if (dst->ret) | 
|  | 1740 | return; | 
|  | 1741 | for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++) | 
|  | 1742 | if (access_elf_reg(dst->target, info, i, | 
|  | 1743 | &tmp[index], 1) < 0) { | 
|  | 1744 | dst->ret = -EIO; | 
|  | 1745 | return; | 
|  | 1746 | } | 
|  | 1747 | } | 
|  | 1748 | } | 
|  | 1749 |  | 
|  | 1750 | #define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t)) | 
|  | 1751 |  | 
|  | 1752 | void do_fpregs_get(struct unw_frame_info *info, void *arg) | 
|  | 1753 | { | 
|  | 1754 | struct regset_getset *dst = arg; | 
|  | 1755 | struct task_struct *task = dst->target; | 
|  | 1756 | elf_fpreg_t tmp[30]; | 
|  | 1757 | int index, min_copy, i; | 
|  | 1758 |  | 
|  | 1759 | if (unw_unwind_to_user(info) < 0) | 
|  | 1760 | return; | 
|  | 1761 |  | 
|  | 1762 | /* Skip pos 0 and 1 */ | 
|  | 1763 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | 
|  | 1764 | dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count, | 
|  | 1765 | &dst->u.get.kbuf, | 
|  | 1766 | &dst->u.get.ubuf, | 
|  | 1767 | 0, ELF_FP_OFFSET(2)); | 
|  | 1768 | if (dst->count == 0 || dst->ret) | 
|  | 1769 | return; | 
|  | 1770 | } | 
|  | 1771 |  | 
|  | 1772 | /* fr2-fr31 */ | 
|  | 1773 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | 
|  | 1774 | index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t); | 
|  | 1775 |  | 
|  | 1776 | min_copy = min(((unsigned int)ELF_FP_OFFSET(32)), | 
|  | 1777 | dst->pos + dst->count); | 
|  | 1778 | for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t), | 
|  | 1779 | index++) | 
|  | 1780 | if (unw_get_fr(info, i / sizeof(elf_fpreg_t), | 
|  | 1781 | &tmp[index])) { | 
|  | 1782 | dst->ret = -EIO; | 
|  | 1783 | return; | 
|  | 1784 | } | 
|  | 1785 | dst->ret = user_regset_copyout(&dst->pos, &dst->count, | 
|  | 1786 | &dst->u.get.kbuf, &dst->u.get.ubuf, tmp, | 
|  | 1787 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | 
|  | 1788 | if (dst->count == 0 || dst->ret) | 
|  | 1789 | return; | 
|  | 1790 | } | 
|  | 1791 |  | 
|  | 1792 | /* fph */ | 
|  | 1793 | if (dst->count > 0) { | 
|  | 1794 | ia64_flush_fph(dst->target); | 
|  | 1795 | if (task->thread.flags & IA64_THREAD_FPH_VALID) | 
|  | 1796 | dst->ret = user_regset_copyout( | 
|  | 1797 | &dst->pos, &dst->count, | 
|  | 1798 | &dst->u.get.kbuf, &dst->u.get.ubuf, | 
|  | 1799 | &dst->target->thread.fph, | 
|  | 1800 | ELF_FP_OFFSET(32), -1); | 
|  | 1801 | else | 
|  | 1802 | /* Zero fill instead.  */ | 
|  | 1803 | dst->ret = user_regset_copyout_zero( | 
|  | 1804 | &dst->pos, &dst->count, | 
|  | 1805 | &dst->u.get.kbuf, &dst->u.get.ubuf, | 
|  | 1806 | ELF_FP_OFFSET(32), -1); | 
|  | 1807 | } | 
|  | 1808 | } | 
|  | 1809 |  | 
|  | 1810 | void do_fpregs_set(struct unw_frame_info *info, void *arg) | 
|  | 1811 | { | 
|  | 1812 | struct regset_getset *dst = arg; | 
|  | 1813 | elf_fpreg_t fpreg, tmp[30]; | 
|  | 1814 | int index, start, end; | 
|  | 1815 |  | 
|  | 1816 | if (unw_unwind_to_user(info) < 0) | 
|  | 1817 | return; | 
|  | 1818 |  | 
|  | 1819 | /* Skip pos 0 and 1 */ | 
|  | 1820 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { | 
|  | 1821 | dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count, | 
|  | 1822 | &dst->u.set.kbuf, | 
|  | 1823 | &dst->u.set.ubuf, | 
|  | 1824 | 0, ELF_FP_OFFSET(2)); | 
|  | 1825 | if (dst->count == 0 || dst->ret) | 
|  | 1826 | return; | 
|  | 1827 | } | 
|  | 1828 |  | 
|  | 1829 | /* fr2-fr31 */ | 
|  | 1830 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { | 
|  | 1831 | start = dst->pos; | 
|  | 1832 | end = min(((unsigned int)ELF_FP_OFFSET(32)), | 
|  | 1833 | dst->pos + dst->count); | 
|  | 1834 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1835 | &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, | 
|  | 1836 | ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); | 
|  | 1837 | if (dst->ret) | 
|  | 1838 | return; | 
|  | 1839 |  | 
|  | 1840 | if (start & 0xF) { /* only write high part */ | 
|  | 1841 | if (unw_get_fr(info, start / sizeof(elf_fpreg_t), | 
|  | 1842 | &fpreg)) { | 
|  | 1843 | dst->ret = -EIO; | 
|  | 1844 | return; | 
|  | 1845 | } | 
|  | 1846 | tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] | 
|  | 1847 | = fpreg.u.bits[0]; | 
|  | 1848 | start &= ~0xFUL; | 
|  | 1849 | } | 
|  | 1850 | if (end & 0xF) { /* only write low part */ | 
|  | 1851 | if (unw_get_fr(info, end / sizeof(elf_fpreg_t), | 
|  | 1852 | &fpreg)) { | 
|  | 1853 | dst->ret = -EIO; | 
|  | 1854 | return; | 
|  | 1855 | } | 
|  | 1856 | tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1] | 
|  | 1857 | = fpreg.u.bits[1]; | 
|  | 1858 | end = (end + 0xF) & ~0xFUL; | 
|  | 1859 | } | 
|  | 1860 |  | 
|  | 1861 | for ( ;	start < end ; start += sizeof(elf_fpreg_t)) { | 
|  | 1862 | index = start / sizeof(elf_fpreg_t); | 
|  | 1863 | if (unw_set_fr(info, index, tmp[index - 2])) { | 
|  | 1864 | dst->ret = -EIO; | 
|  | 1865 | return; | 
|  | 1866 | } | 
|  | 1867 | } | 
|  | 1868 | if (dst->ret || dst->count == 0) | 
|  | 1869 | return; | 
|  | 1870 | } | 
|  | 1871 |  | 
|  | 1872 | /* fph */ | 
|  | 1873 | if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) { | 
|  | 1874 | ia64_sync_fph(dst->target); | 
|  | 1875 | dst->ret = user_regset_copyin(&dst->pos, &dst->count, | 
|  | 1876 | &dst->u.set.kbuf, | 
|  | 1877 | &dst->u.set.ubuf, | 
|  | 1878 | &dst->target->thread.fph, | 
|  | 1879 | ELF_FP_OFFSET(32), -1); | 
|  | 1880 | } | 
|  | 1881 | } | 
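|  |  |  | 
|  |  | /* | 
|  |  | * Note on the fr2-fr31 splicing above: each elf_fpreg_t is 16 bytes, | 
|  |  | * but user_regset_copyin() counts in bytes, so a set request may | 
|  |  | * begin or end in the middle of a register.  An unaligned head keeps | 
|  |  | * its current low half (u.bits[0]), an unaligned tail keeps its | 
|  |  | * current high half (u.bits[1]); only complete 16-byte values are | 
|  |  | * then handed to unw_set_fr(). | 
|  |  | */ | 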
|  | 1882 |  | 
|  | 1883 | static int | 
|  | 1884 | do_regset_call(void (*call)(struct unw_frame_info *, void *), | 
|  | 1885 | struct task_struct *target, | 
|  | 1886 | const struct user_regset *regset, | 
|  | 1887 | unsigned int pos, unsigned int count, | 
|  | 1888 | const void *kbuf, const void __user *ubuf) | 
|  | 1889 | { | 
|  | 1890 | struct regset_getset info = { .target = target, .regset = regset, | 
|  | 1891 | .pos = pos, .count = count, | 
|  | 1892 | .u.set = { .kbuf = kbuf, .ubuf = ubuf }, | 
|  | 1893 | .ret = 0 }; | 
|  | 1894 |  | 
|  | 1895 | if (target == current) | 
|  | 1896 | unw_init_running(call, &info); | 
|  | 1897 | else { | 
|  | 1898 | struct unw_frame_info ufi; | 
|  | 1899 | memset(&ufi, 0, sizeof(ufi)); | 
|  | 1900 | unw_init_from_blocked_task(&ufi, target); | 
|  | 1901 | (*call)(&ufi, &info); | 
|  | 1902 | } | 
|  | 1903 |  | 
|  | 1904 | return info.ret; | 
|  | 1905 | } | 
|  | 1906 |  | 
|  | 1907 | static int | 
|  | 1908 | gpregs_get(struct task_struct *target, | 
|  | 1909 | const struct user_regset *regset, | 
|  | 1910 | unsigned int pos, unsigned int count, | 
|  | 1911 | void *kbuf, void __user *ubuf) | 
|  | 1912 | { | 
|  | 1913 | return do_regset_call(do_gpregs_get, target, regset, pos, count, | 
|  | 1914 | kbuf, ubuf); | 
|  | 1915 | } | 
|  | 1916 |  | 
|  | 1917 | static int gpregs_set(struct task_struct *target, | 
|  | 1918 | const struct user_regset *regset, | 
|  | 1919 | unsigned int pos, unsigned int count, | 
|  | 1920 | const void *kbuf, const void __user *ubuf) | 
|  | 1921 | { | 
|  | 1922 | return do_regset_call(do_gpregs_set, target, regset, pos, count, | 
|  | 1923 | kbuf, ubuf); | 
|  | 1924 | } | 
|  | 1925 |  | 
|  | 1926 | static void do_gpregs_writeback(struct unw_frame_info *info, void *arg) | 
|  | 1927 | { | 
|  | 1928 | do_sync_rbs(info, ia64_sync_user_rbs); | 
|  | 1929 | } | 
|  | 1930 |  | 
|  | 1931 | /* | 
|  | 1932 | * This is called to write back the register backing store. | 
|  | 1933 | * ptrace does this before it stops, so that a tracer reading the user | 
|  | 1934 | * memory after the thread stops will get the current register data. | 
|  | 1935 | */ | 
|  | 1936 | static int | 
|  | 1937 | gpregs_writeback(struct task_struct *target, | 
|  | 1938 | const struct user_regset *regset, | 
|  | 1939 | int now) | 
|  | 1940 | { | 
|  | 1941 | if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) | 
|  | 1942 | return 0; | 
|  | 1943 | tsk_set_notify_resume(target); | 
|  | 1944 | return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, | 
|  | 1945 | NULL, NULL); | 
|  | 1946 | } | 
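|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative tracer-side sketch (not part of this file): thanks to | 
|  |  | * this writeback, a stopped thread's stacked registers can be read | 
|  |  | * from its user backing store with ordinary data peeks: | 
|  |  | * | 
|  |  | *	errno = 0; | 
|  |  | *	long val = ptrace(PTRACE_PEEKDATA, pid, rbs_addr, 0); | 
|  |  | * | 
|  |  | * where rbs_addr (hypothetical) is an address inside the user RBS, | 
|  |  | * e.g. derived from the traced task's ar.bspstore. | 
|  |  | */ | 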
|  | 1947 |  | 
|  | 1948 | static int | 
|  | 1949 | fpregs_active(struct task_struct *target, const struct user_regset *regset) | 
|  | 1950 | { | 
|  | 1951 | return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32; | 
|  | 1952 | } | 
|  | 1953 |  | 
|  | 1954 | static int fpregs_get(struct task_struct *target, | 
|  | 1955 | const struct user_regset *regset, | 
|  | 1956 | unsigned int pos, unsigned int count, | 
|  | 1957 | void *kbuf, void __user *ubuf) | 
|  | 1958 | { | 
|  | 1959 | return do_regset_call(do_fpregs_get, target, regset, pos, count, | 
|  | 1960 | kbuf, ubuf); | 
|  | 1961 | } | 
|  | 1962 |  | 
|  | 1963 | static int fpregs_set(struct task_struct *target, | 
|  | 1964 | const struct user_regset *regset, | 
|  | 1965 | unsigned int pos, unsigned int count, | 
|  | 1966 | const void *kbuf, const void __user *ubuf) | 
|  | 1967 | { | 
|  | 1968 | return do_regset_call(do_fpregs_set, target, regset, pos, count, | 
|  | 1969 | kbuf, ubuf); | 
|  | 1970 | } | 
|  | 1971 |  | 
| Shaohua Li | 4cd8dc8 | 2008-02-28 16:09:42 +0800 | [diff] [blame] | 1972 | static int | 
|  | 1973 | access_uarea(struct task_struct *child, unsigned long addr, | 
|  | 1974 | unsigned long *data, int write_access) | 
|  | 1975 | { | 
|  | 1976 | unsigned int pos = -1; /* an invalid value */ | 
|  | 1977 | int ret; | 
|  | 1978 | unsigned long *ptr, regnum; | 
|  | 1979 |  | 
|  | 1980 | if ((addr & 0x7) != 0) { | 
|  | 1981 | dprintk("ptrace: unaligned register address 0x%lx\n", addr); | 
|  | 1982 | return -1; | 
|  | 1983 | } | 
|  | 1984 | if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) || | 
|  | 1985 | (addr >= PT_R7 + 8 && addr < PT_B1) || | 
|  | 1986 | (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) || | 
|  | 1987 | (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) { | 
|  | 1988 | dprintk("ptrace: rejecting access to register " | 
|  | 1989 | "address 0x%lx\n", addr); | 
|  | 1990 | return -1; | 
|  | 1991 | } | 
|  | 1992 |  | 
|  | 1993 | switch (addr) { | 
|  | 1994 | case PT_F32 ... (PT_F127 + 15): | 
|  | 1995 | pos = addr - PT_F32 + ELF_FP_OFFSET(32); | 
|  | 1996 | break; | 
|  | 1997 | case PT_F2 ... (PT_F5 + 15): | 
|  | 1998 | pos = addr - PT_F2 + ELF_FP_OFFSET(2); | 
|  | 1999 | break; | 
|  | 2000 | case PT_F10 ... (PT_F31 + 15): | 
|  | 2001 | pos = addr - PT_F10 + ELF_FP_OFFSET(10); | 
|  | 2002 | break; | 
|  | 2003 | case PT_F6 ... (PT_F9 + 15): | 
|  | 2004 | pos = addr - PT_F6 + ELF_FP_OFFSET(6); | 
|  | 2005 | break; | 
|  | 2006 | } | 
|  | 2007 |  | 
|  | 2008 | if (pos != -1) { | 
|  | 2009 | if (write_access) | 
|  | 2010 | ret = fpregs_set(child, NULL, pos, | 
|  | 2011 | sizeof(unsigned long), data, NULL); | 
|  | 2012 | else | 
|  | 2013 | ret = fpregs_get(child, NULL, pos, | 
|  | 2014 | sizeof(unsigned long), data, NULL); | 
|  | 2015 | if (ret != 0) | 
|  | 2016 | return -1; | 
|  | 2017 | return 0; | 
|  | 2018 | } | 
|  | 2019 |  | 
|  | 2020 | switch (addr) { | 
|  | 2021 | case PT_NAT_BITS: | 
|  | 2022 | pos = ELF_NAT_OFFSET; | 
|  | 2023 | break; | 
|  | 2024 | case PT_R4 ... PT_R7: | 
|  | 2025 | pos = addr - PT_R4 + ELF_GR_OFFSET(4); | 
|  | 2026 | break; | 
|  | 2027 | case PT_B1 ... PT_B5: | 
|  | 2028 | pos = addr - PT_B1 + ELF_BR_OFFSET(1); | 
|  | 2029 | break; | 
|  | 2030 | case PT_AR_EC: | 
|  | 2031 | pos = ELF_AR_EC_OFFSET; | 
|  | 2032 | break; | 
|  | 2033 | case PT_AR_LC: | 
|  | 2034 | pos = ELF_AR_LC_OFFSET; | 
|  | 2035 | break; | 
|  | 2036 | case PT_CR_IPSR: | 
|  | 2037 | pos = ELF_CR_IPSR_OFFSET; | 
|  | 2038 | break; | 
|  | 2039 | case PT_CR_IIP: | 
|  | 2040 | pos = ELF_CR_IIP_OFFSET; | 
|  | 2041 | break; | 
|  | 2042 | case PT_CFM: | 
|  | 2043 | pos = ELF_CFM_OFFSET; | 
|  | 2044 | break; | 
|  | 2045 | case PT_AR_UNAT: | 
|  | 2046 | pos = ELF_AR_UNAT_OFFSET; | 
|  | 2047 | break; | 
|  | 2048 | case PT_AR_PFS: | 
|  | 2049 | pos = ELF_AR_PFS_OFFSET; | 
|  | 2050 | break; | 
|  | 2051 | case PT_AR_RSC: | 
|  | 2052 | pos = ELF_AR_RSC_OFFSET; | 
|  | 2053 | break; | 
|  | 2054 | case PT_AR_RNAT: | 
|  | 2055 | pos = ELF_AR_RNAT_OFFSET; | 
|  | 2056 | break; | 
|  | 2057 | case PT_AR_BSPSTORE: | 
|  | 2058 | pos = ELF_AR_BSPSTORE_OFFSET; | 
|  | 2059 | break; | 
|  | 2060 | case PT_PR: | 
|  | 2061 | pos = ELF_PR_OFFSET; | 
|  | 2062 | break; | 
|  | 2063 | case PT_B6: | 
|  | 2064 | pos = ELF_BR_OFFSET(6); | 
|  | 2065 | break; | 
|  | 2066 | case PT_AR_BSP: | 
|  | 2067 | pos = ELF_AR_BSP_OFFSET; | 
|  | 2068 | break; | 
|  | 2069 | case PT_R1 ... PT_R3: | 
|  | 2070 | pos = addr - PT_R1 + ELF_GR_OFFSET(1); | 
|  | 2071 | break; | 
|  | 2072 | case PT_R12 ... PT_R15: | 
|  | 2073 | pos = addr - PT_R12 + ELF_GR_OFFSET(12); | 
|  | 2074 | break; | 
|  | 2075 | case PT_R8 ... PT_R11: | 
|  | 2076 | pos = addr - PT_R8 + ELF_GR_OFFSET(8); | 
|  | 2077 | break; | 
|  | 2078 | case PT_R16 ... PT_R31: | 
|  | 2079 | pos = addr - PT_R16 + ELF_GR_OFFSET(16); | 
|  | 2080 | break; | 
|  | 2081 | case PT_AR_CCV: | 
|  | 2082 | pos = ELF_AR_CCV_OFFSET; | 
|  | 2083 | break; | 
|  | 2084 | case PT_AR_FPSR: | 
|  | 2085 | pos = ELF_AR_FPSR_OFFSET; | 
|  | 2086 | break; | 
|  | 2087 | case PT_B0: | 
|  | 2088 | pos = ELF_BR_OFFSET(0); | 
|  | 2089 | break; | 
|  | 2090 | case PT_B7: | 
|  | 2091 | pos = ELF_BR_OFFSET(7); | 
|  | 2092 | break; | 
|  | 2093 | case PT_AR_CSD: | 
|  | 2094 | pos = ELF_AR_CSD_OFFSET; | 
|  | 2095 | break; | 
|  | 2096 | case PT_AR_SSD: | 
|  | 2097 | pos = ELF_AR_SSD_OFFSET; | 
|  | 2098 | break; | 
|  | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | if (pos != -1) { | 
|  | 2102 | if (write_access) | 
|  | 2103 | ret = gpregs_set(child, NULL, pos, | 
|  | 2104 | sizeof(unsigned long), data, NULL); | 
|  | 2105 | else | 
|  | 2106 | ret = gpregs_get(child, NULL, pos, | 
|  | 2107 | sizeof(unsigned long), data, NULL); | 
|  | 2108 | if (ret != 0) | 
|  | 2109 | return -1; | 
|  | 2110 | return 0; | 
|  | 2111 | } | 
|  | 2112 |  | 
|  | 2113 | /* access debug registers */ | 
|  | 2114 | if (addr >= PT_IBR) { | 
|  | 2115 | regnum = (addr - PT_IBR) >> 3; | 
|  | 2116 | ptr = &child->thread.ibr[0]; | 
|  | 2117 | } else { | 
|  | 2118 | regnum = (addr - PT_DBR) >> 3; | 
|  | 2119 | ptr = &child->thread.dbr[0]; | 
|  | 2120 | } | 
|  | 2121 |  | 
|  | 2122 | if (regnum >= 8) { | 
|  | 2123 | dprintk("ptrace: rejecting access to register " | 
|  | 2124 | "address 0x%lx\n", addr); | 
|  | 2125 | return -1; | 
|  | 2126 | } | 
|  | 2127 | #ifdef CONFIG_PERFMON | 
|  | 2128 | /* | 
|  | 2129 | * Check if debug registers are used by perfmon. This | 
|  | 2130 | * test must be done once we know that we can do the | 
|  | 2131 | * operation, i.e. the arguments are all valid, but | 
|  | 2132 | * before we start modifying the state. | 
|  | 2133 | * | 
|  | 2134 | * Perfmon needs to keep a count of how many processes | 
|  | 2135 | * are trying to modify the debug registers for system | 
|  | 2136 | * wide monitoring sessions. | 
|  | 2137 | * | 
|  | 2138 | * We also include read accesses here, because they may | 
|  | 2139 | * cause the PMU-installed debug register state | 
|  | 2140 | * (dbr[], ibr[]) to be reset. The two arrays are also | 
|  | 2141 | * used by perfmon, but we do not use | 
|  | 2142 | * IA64_THREAD_DBG_VALID. The registers are restored | 
|  | 2143 | * by the PMU context switch code. | 
|  | 2144 | */ | 
|  | 2145 | if (pfm_use_debug_registers(child)) | 
|  | 2146 | return -1; | 
|  | 2147 | #endif | 
|  | 2148 |  | 
|  | 2149 | if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { | 
|  | 2150 | child->thread.flags |= IA64_THREAD_DBG_VALID; | 
|  | 2151 | memset(child->thread.dbr, 0, | 
|  | 2152 | sizeof(child->thread.dbr)); | 
|  | 2153 | memset(child->thread.ibr, 0, | 
|  | 2154 | sizeof(child->thread.ibr)); | 
|  | 2155 | } | 
|  | 2156 |  | 
|  | 2157 | ptr += regnum; | 
|  | 2158 |  | 
|  | 2159 | if ((regnum & 1) && write_access) { | 
|  | 2160 | /* don't let the user set kernel-level breakpoints: */ | 
|  | 2161 | *ptr = *data & ~(7UL << 56); | 
|  | 2162 | return 0; | 
|  | 2163 | } | 
|  | 2164 | if (write_access) | 
|  | 2165 | *ptr = *data; | 
|  | 2166 | else | 
|  | 2167 | *data = *ptr; | 
|  | 2168 | return 0; | 
|  | 2169 | } | 
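|  |  |  | 
|  |  | /* | 
|  |  | * The ~(7UL << 56) mask above is assumed here to clear the plm0-plm2 | 
|  |  | * privilege-level-mask bits (bits 56-58) of an IA-64 debug mask | 
|  |  | * register, so a traced process can only arm breakpoints that fire | 
|  |  | * at user level (plm3, bit 59, is left writable). | 
|  |  | */ | 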
|  | 2170 |  | 
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 2171 | static const struct user_regset native_regsets[] = { | 
|  | 2172 | { | 
|  | 2173 | .core_note_type = NT_PRSTATUS, | 
|  | 2174 | .n = ELF_NGREG, | 
|  | 2175 | .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), | 
|  | 2176 | .get = gpregs_get, .set = gpregs_set, | 
|  | 2177 | .writeback = gpregs_writeback | 
|  | 2178 | }, | 
|  | 2179 | { | 
|  | 2180 | .core_note_type = NT_PRFPREG, | 
|  | 2181 | .n = ELF_NFPREG, | 
|  | 2182 | .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), | 
|  | 2183 | .get = fpregs_get, .set = fpregs_set, .active = fpregs_active | 
|  | 2184 | }, | 
|  | 2185 | }; | 
|  | 2186 |  | 
|  | 2187 | static const struct user_regset_view user_ia64_view = { | 
|  | 2188 | .name = "ia64", | 
|  | 2189 | .e_machine = EM_IA_64, | 
|  | 2190 | .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) | 
|  | 2191 | }; | 
|  | 2192 |  | 
|  | 2193 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) | 
|  | 2194 | { | 
| Shaohua Li | 7552921 | 2008-02-28 16:09:33 +0800 | [diff] [blame] | 2195 | #ifdef CONFIG_IA32_SUPPORT | 
|  | 2196 | extern const struct user_regset_view user_ia32_view; | 
|  | 2197 | if (IS_IA32_PROCESS(task_pt_regs(tsk))) | 
|  | 2198 | return &user_ia32_view; | 
|  | 2199 | #endif | 
| Shaohua Li | c70f8f6 | 2008-02-28 16:47:50 +0800 | [diff] [blame] | 2200 | return &user_ia64_view; | 
|  | 2201 | } |
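|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch (not part of this file): generic code such as | 
|  |  | * the ELF core dumper consumes this view roughly as follows -- | 
|  |  | * | 
|  |  | *	const struct user_regset_view *view = task_user_regset_view(tsk); | 
|  |  | *	const struct user_regset *rs = &view->regsets[0];  ...NT_PRSTATUS... | 
|  |  | *	elf_gregset_t gregs; | 
|  |  | *	rs->get(tsk, rs, 0, sizeof(gregs), &gregs, NULL); | 
|  |  | */ | 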