blob: 5b32b6d062b42b97aad510e7fc8d16c9bc2a0c9d [file] [log] [blame]
/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - per-thread and per-cpu allocation of BTS and PEBS
 * - buffer memory allocation (optional)
 * - buffer overflow handling
 * - buffer access
 *
 * It assumes:
 * - get_task_struct on all parameter tasks
 * - current is allowed to trace parameter tasks
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */
22
Markus Metzger93fa7632008-04-08 11:01:58 +020023
24#ifdef CONFIG_X86_DS
25
Markus Metzgereee3af42008-01-30 13:31:09 +010026#include <asm/ds.h>
27
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/slab.h>
Markus Metzger93fa7632008-04-08 11:01:58 +020031#include <linux/sched.h>
32
33
/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the size of the DS structure in bytes */
	unsigned char sizeof_ds;
	/* the size of one pointer-typed field in the DS structure in
	   bytes; this covers the first 8 fields related to buffer
	   management. */
	unsigned char sizeof_field;
	/* the size of a BTS/PEBS record in bytes; indexed by
	   enum ds_qualifier (ds_bts / ds_pebs) */
	unsigned char sizeof_rec[2];
};
/* the configuration selected for the running cpu; all-zero until
   ds_init_intel() recognizes the processor */
static struct ds_configuration ds_cfg;
Markus Metzgereee3af42008-01-30 13:31:09 +010047
48
/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */
84
/* the four buffer-management fields per region, in DS-area order */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

/* selects the BTS or the PEBS region of the DS area */
enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};
96
Markus Metzger93fa7632008-04-08 11:01:58 +020097static inline unsigned long ds_get(const unsigned char *base,
98 enum ds_qualifier qual, enum ds_field field)
99{
100 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
101 return *(unsigned long *)base;
102}
103
104static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
105 enum ds_field field, unsigned long value)
106{
107 base += (ds_cfg.sizeof_field * (field + (4 * qual)));
108 (*(unsigned long *)base) = value;
109}
110
Markus Metzgereee3af42008-01-30 13:31:09 +0100111
112/*
Markus Metzger93fa7632008-04-08 11:01:58 +0200113 * Locking is done only for allocating BTS or PEBS resources and for
114 * guarding context and buffer memory allocation.
115 *
116 * Most functions require the current task to own the ds context part
117 * they are going to access. All the locking is done when validating
118 * access to the context.
Markus Metzgereee3af42008-01-30 13:31:09 +0100119 */
Markus Metzger93fa7632008-04-08 11:01:58 +0200120static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
Markus Metzgereee3af42008-01-30 13:31:09 +0100121
Markus Metzger93fa7632008-04-08 11:01:58 +0200122/*
123 * Validate that the current task is allowed to access the BTS/PEBS
124 * buffer of the parameter task.
125 *
126 * Returns 0, if access is granted; -Eerrno, otherwise.
127 */
128static inline int ds_validate_access(struct ds_context *context,
129 enum ds_qualifier qual)
Markus Metzgereee3af42008-01-30 13:31:09 +0100130{
Markus Metzger93fa7632008-04-08 11:01:58 +0200131 if (!context)
132 return -EPERM;
Markus Metzgereee3af42008-01-30 13:31:09 +0100133
Markus Metzger93fa7632008-04-08 11:01:58 +0200134 if (context->owner[qual] == current)
Markus Metzgera95d67f2008-01-30 13:31:20 +0100135 return 0;
136
Markus Metzger93fa7632008-04-08 11:01:58 +0200137 return -EPERM;
Markus Metzgera95d67f2008-01-30 13:31:20 +0100138}
139
Markus Metzger93fa7632008-04-08 11:01:58 +0200140
/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *   =0  no tracers
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * The below functions to get and put tracers and to check the
 * allocation type require the ds_lock to be held by the caller.
 *
 * Tracers essentially gives the number of ds contexts for a certain
 * type of allocation.
 */
static long tracers;

static inline void get_tracer(struct task_struct *task)
{
	if (task)
		tracers++;
	else
		tracers--;
}

static inline void put_tracer(struct task_struct *task)
{
	if (task)
		tracers--;
	else
		tracers++;
}

/* returns non-zero if an allocation of the requested type is allowed */
static inline int check_tracer(struct task_struct *task)
{
	return task ? (tracers >= 0) : (tracers <= 0);
}
177
178/*
179 * The DS context is either attached to a thread or to a cpu:
180 * - in the former case, the thread_struct contains a pointer to the
181 * attached context.
182 * - in the latter case, we use a static array of per-cpu context
183 * pointers.
184 *
185 * Contexts are use-counted. They are allocated on first access and
186 * deallocated when the last user puts the context.
187 *
188 * We distinguish between an allocating and a non-allocating get of a
189 * context:
190 * - the allocating get is used for requesting BTS/PEBS resources. It
191 * requires the caller to hold the global ds_lock.
192 * - the non-allocating get is used for all other cases. A
193 * non-existing context indicates an error. It acquires and releases
194 * the ds_lock itself for obtaining the context.
195 *
196 * A context and its DS configuration are allocated and deallocated
197 * together. A context always has a DS configuration of the
198 * appropriate size.
199 */
200static DEFINE_PER_CPU(struct ds_context *, system_context);
201
202#define this_system_context per_cpu(system_context, smp_processor_id())
203
204/*
205 * Returns the pointer to the parameter task's context or to the
206 * system-wide context, if task is NULL.
207 *
208 * Increases the use count of the returned context, if not NULL.
209 */
210static inline struct ds_context *ds_get_context(struct task_struct *task)
211{
212 struct ds_context *context;
213
214 spin_lock(&ds_lock);
215
216 context = (task ? task->thread.ds_ctx : this_system_context);
217 if (context)
218 context->count++;
219
220 spin_unlock(&ds_lock);
221
222 return context;
223}
224
225/*
226 * Same as ds_get_context, but allocates the context and it's DS
227 * structure, if necessary; returns NULL; if out of memory.
228 *
229 * pre: requires ds_lock to be held
230 */
231static inline struct ds_context *ds_alloc_context(struct task_struct *task)
232{
233 struct ds_context **p_context =
234 (task ? &task->thread.ds_ctx : &this_system_context);
235 struct ds_context *context = *p_context;
236
237 if (!context) {
238 context = kzalloc(sizeof(*context), GFP_KERNEL);
239
240 if (!context)
241 return 0;
242
243 context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
244 if (!context->ds) {
245 kfree(context);
246 return 0;
247 }
248
249 *p_context = context;
250
251 context->this = p_context;
252 context->task = task;
253
254 if (task)
255 set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
256
257 if (!task || (task == current))
258 wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
259
260 get_tracer(task);
261 }
262
263 context->count++;
264
265 return context;
266}
267
268/*
269 * Decreases the use count of the parameter context, if not NULL.
270 * Deallocates the context, if the use count reaches zero.
271 */
272static inline void ds_put_context(struct ds_context *context)
273{
274 if (!context)
275 return;
276
277 spin_lock(&ds_lock);
278
279 if (--context->count)
280 goto out;
281
282 *(context->this) = 0;
283
284 if (context->task)
285 clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
286
287 if (!context->task || (context->task == current))
288 wrmsrl(MSR_IA32_DS_AREA, 0);
289
290 put_tracer(context->task);
291
292 /* free any leftover buffers from tracers that did not
293 * deallocate them properly. */
294 kfree(context->buffer[ds_bts]);
295 kfree(context->buffer[ds_pebs]);
296 kfree(context->ds);
297 kfree(context);
298 out:
299 spin_unlock(&ds_lock);
300}
301
302
303/*
304 * Handle a buffer overflow
305 *
306 * task: the task whose buffers are overflowing;
307 * NULL for a buffer overflow on the current cpu
308 * context: the ds context
309 * qual: the buffer type
310 */
311static void ds_overflow(struct task_struct *task, struct ds_context *context,
312 enum ds_qualifier qual)
313{
314 if (!context)
315 return;
316
317 if (context->callback[qual])
318 (*context->callback[qual])(task);
319
320 /* todo: do some more overflow handling */
321}
322
323
324/*
325 * Allocate a non-pageable buffer of the parameter size.
326 * Checks the memory and the locked memory rlimit.
327 *
328 * Returns the buffer, if successful;
329 * NULL, if out of memory or rlimit exceeded.
330 *
331 * size: the requested buffer size in bytes
332 * pages (out): if not NULL, contains the number of pages reserved
333 */
334static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
335{
336 unsigned long rlim, vm, pgsz;
337 void *buffer;
338
339 pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
340
341 rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
342 vm = current->mm->total_vm + pgsz;
343 if (rlim < vm)
Markus Metzgera95d67f2008-01-30 13:31:20 +0100344 return 0;
Markus Metzger93fa7632008-04-08 11:01:58 +0200345
346 rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
347 vm = current->mm->locked_vm + pgsz;
348 if (rlim < vm)
349 return 0;
350
351 buffer = kzalloc(size, GFP_KERNEL);
352 if (!buffer)
353 return 0;
354
355 current->mm->total_vm += pgsz;
356 current->mm->locked_vm += pgsz;
357
358 if (pages)
359 *pages = pgsz;
360
361 return buffer;
Markus Metzgera95d67f2008-01-30 13:31:20 +0100362}
363
Markus Metzger93fa7632008-04-08 11:01:58 +0200364static int ds_request(struct task_struct *task, void *base, size_t size,
365 ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
Markus Metzgera95d67f2008-01-30 13:31:20 +0100366{
Markus Metzger93fa7632008-04-08 11:01:58 +0200367 struct ds_context *context;
368 unsigned long buffer, adj;
369 const unsigned long alignment = (1 << 3);
370 int error = 0;
Markus Metzgera95d67f2008-01-30 13:31:20 +0100371
Markus Metzger93fa7632008-04-08 11:01:58 +0200372 if (!ds_cfg.sizeof_ds)
Markus Metzgereee3af42008-01-30 13:31:09 +0100373 return -EOPNOTSUPP;
374
Markus Metzger93fa7632008-04-08 11:01:58 +0200375 /* we require some space to do alignment adjustments below */
376 if (size < (alignment + ds_cfg.sizeof_rec[qual]))
Markus Metzgereee3af42008-01-30 13:31:09 +0100377 return -EINVAL;
378
Markus Metzger93fa7632008-04-08 11:01:58 +0200379 /* buffer overflow notification is not yet implemented */
380 if (ovfl)
Markus Metzgereee3af42008-01-30 13:31:09 +0100381 return -EOPNOTSUPP;
382
Markus Metzgereee3af42008-01-30 13:31:09 +0100383
Markus Metzger93fa7632008-04-08 11:01:58 +0200384 spin_lock(&ds_lock);
Markus Metzgereee3af42008-01-30 13:31:09 +0100385
Markus Metzger93fa7632008-04-08 11:01:58 +0200386 if (!check_tracer(task))
387 return -EPERM;
Markus Metzgereee3af42008-01-30 13:31:09 +0100388
Markus Metzger93fa7632008-04-08 11:01:58 +0200389 error = -ENOMEM;
390 context = ds_alloc_context(task);
391 if (!context)
392 goto out_unlock;
Markus Metzgereee3af42008-01-30 13:31:09 +0100393
Markus Metzger93fa7632008-04-08 11:01:58 +0200394 error = -EALREADY;
395 if (context->owner[qual] == current)
396 goto out_unlock;
397 error = -EPERM;
398 if (context->owner[qual] != 0)
399 goto out_unlock;
400 context->owner[qual] = current;
Markus Metzgereee3af42008-01-30 13:31:09 +0100401
Markus Metzger93fa7632008-04-08 11:01:58 +0200402 spin_unlock(&ds_lock);
403
404
405 error = -ENOMEM;
406 if (!base) {
407 base = ds_allocate_buffer(size, &context->pages[qual]);
408 if (!base)
409 goto out_release;
410
411 context->buffer[qual] = base;
412 }
413 error = 0;
414
415 context->callback[qual] = ovfl;
416
417 /* adjust the buffer address and size to meet alignment
418 * constraints:
419 * - buffer is double-word aligned
420 * - size is multiple of record size
421 *
422 * We checked the size at the very beginning; we have enough
423 * space to do the adjustment.
424 */
425 buffer = (unsigned long)base;
426
427 adj = ALIGN(buffer, alignment) - buffer;
428 buffer += adj;
429 size -= adj;
430
431 size /= ds_cfg.sizeof_rec[qual];
432 size *= ds_cfg.sizeof_rec[qual];
433
434 ds_set(context->ds, qual, ds_buffer_base, buffer);
435 ds_set(context->ds, qual, ds_index, buffer);
436 ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
437
438 if (ovfl) {
439 /* todo: select a suitable interrupt threshold */
440 } else
441 ds_set(context->ds, qual,
442 ds_interrupt_threshold, buffer + size + 1);
443
444 /* we keep the context until ds_release */
445 return error;
446
447 out_release:
448 context->owner[qual] = 0;
449 ds_put_context(context);
450 return error;
451
452 out_unlock:
453 spin_unlock(&ds_lock);
454 ds_put_context(context);
455 return error;
456}
457
458int ds_request_bts(struct task_struct *task, void *base, size_t size,
459 ds_ovfl_callback_t ovfl)
460{
461 return ds_request(task, base, size, ovfl, ds_bts);
462}
463
464int ds_request_pebs(struct task_struct *task, void *base, size_t size,
465 ds_ovfl_callback_t ovfl)
466{
467 return ds_request(task, base, size, ovfl, ds_pebs);
468}
469
470static int ds_release(struct task_struct *task, enum ds_qualifier qual)
471{
472 struct ds_context *context;
473 int error;
474
475 context = ds_get_context(task);
476 error = ds_validate_access(context, qual);
477 if (error < 0)
478 goto out;
479
480 kfree(context->buffer[qual]);
481 context->buffer[qual] = 0;
482
483 current->mm->total_vm -= context->pages[qual];
484 current->mm->locked_vm -= context->pages[qual];
485 context->pages[qual] = 0;
486 context->owner[qual] = 0;
487
488 /*
489 * we put the context twice:
490 * once for the ds_get_context
491 * once for the corresponding ds_request
492 */
493 ds_put_context(context);
494 out:
495 ds_put_context(context);
496 return error;
497}
498
499int ds_release_bts(struct task_struct *task)
500{
501 return ds_release(task, ds_bts);
502}
503
504int ds_release_pebs(struct task_struct *task)
505{
506 return ds_release(task, ds_pebs);
507}
508
509static int ds_get_index(struct task_struct *task, size_t *pos,
510 enum ds_qualifier qual)
511{
512 struct ds_context *context;
513 unsigned long base, index;
514 int error;
515
516 context = ds_get_context(task);
517 error = ds_validate_access(context, qual);
518 if (error < 0)
519 goto out;
520
521 base = ds_get(context->ds, qual, ds_buffer_base);
522 index = ds_get(context->ds, qual, ds_index);
523
524 error = ((index - base) / ds_cfg.sizeof_rec[qual]);
525 if (pos)
526 *pos = error;
527 out:
528 ds_put_context(context);
529 return error;
530}
531
532int ds_get_bts_index(struct task_struct *task, size_t *pos)
533{
534 return ds_get_index(task, pos, ds_bts);
535}
536
537int ds_get_pebs_index(struct task_struct *task, size_t *pos)
538{
539 return ds_get_index(task, pos, ds_pebs);
540}
541
542static int ds_get_end(struct task_struct *task, size_t *pos,
543 enum ds_qualifier qual)
544{
545 struct ds_context *context;
546 unsigned long base, end;
547 int error;
548
549 context = ds_get_context(task);
550 error = ds_validate_access(context, qual);
551 if (error < 0)
552 goto out;
553
554 base = ds_get(context->ds, qual, ds_buffer_base);
555 end = ds_get(context->ds, qual, ds_absolute_maximum);
556
557 error = ((end - base) / ds_cfg.sizeof_rec[qual]);
558 if (pos)
559 *pos = error;
560 out:
561 ds_put_context(context);
562 return error;
563}
564
565int ds_get_bts_end(struct task_struct *task, size_t *pos)
566{
567 return ds_get_end(task, pos, ds_bts);
568}
569
570int ds_get_pebs_end(struct task_struct *task, size_t *pos)
571{
572 return ds_get_end(task, pos, ds_pebs);
573}
574
575static int ds_access(struct task_struct *task, size_t index,
576 const void **record, enum ds_qualifier qual)
577{
578 struct ds_context *context;
579 unsigned long base, idx;
580 int error;
581
582 if (!record)
Markus Metzgereee3af42008-01-30 13:31:09 +0100583 return -EINVAL;
Markus Metzger93fa7632008-04-08 11:01:58 +0200584
585 context = ds_get_context(task);
586 error = ds_validate_access(context, qual);
587 if (error < 0)
588 goto out;
589
590 base = ds_get(context->ds, qual, ds_buffer_base);
591 idx = base + (index * ds_cfg.sizeof_rec[qual]);
592
593 error = -EINVAL;
594 if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
595 goto out;
596
597 *record = (const void *)idx;
598 error = ds_cfg.sizeof_rec[qual];
599 out:
600 ds_put_context(context);
601 return error;
602}
603
604int ds_access_bts(struct task_struct *task, size_t index, const void **record)
605{
606 return ds_access(task, index, record, ds_bts);
607}
608
609int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
610{
611 return ds_access(task, index, record, ds_pebs);
612}
613
614static int ds_write(struct task_struct *task, const void *record, size_t size,
615 enum ds_qualifier qual, int force)
616{
617 struct ds_context *context;
618 int error;
619
620 if (!record)
621 return -EINVAL;
622
623 error = -EPERM;
624 context = ds_get_context(task);
625 if (!context)
626 goto out;
627
628 if (!force) {
629 error = ds_validate_access(context, qual);
630 if (error < 0)
631 goto out;
Markus Metzgereee3af42008-01-30 13:31:09 +0100632 }
633
Markus Metzger93fa7632008-04-08 11:01:58 +0200634 error = 0;
635 while (size) {
636 unsigned long base, index, end, write_end, int_th;
637 unsigned long write_size, adj_write_size;
Markus Metzgereee3af42008-01-30 13:31:09 +0100638
Markus Metzger93fa7632008-04-08 11:01:58 +0200639 /*
640 * write as much as possible without producing an
641 * overflow interrupt.
642 *
643 * interrupt_threshold must either be
644 * - bigger than absolute_maximum or
645 * - point to a record between buffer_base and absolute_maximum
646 *
647 * index points to a valid record.
648 */
649 base = ds_get(context->ds, qual, ds_buffer_base);
650 index = ds_get(context->ds, qual, ds_index);
651 end = ds_get(context->ds, qual, ds_absolute_maximum);
652 int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
653
654 write_end = min(end, int_th);
655
656 /* if we are already beyond the interrupt threshold,
657 * we fill the entire buffer */
658 if (write_end <= index)
659 write_end = end;
660
661 if (write_end <= index)
662 goto out;
663
664 write_size = min((unsigned long) size, write_end - index);
665 memcpy((void *)index, record, write_size);
666
667 record = (const char *)record + write_size;
668 size -= write_size;
669 error += write_size;
670
671 adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
672 adj_write_size *= ds_cfg.sizeof_rec[qual];
673
674 /* zero out trailing bytes */
675 memset((char *)index + write_size, 0,
676 adj_write_size - write_size);
677 index += adj_write_size;
678
679 if (index >= end)
680 index = base;
681 ds_set(context->ds, qual, ds_index, index);
682
683 if (index >= int_th)
684 ds_overflow(task, context, qual);
685 }
686
687 out:
688 ds_put_context(context);
689 return error;
Markus Metzgereee3af42008-01-30 13:31:09 +0100690}
691
Markus Metzger93fa7632008-04-08 11:01:58 +0200692int ds_write_bts(struct task_struct *task, const void *record, size_t size)
Markus Metzgereee3af42008-01-30 13:31:09 +0100693{
Markus Metzger93fa7632008-04-08 11:01:58 +0200694 return ds_write(task, record, size, ds_bts, /* force = */ 0);
Markus Metzgereee3af42008-01-30 13:31:09 +0100695}
696
Markus Metzger93fa7632008-04-08 11:01:58 +0200697int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
698{
699 return ds_write(task, record, size, ds_pebs, /* force = */ 0);
700}
Markus Metzgereee3af42008-01-30 13:31:09 +0100701
Markus Metzger93fa7632008-04-08 11:01:58 +0200702int ds_unchecked_write_bts(struct task_struct *task,
703 const void *record, size_t size)
704{
705 return ds_write(task, record, size, ds_bts, /* force = */ 1);
706}
Markus Metzgereee3af42008-01-30 13:31:09 +0100707
Markus Metzger93fa7632008-04-08 11:01:58 +0200708int ds_unchecked_write_pebs(struct task_struct *task,
709 const void *record, size_t size)
710{
711 return ds_write(task, record, size, ds_pebs, /* force = */ 1);
712}
713
714static int ds_reset_or_clear(struct task_struct *task,
715 enum ds_qualifier qual, int clear)
716{
717 struct ds_context *context;
718 unsigned long base, end;
719 int error;
720
721 context = ds_get_context(task);
722 error = ds_validate_access(context, qual);
723 if (error < 0)
724 goto out;
725
726 base = ds_get(context->ds, qual, ds_buffer_base);
727 end = ds_get(context->ds, qual, ds_absolute_maximum);
728
729 if (clear)
730 memset((void *)base, 0, end - base);
731
732 ds_set(context->ds, qual, ds_index, base);
733
734 error = 0;
735 out:
736 ds_put_context(context);
737 return error;
738}
739
740int ds_reset_bts(struct task_struct *task)
741{
742 return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
743}
744
745int ds_reset_pebs(struct task_struct *task)
746{
747 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
748}
749
750int ds_clear_bts(struct task_struct *task)
751{
752 return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
753}
754
755int ds_clear_pebs(struct task_struct *task)
756{
757 return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
758}
759
760int ds_get_pebs_reset(struct task_struct *task, u64 *value)
761{
762 struct ds_context *context;
763 int error;
764
765 if (!value)
766 return -EINVAL;
767
768 context = ds_get_context(task);
769 error = ds_validate_access(context, ds_pebs);
770 if (error < 0)
771 goto out;
772
773 *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
774
775 error = 0;
776 out:
777 ds_put_context(context);
778 return error;
779}
780
781int ds_set_pebs_reset(struct task_struct *task, u64 value)
782{
783 struct ds_context *context;
784 int error;
785
786 context = ds_get_context(task);
787 error = ds_validate_access(context, ds_pebs);
788 if (error < 0)
789 goto out;
790
791 *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
792
793 error = 0;
794 out:
795 ds_put_context(context);
796 return error;
797}
798
799static const struct ds_configuration ds_cfg_var = {
800 .sizeof_ds = sizeof(long) * 12,
801 .sizeof_field = sizeof(long),
802 .sizeof_rec[ds_bts] = sizeof(long) * 3,
803 .sizeof_rec[ds_pebs] = sizeof(long) * 10
804};
805static const struct ds_configuration ds_cfg_64 = {
806 .sizeof_ds = 8 * 12,
807 .sizeof_field = 8,
808 .sizeof_rec[ds_bts] = 8 * 3,
809 .sizeof_rec[ds_pebs] = 8 * 10
Markus Metzgereee3af42008-01-30 13:31:09 +0100810};
811
812static inline void
813ds_configure(const struct ds_configuration *cfg)
814{
815 ds_cfg = *cfg;
816}
817
818void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
819{
820 switch (c->x86) {
821 case 0x6:
822 switch (c->x86_model) {
Markus Metzgereee3af42008-01-30 13:31:09 +0100823 case 0xD:
824 case 0xE: /* Pentium M */
Markus Metzger93fa7632008-04-08 11:01:58 +0200825 ds_configure(&ds_cfg_var);
Markus Metzgereee3af42008-01-30 13:31:09 +0100826 break;
Markus Metzgereee3af42008-01-30 13:31:09 +0100827 case 0xF: /* Core2 */
Markus Metzger93fa7632008-04-08 11:01:58 +0200828 case 0x1C: /* Atom */
829 ds_configure(&ds_cfg_64);
Markus Metzgereee3af42008-01-30 13:31:09 +0100830 break;
831 default:
832 /* sorry, don't know about them */
833 break;
834 }
835 break;
836 case 0xF:
837 switch (c->x86_model) {
Markus Metzgereee3af42008-01-30 13:31:09 +0100838 case 0x0:
839 case 0x1:
840 case 0x2: /* Netburst */
Markus Metzger93fa7632008-04-08 11:01:58 +0200841 ds_configure(&ds_cfg_var);
Markus Metzgereee3af42008-01-30 13:31:09 +0100842 break;
Markus Metzgereee3af42008-01-30 13:31:09 +0100843 default:
844 /* sorry, don't know about them */
845 break;
846 }
847 break;
848 default:
849 /* sorry, don't know about them */
850 break;
851 }
852}
Markus Metzger93fa7632008-04-08 11:01:58 +0200853
854void ds_free(struct ds_context *context)
855{
856 /* This is called when the task owning the parameter context
857 * is dying. There should not be any user of that context left
858 * to disturb us, anymore. */
859 unsigned long leftovers = context->count;
860 while (leftovers--)
861 ds_put_context(context);
862}
863#endif /* CONFIG_X86_DS */