x86, mce: Clean up thermal init by introducing intel_thermal_supported()
[safe/jmp/linux-2.6] / arch / x86 / kernel / ds.c
index 5b32b6d..ef42a03 100644 (file)
  * precise-event based sampling (PEBS).
  *
  * It manages:
- * - per-thread and per-cpu allocation of BTS and PEBS
- * - buffer memory allocation (optional)
- * - buffer overflow handling
+ * - DS and BTS hardware configuration
+ * - buffer overflow handling (to be done)
  * - buffer access
  *
- * It assumes:
- * - get_task_struct on all parameter tasks
- * - current is allowed to trace parameter tasks
+ * It does not do:
+ * - security checking (is the caller allowed to trace the task)
+ * - buffer allocation (memory accounting)
  *
  *
- * Copyright (C) 2007-2008 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
+ * Copyright (C) 2007-2009 Intel Corporation.
+ * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
  */
 
-
-#ifdef CONFIG_X86_DS
-
-#include <asm/ds.h>
-
-#include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/slab.h>
+#include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/trace_clock.h>
 
+#include <asm/ds.h>
+
+#include "ds_selftest.h"
 
 /*
- * The configuration for a particular DS hardware implementation.
+ * The configuration for a particular DS hardware implementation:
  */
 struct ds_configuration {
-       /* the size of the DS structure in bytes */
-       unsigned char  sizeof_ds;
-       /* the size of one pointer-typed field in the DS structure in bytes;
-          this covers the first 8 fields related to buffer management. */
-       unsigned char  sizeof_field;
-       /* the size of a BTS/PEBS record in bytes */
-       unsigned char  sizeof_rec[2];
+       /* The name of the configuration: */
+       const char              *name;
+
+       /* The size of pointer-typed fields in DS, BTS, and PEBS: */
+       unsigned char           sizeof_ptr_field;
+
+       /* The size of a BTS/PEBS record in bytes: */
+       unsigned char           sizeof_rec[2];
+
+       /* The number of PEBS counter reset values in the DS structure. */
+       unsigned char           nr_counter_reset;
+
+       /* Control bit-masks indexed by enum ds_feature: */
+       unsigned long           ctl[dsf_ctl_max];
 };
-static struct ds_configuration ds_cfg;
+static struct ds_configuration ds_cfg __read_mostly;
+
+
+/* Maximal size of a DS configuration: */
+#define MAX_SIZEOF_DS          0x80
 
+/* Maximal size of a BTS record: */
+#define MAX_SIZEOF_BTS         (3 * 8)
+
+/* BTS and PEBS buffer alignment: */
+#define DS_ALIGNMENT           (1 << 3)
+
+/* Number of buffer pointers in DS: */
+#define NUM_DS_PTR_FIELDS      8
+
+/* Size of a pebs reset value in DS: */
+#define PEBS_RESET_FIELD_SIZE  8
+
+/* Mask of control bits in the DS MSR register: */
+#define BTS_CONTROL                              \
+       ( ds_cfg.ctl[dsf_bts]                   | \
+         ds_cfg.ctl[dsf_bts_kernel]            | \
+         ds_cfg.ctl[dsf_bts_user]              | \
+         ds_cfg.ctl[dsf_bts_overflow] )
+
+/*
+ * A BTS or PEBS tracer.
+ *
+ * This holds the configuration of the tracer and serves as a handle
+ * to identify tracers.
+ */
+struct ds_tracer {
+       /* The DS context (partially) owned by this tracer. */
+       struct ds_context       *context;
+       /* The buffer provided on ds_request() and its size in bytes. */
+       void                    *buffer;
+       size_t                  size;
+};
+
+struct bts_tracer {
+       /* The common DS part: */
+       struct ds_tracer        ds;
+
+       /* The trace including the DS configuration: */
+       struct bts_trace        trace;
+
+       /* Buffer overflow notification function: */
+       bts_ovfl_callback_t     ovfl;
+
+       /* Active flags affecting trace collection. */
+       unsigned int            flags;
+};
+
+struct pebs_tracer {
+       /* The common DS part: */
+       struct ds_tracer        ds;
+
+       /* The trace including the DS configuration: */
+       struct pebs_trace       trace;
+
+       /* Buffer overflow notification function: */
+       pebs_ovfl_callback_t    ovfl;
+};
 
 /*
  * Debug Store (DS) save area configuration (see Intel64 and IA32
@@ -52,6 +120,7 @@ static struct ds_configuration ds_cfg;
  *
  * The DS configuration consists of the following fields; different
  * architectures vary in the size of those fields.
+ *
  * - double-word aligned base linear address of the BTS buffer
  * - write pointer into the BTS buffer
  * - end linear address of the BTS buffer (one byte beyond the end of
@@ -90,53 +159,30 @@ enum ds_field {
 };
 
 enum ds_qualifier {
-       ds_bts  = 0,
+       ds_bts = 0,
        ds_pebs
 };
 
-static inline unsigned long ds_get(const unsigned char *base,
-                                  enum ds_qualifier qual, enum ds_field field)
+static inline unsigned long
+ds_get(const unsigned char *base, enum ds_qualifier qual, enum ds_field field)
 {
-       base += (ds_cfg.sizeof_field * (field + (4 * qual)));
+       base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
        return *(unsigned long *)base;
 }
 
-static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
-                         enum ds_field field, unsigned long value)
+static inline void
+ds_set(unsigned char *base, enum ds_qualifier qual, enum ds_field field,
+       unsigned long value)
 {
-       base += (ds_cfg.sizeof_field * (field + (4 * qual)));
+       base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
        (*(unsigned long *)base) = value;
 }
 
 
 /*
- * Locking is done only for allocating BTS or PEBS resources and for
- * guarding context and buffer memory allocation.
- *
- * Most functions require the current task to own the ds context part
- * they are going to access. All the locking is done when validating
- * access to the context.
- */
-static spinlock_t ds_lock = __SPIN_LOCK_UNLOCKED(ds_lock);
-
-/*
- * Validate that the current task is allowed to access the BTS/PEBS
- * buffer of the parameter task.
- *
- * Returns 0, if access is granted; -Eerrno, otherwise.
+ * Locking is done only for allocating BTS or PEBS resources.
  */
-static inline int ds_validate_access(struct ds_context *context,
-                                    enum ds_qualifier qual)
-{
-       if (!context)
-               return -EPERM;
-
-       if (context->owner[qual] == current)
-               return 0;
-
-       return -EPERM;
-}
-
+static DEFINE_SPINLOCK(ds_lock);
 
 /*
  * We either support (system-wide) per-cpu or per-thread allocation.
@@ -151,30 +197,43 @@ static inline int ds_validate_access(struct ds_context *context,
  *   >0  number of per-thread tracers
  *   <0  number of per-cpu tracers
  *
- * The below functions to get and put tracers and to check the
- * allocation type require the ds_lock to be held by the caller.
- *
  * Tracers essentially give the number of ds contexts for a certain
  * type of allocation.
  */
-static long tracers;
+static atomic_t tracers = ATOMIC_INIT(0);
 
-static inline void get_tracer(struct task_struct *task)
+static inline int get_tracer(struct task_struct *task)
 {
-       tracers += (task ? 1 : -1);
-}
+       int error;
 
-static inline void put_tracer(struct task_struct *task)
-{
-       tracers -= (task ? 1 : -1);
+       spin_lock_irq(&ds_lock);
+
+       if (task) {
+               error = -EPERM;
+               if (atomic_read(&tracers) < 0)
+                       goto out;
+               atomic_inc(&tracers);
+       } else {
+               error = -EPERM;
+               if (atomic_read(&tracers) > 0)
+                       goto out;
+               atomic_dec(&tracers);
+       }
+
+       error = 0;
+out:
+       spin_unlock_irq(&ds_lock);
+       return error;
 }
 
-static inline int check_tracer(struct task_struct *task)
+static inline void put_tracer(struct task_struct *task)
 {
-       return (task ? (tracers >= 0) : (tracers <= 0));
+       if (task)
+               atomic_dec(&tracers);
+       else
+               atomic_inc(&tracers);
 }
 
-
 /*
  * The DS context is either attached to a thread or to a cpu:
  * - in the former case, the thread_struct contains a pointer to the
@@ -184,237 +243,390 @@ static inline int check_tracer(struct task_struct *task)
  *
  * Contexts are use-counted. They are allocated on first access and
  * deallocated when the last user puts the context.
- *
- * We distinguish between an allocating and a non-allocating get of a
- * context:
- * - the allocating get is used for requesting BTS/PEBS resources. It
- *   requires the caller to hold the global ds_lock.
- * - the non-allocating get is used for all other cases. A
- *   non-existing context indicates an error. It acquires and releases
- *   the ds_lock itself for obtaining the context.
- *
- * A context and its DS configuration are allocated and deallocated
- * together. A context always has a DS configuration of the
- * appropriate size.
  */
-static DEFINE_PER_CPU(struct ds_context *, system_context);
+struct ds_context {
+       /* The DS configuration; goes into MSR_IA32_DS_AREA: */
+       unsigned char           ds[MAX_SIZEOF_DS];
 
-#define this_system_context per_cpu(system_context, smp_processor_id())
+       /* The owner of the BTS and PEBS configuration, respectively: */
+       struct bts_tracer       *bts_master;
+       struct pebs_tracer      *pebs_master;
 
-/*
- * Returns the pointer to the parameter task's context or to the
- * system-wide context, if task is NULL.
- *
- * Increases the use count of the returned context, if not NULL.
- */
-static inline struct ds_context *ds_get_context(struct task_struct *task)
-{
-       struct ds_context *context;
+       /* Use count: */
+       unsigned long           count;
 
-       spin_lock(&ds_lock);
+       /* Pointer to the context pointer field: */
+       struct ds_context       **this;
 
-       context = (task ? task->thread.ds_ctx : this_system_context);
-       if (context)
-               context->count++;
+       /* The traced task; NULL for cpu tracing: */
+       struct task_struct      *task;
 
-       spin_unlock(&ds_lock);
+       /* The traced cpu; only valid if task is NULL: */
+       int                     cpu;
+};
 
-       return context;
-}
+static DEFINE_PER_CPU(struct ds_context *, cpu_context);
 
-/*
- * Same as ds_get_context, but allocates the context and it's DS
- * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
- */
-static inline struct ds_context *ds_alloc_context(struct task_struct *task)
+
+static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
 {
        struct ds_context **p_context =
-               (task ? &task->thread.ds_ctx : &this_system_context);
-       struct ds_context *context = *p_context;
-
-       if (!context) {
-               context = kzalloc(sizeof(*context), GFP_KERNEL);
+               (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+       struct ds_context *context = NULL;
+       struct ds_context *new_context = NULL;
 
-               if (!context)
-                       return 0;
+       /* Chances are small that we already have a context. */
+       new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
+       if (!new_context)
+               return NULL;
 
-               context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
-               if (!context->ds) {
-                       kfree(context);
-                       return 0;
-               }
+       spin_lock_irq(&ds_lock);
 
-               *p_context = context;
+       context = *p_context;
+       if (likely(!context)) {
+               context = new_context;
 
                context->this = p_context;
                context->task = task;
+               context->cpu = cpu;
+               context->count = 0;
 
-               if (task)
-                       set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
-
-               if (!task || (task == current))
-                       wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-
-               get_tracer(task);
+               *p_context = context;
        }
 
        context->count++;
 
+       spin_unlock_irq(&ds_lock);
+
+       if (context != new_context)
+               kfree(new_context);
+
        return context;
 }
 
-/*
- * Decreases the use count of the parameter context, if not NULL.
- * Deallocates the context, if the use count reaches zero.
- */
-static inline void ds_put_context(struct ds_context *context)
+static void ds_put_context(struct ds_context *context)
 {
+       struct task_struct *task;
+       unsigned long irq;
+
        if (!context)
                return;
 
-       spin_lock(&ds_lock);
+       spin_lock_irqsave(&ds_lock, irq);
 
-       if (--context->count)
-               goto out;
+       if (--context->count) {
+               spin_unlock_irqrestore(&ds_lock, irq);
+               return;
+       }
+
+       *(context->this) = NULL;
 
-       *(context->this) = 0;
+       task = context->task;
 
-       if (context->task)
-               clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
+       if (task)
+               clear_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+
+       /*
+        * We leave the (now dangling) pointer to the DS configuration in
+        * the DS_AREA msr. This is as good or as bad as replacing it with
+        * NULL - the hardware would crash if we enabled tracing.
+        *
+        * This saves us some problems with having to write an msr on a
+        * different cpu while preventing others from doing the same for the
+        * next context for that same cpu.
+        */
 
-       if (!context->task || (context->task == current))
-               wrmsrl(MSR_IA32_DS_AREA, 0);
+       spin_unlock_irqrestore(&ds_lock, irq);
 
-       put_tracer(context->task);
+       /* The context might still be in use for context switching. */
+       if (task && (task != current))
+               wait_task_context_switch(task);
 
-       /* free any leftover buffers from tracers that did not
-        * deallocate them properly. */
-       kfree(context->buffer[ds_bts]);
-       kfree(context->buffer[ds_pebs]);
-       kfree(context->ds);
        kfree(context);
- out:
-       spin_unlock(&ds_lock);
 }
 
+static void ds_install_ds_area(struct ds_context *context)
+{
+       unsigned long ds;
+
+       ds = (unsigned long)context->ds;
+
+       /*
+        * There is a race between the bts master and the pebs master.
+        *
+        * The thread/cpu access is synchronized via get/put_cpu() for
+        * task tracing and via wrmsr_on_cpu for cpu tracing.
+        *
+        * If bts and pebs are collected for the same task or same cpu,
+        * the same configuration is written twice.
+        */
+       if (context->task) {
+               get_cpu();
+               if (context->task == current)
+                       wrmsrl(MSR_IA32_DS_AREA, ds);
+               set_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
+               put_cpu();
+       } else
+               wrmsr_on_cpu(context->cpu, MSR_IA32_DS_AREA,
+                            (u32)((u64)ds), (u32)((u64)ds >> 32));
+}
 
 /*
- * Handle a buffer overflow
+ * Call the tracer's callback on a buffer overflow.
  *
- * task: the task whose buffers are overflowing;
- *       NULL for a buffer overflow on the current cpu
  * context: the ds context
  * qual: the buffer type
  */
-static void ds_overflow(struct task_struct *task, struct ds_context *context,
-                       enum ds_qualifier qual)
+static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
 {
-       if (!context)
-               return;
-
-       if (context->callback[qual])
-               (*context->callback[qual])(task);
-
-       /* todo: do some more overflow handling */
+       switch (qual) {
+       case ds_bts:
+               if (context->bts_master &&
+                   context->bts_master->ovfl)
+                       context->bts_master->ovfl(context->bts_master);
+               break;
+       case ds_pebs:
+               if (context->pebs_master &&
+                   context->pebs_master->ovfl)
+                       context->pebs_master->ovfl(context->pebs_master);
+               break;
+       }
 }
 
 
 /*
- * Allocate a non-pageable buffer of the parameter size.
- * Checks the memory and the locked memory rlimit.
+ * Write raw data into the BTS or PEBS buffer.
  *
- * Returns the buffer, if successful;
- *         NULL, if out of memory or rlimit exceeded.
+ * The remainder of any partially written record is zeroed out.
  *
- * size: the requested buffer size in bytes
- * pages (out): if not NULL, contains the number of pages reserved
+ * context: the DS context
+ * qual:    the buffer type
+ * record:  the data to write
+ * size:    the size of the data
  */
-static inline void *ds_allocate_buffer(size_t size, unsigned int *pages)
+static int ds_write(struct ds_context *context, enum ds_qualifier qual,
+                   const void *record, size_t size)
 {
-       unsigned long rlim, vm, pgsz;
-       void *buffer;
+       int bytes_written = 0;
 
-       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       if (!record)
+               return -EINVAL;
 
-       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->total_vm  + pgsz;
-       if (rlim < vm)
-               return 0;
+       while (size) {
+               unsigned long base, index, end, write_end, int_th;
+               unsigned long write_size, adj_write_size;
 
-       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->locked_vm  + pgsz;
-       if (rlim < vm)
-               return 0;
+               /*
+                * Write as much as possible without producing an
+                * overflow interrupt.
+                *
+                * Interrupt_threshold must either be
+                * - bigger than absolute_maximum or
+                * - point to a record between buffer_base and absolute_maximum
+                *
+                * Index points to a valid record.
+                */
+               base   = ds_get(context->ds, qual, ds_buffer_base);
+               index  = ds_get(context->ds, qual, ds_index);
+               end    = ds_get(context->ds, qual, ds_absolute_maximum);
+               int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
 
-       buffer = kzalloc(size, GFP_KERNEL);
-       if (!buffer)
-               return 0;
+               write_end = min(end, int_th);
+
+               /*
+                * If we are already beyond the interrupt threshold,
+                * we fill the entire buffer.
+                */
+               if (write_end <= index)
+                       write_end = end;
+
+               if (write_end <= index)
+                       break;
+
+               write_size = min((unsigned long) size, write_end - index);
+               memcpy((void *)index, record, write_size);
+
+               record = (const char *)record + write_size;
+               size -= write_size;
+               bytes_written += write_size;
+
+               adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
+               adj_write_size *= ds_cfg.sizeof_rec[qual];
+
+               /* Zero out trailing bytes. */
+               memset((char *)index + write_size, 0,
+                      adj_write_size - write_size);
+               index += adj_write_size;
 
-       current->mm->total_vm  += pgsz;
-       current->mm->locked_vm += pgsz;
+               if (index >= end)
+                       index = base;
+               ds_set(context->ds, qual, ds_index, index);
 
-       if (pages)
-               *pages = pgsz;
+               if (index >= int_th)
+                       ds_overflow(context, qual);
+       }
 
-       return buffer;
+       return bytes_written;
 }
 
-static int ds_request(struct task_struct *task, void *base, size_t size,
-                     ds_ovfl_callback_t ovfl, enum ds_qualifier qual)
+
+/*
+ * Branch Trace Store (BTS) uses the following format. Different
+ * architectures vary in the size of those fields.
+ * - source linear address
+ * - destination linear address
+ * - flags
+ *
+ * Later architectures use 64bit pointers throughout, whereas earlier
+ * architectures use 32bit pointers in 32bit mode.
+ *
+ * We compute the base address for the fields based on:
+ * - the field size stored in the DS configuration
+ * - the relative field position
+ *
+ * In order to store additional information in the BTS buffer, we use
+ * a special source address to indicate that the record requires
+ * special interpretation.
+ *
+ * Netburst indicated via a bit in the flags field whether the branch
+ * was predicted; this is ignored.
+ *
+ * We use two levels of abstraction:
+ * - the raw data level defined here
+ * - an arch-independent level defined in ds.h
+ */
+
+enum bts_field {
+       bts_from,
+       bts_to,
+       bts_flags,
+
+       bts_qual                = bts_from,
+       bts_clock               = bts_to,
+       bts_pid                 = bts_flags,
+
+       bts_qual_mask           = (bts_qual_max - 1),
+       bts_escape              = ((unsigned long)-1 & ~bts_qual_mask)
+};
+
+static inline unsigned long bts_get(const char *base, unsigned long field)
 {
-       struct ds_context *context;
-       unsigned long buffer, adj;
-       const unsigned long alignment = (1 << 3);
-       int error = 0;
+       base += (ds_cfg.sizeof_ptr_field * field);
+       return *(unsigned long *)base;
+}
+
+static inline void bts_set(char *base, unsigned long field, unsigned long val)
+{
+       base += (ds_cfg.sizeof_ptr_field * field);
+       (*(unsigned long *)base) = val;
+}
 
-       if (!ds_cfg.sizeof_ds)
-               return -EOPNOTSUPP;
 
-       /* we require some space to do alignment adjustments below */
-       if (size < (alignment + ds_cfg.sizeof_rec[qual]))
+/*
+ * The raw BTS data is architecture dependent.
+ *
+ * For higher-level users, we give an arch-independent view.
+ * - ds.h defines struct bts_struct
+ * - bts_read translates one raw bts record into a bts_struct
+ * - bts_write translates one bts_struct into the raw format and
+ *   writes it into the top of the parameter tracer's buffer.
+ *
+ * return: bytes read/written on success; -Eerrno, otherwise
+ */
+static int
+bts_read(struct bts_tracer *tracer, const void *at, struct bts_struct *out)
+{
+       if (!tracer)
                return -EINVAL;
 
-       /* buffer overflow notification is not yet implemented */
-       if (ovfl)
-               return -EOPNOTSUPP;
+       if (at < tracer->trace.ds.begin)
+               return -EINVAL;
 
+       if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
+               return -EINVAL;
 
-       spin_lock(&ds_lock);
+       memset(out, 0, sizeof(*out));
+       if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
+               out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
+               out->variant.event.clock = bts_get(at, bts_clock);
+               out->variant.event.pid = bts_get(at, bts_pid);
+       } else {
+               out->qualifier = bts_branch;
+               out->variant.lbr.from = bts_get(at, bts_from);
+               out->variant.lbr.to   = bts_get(at, bts_to);
+
+               if (!out->variant.lbr.from && !out->variant.lbr.to)
+                       out->qualifier = bts_invalid;
+       }
 
-       if (!check_tracer(task))
-               return -EPERM;
+       return ds_cfg.sizeof_rec[ds_bts];
+}
 
-       error = -ENOMEM;
-       context = ds_alloc_context(task);
-       if (!context)
-               goto out_unlock;
+static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
+{
+       unsigned char raw[MAX_SIZEOF_BTS];
 
-       error = -EALREADY;
-       if (context->owner[qual] == current)
-               goto out_unlock;
-       error = -EPERM;
-       if (context->owner[qual] != 0)
-               goto out_unlock;
-       context->owner[qual] = current;
+       if (!tracer)
+               return -EINVAL;
 
-       spin_unlock(&ds_lock);
+       if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
+               return -EOVERFLOW;
 
+       switch (in->qualifier) {
+       case bts_invalid:
+               bts_set(raw, bts_from, 0);
+               bts_set(raw, bts_to, 0);
+               bts_set(raw, bts_flags, 0);
+               break;
+       case bts_branch:
+               bts_set(raw, bts_from, in->variant.lbr.from);
+               bts_set(raw, bts_to,   in->variant.lbr.to);
+               bts_set(raw, bts_flags, 0);
+               break;
+       case bts_task_arrives:
+       case bts_task_departs:
+               bts_set(raw, bts_qual, (bts_escape | in->qualifier));
+               bts_set(raw, bts_clock, in->variant.event.clock);
+               bts_set(raw, bts_pid, in->variant.event.pid);
+               break;
+       default:
+               return -EINVAL;
+       }
 
-       error = -ENOMEM;
-       if (!base) {
-               base = ds_allocate_buffer(size, &context->pages[qual]);
-               if (!base)
-                       goto out_release;
+       return ds_write(tracer->ds.context, ds_bts, raw,
+                       ds_cfg.sizeof_rec[ds_bts]);
+}
 
-               context->buffer[qual]   = base;
-       }
-       error = 0;
 
-       context->callback[qual] = ovfl;
+static void ds_write_config(struct ds_context *context,
+                           struct ds_trace *cfg, enum ds_qualifier qual)
+{
+       unsigned char *ds = context->ds;
+
+       ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
+       ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
+       ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
+       ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
+}
 
-       /* adjust the buffer address and size to meet alignment
+static void ds_read_config(struct ds_context *context,
+                          struct ds_trace *cfg, enum ds_qualifier qual)
+{
+       unsigned char *ds = context->ds;
+
+       cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
+       cfg->top = (void *)ds_get(ds, qual, ds_index);
+       cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
+       cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
+}
+
+static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
+                            void *base, size_t size, size_t ith,
+                            unsigned int flags) {
+       unsigned long buffer, adj;
+
+       /*
+        * Adjust the buffer address and size to meet alignment
         * constraints:
         * - buffer is double-word aligned
         * - size is multiple of record size
@@ -424,440 +636,802 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
         */
        buffer = (unsigned long)base;
 
-       adj = ALIGN(buffer, alignment) - buffer;
+       adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
        buffer += adj;
        size   -= adj;
 
-       size /= ds_cfg.sizeof_rec[qual];
-       size *= ds_cfg.sizeof_rec[qual];
+       trace->n = size / ds_cfg.sizeof_rec[qual];
+       trace->size = ds_cfg.sizeof_rec[qual];
 
-       ds_set(context->ds, qual, ds_buffer_base, buffer);
-       ds_set(context->ds, qual, ds_index, buffer);
-       ds_set(context->ds, qual, ds_absolute_maximum, buffer + size);
+       size = (trace->n * trace->size);
 
-       if (ovfl) {
-               /* todo: select a suitable interrupt threshold */
-       } else
-               ds_set(context->ds, qual,
-                      ds_interrupt_threshold, buffer + size + 1);
-
-       /* we keep the context until ds_release */
-       return error;
-
- out_release:
-       context->owner[qual] = 0;
-       ds_put_context(context);
-       return error;
+       trace->begin = (void *)buffer;
+       trace->top = trace->begin;
+       trace->end = (void *)(buffer + size);
+       /*
+        * The value for 'no threshold' is -1, which will set the
+        * threshold outside of the buffer, just like we want it.
+        */
+       ith *= ds_cfg.sizeof_rec[qual];
+       trace->ith = (void *)(buffer + size - ith);
 
- out_unlock:
-       spin_unlock(&ds_lock);
-       ds_put_context(context);
-       return error;
+       trace->flags = flags;
 }
 
-int ds_request_bts(struct task_struct *task, void *base, size_t size,
-                  ds_ovfl_callback_t ovfl)
-{
-       return ds_request(task, base, size, ovfl, ds_bts);
-}
 
-int ds_request_pebs(struct task_struct *task, void *base, size_t size,
-                   ds_ovfl_callback_t ovfl)
+static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
+                     enum ds_qualifier qual, struct task_struct *task,
+                     int cpu, void *base, size_t size, size_t th)
 {
-       return ds_request(task, base, size, ovfl, ds_pebs);
+       struct ds_context *context;
+       int error;
+       size_t req_size;
+
+       error = -EOPNOTSUPP;
+       if (!ds_cfg.sizeof_rec[qual])
+               goto out;
+
+       error = -EINVAL;
+       if (!base)
+               goto out;
+
+       req_size = ds_cfg.sizeof_rec[qual];
+       /* We might need space for alignment adjustments. */
+       if (!IS_ALIGNED((unsigned long)base, DS_ALIGNMENT))
+               req_size += DS_ALIGNMENT;
+
+       error = -EINVAL;
+       if (size < req_size)
+               goto out;
+
+       if (th != (size_t)-1) {
+               th *= ds_cfg.sizeof_rec[qual];
+
+               error = -EINVAL;
+               if (size <= th)
+                       goto out;
+       }
+
+       tracer->buffer = base;
+       tracer->size = size;
+
+       error = -ENOMEM;
+       context = ds_get_context(task, cpu);
+       if (!context)
+               goto out;
+       tracer->context = context;
+
+       /*
+        * Defer any tracer-specific initialization work for the context until
+        * context ownership has been clarified.
+        */
+
+       error = 0;
+ out:
+       return error;
 }
 
-static int ds_release(struct task_struct *task, enum ds_qualifier qual)
+static struct bts_tracer *ds_request_bts(struct task_struct *task, int cpu,
+                                        void *base, size_t size,
+                                        bts_ovfl_callback_t ovfl, size_t th,
+                                        unsigned int flags)
 {
-       struct ds_context *context;
+       struct bts_tracer *tracer;
        int error;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
+       /* Buffer overflow notification is not yet implemented. */
+       error = -EOPNOTSUPP;
+       if (ovfl)
+               goto out;
+
+       error = get_tracer(task);
        if (error < 0)
                goto out;
 
-       kfree(context->buffer[qual]);
-       context->buffer[qual] = 0;
+       error = -ENOMEM;
+       tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+       if (!tracer)
+               goto out_put_tracer;
+       tracer->ovfl = ovfl;
+
+       /* Do some more error checking and acquire a tracing context. */
+       error = ds_request(&tracer->ds, &tracer->trace.ds,
+                          ds_bts, task, cpu, base, size, th);
+       if (error < 0)
+               goto out_tracer;
+
+       /* Claim the bts part of the tracing context we acquired above. */
+       spin_lock_irq(&ds_lock);
+
+       error = -EPERM;
+       if (tracer->ds.context->bts_master)
+               goto out_unlock;
+       tracer->ds.context->bts_master = tracer;
 
-       current->mm->total_vm  -= context->pages[qual];
-       current->mm->locked_vm -= context->pages[qual];
-       context->pages[qual] = 0;
-       context->owner[qual] = 0;
+       spin_unlock_irq(&ds_lock);
 
        /*
-        * we put the context twice:
-        *   once for the ds_get_context
-        *   once for the corresponding ds_request
+        * Now that we own the bts part of the context, let's complete the
+        * initialization for that part.
         */
-       ds_put_context(context);
+       ds_init_ds_trace(&tracer->trace.ds, ds_bts, base, size, th, flags);
+       ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+       ds_install_ds_area(tracer->ds.context);
+
+       tracer->trace.read  = bts_read;
+       tracer->trace.write = bts_write;
+
+       /* Start tracing. */
+       ds_resume_bts(tracer);
+
+       return tracer;
+
+ out_unlock:
+       spin_unlock_irq(&ds_lock);
+       ds_put_context(tracer->ds.context);
+ out_tracer:
+       kfree(tracer);
+ out_put_tracer:
+       put_tracer(task);
  out:
-       ds_put_context(context);
-       return error;
+       return ERR_PTR(error);
 }
 
-int ds_release_bts(struct task_struct *task)
+struct bts_tracer *ds_request_bts_task(struct task_struct *task,
+                                      void *base, size_t size,
+                                      bts_ovfl_callback_t ovfl,
+                                      size_t th, unsigned int flags)
 {
-       return ds_release(task, ds_bts);
+       return ds_request_bts(task, 0, base, size, ovfl, th, flags);
 }
 
-int ds_release_pebs(struct task_struct *task)
+struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
+                                     bts_ovfl_callback_t ovfl,
+                                     size_t th, unsigned int flags)
 {
-       return ds_release(task, ds_pebs);
+       return ds_request_bts(NULL, cpu, base, size, ovfl, th, flags);
 }
 
-static int ds_get_index(struct task_struct *task, size_t *pos,
-                       enum ds_qualifier qual)
+static struct pebs_tracer *ds_request_pebs(struct task_struct *task, int cpu,
+                                          void *base, size_t size,
+                                          pebs_ovfl_callback_t ovfl, size_t th,
+                                          unsigned int flags)
 {
-       struct ds_context *context;
-       unsigned long base, index;
+       struct pebs_tracer *tracer;
        int error;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
+       /* Buffer overflow notification is not yet implemented. */
+       error = -EOPNOTSUPP;
+       if (ovfl)
+               goto out;
+
+       error = get_tracer(task);
        if (error < 0)
                goto out;
 
-       base  = ds_get(context->ds, qual, ds_buffer_base);
-       index = ds_get(context->ds, qual, ds_index);
+       error = -ENOMEM;
+       tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+       if (!tracer)
+               goto out_put_tracer;
+       tracer->ovfl = ovfl;
+
+       /* Do some more error checking and acquire a tracing context. */
+       error = ds_request(&tracer->ds, &tracer->trace.ds,
+                          ds_pebs, task, cpu, base, size, th);
+       if (error < 0)
+               goto out_tracer;
+
+       /* Claim the pebs part of the tracing context we acquired above. */
+       spin_lock_irq(&ds_lock);
 
-       error = ((index - base) / ds_cfg.sizeof_rec[qual]);
-       if (pos)
-               *pos = error;
+       error = -EPERM;
+       if (tracer->ds.context->pebs_master)
+               goto out_unlock;
+       tracer->ds.context->pebs_master = tracer;
+
+       spin_unlock_irq(&ds_lock);
+
+       /*
+        * Now that we own the pebs part of the context, let's complete the
+        * initialization for that part.
+        */
+       ds_init_ds_trace(&tracer->trace.ds, ds_pebs, base, size, th, flags);
+       ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
+       ds_install_ds_area(tracer->ds.context);
+
+       /* Start tracing. */
+       ds_resume_pebs(tracer);
+
+       return tracer;
+
+ out_unlock:
+       spin_unlock_irq(&ds_lock);
+       ds_put_context(tracer->ds.context);
+ out_tracer:
+       kfree(tracer);
+ out_put_tracer:
+       put_tracer(task);
  out:
-       ds_put_context(context);
-       return error;
+       return ERR_PTR(error);
 }
 
-int ds_get_bts_index(struct task_struct *task, size_t *pos)
+struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
+                                        void *base, size_t size,
+                                        pebs_ovfl_callback_t ovfl,
+                                        size_t th, unsigned int flags)
 {
-       return ds_get_index(task, pos, ds_bts);
+       return ds_request_pebs(task, 0, base, size, ovfl, th, flags);
 }
 
-int ds_get_pebs_index(struct task_struct *task, size_t *pos)
+struct pebs_tracer *ds_request_pebs_cpu(int cpu, void *base, size_t size,
+                                       pebs_ovfl_callback_t ovfl,
+                                       size_t th, unsigned int flags)
 {
-       return ds_get_index(task, pos, ds_pebs);
+       return ds_request_pebs(NULL, cpu, base, size, ovfl, th, flags);
 }
 
-static int ds_get_end(struct task_struct *task, size_t *pos,
-                     enum ds_qualifier qual)
+static void ds_free_bts(struct bts_tracer *tracer)
 {
-       struct ds_context *context;
-       unsigned long base, end;
-       int error;
+       struct task_struct *task;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
+       task = tracer->ds.context->task;
 
-       base = ds_get(context->ds, qual, ds_buffer_base);
-       end  = ds_get(context->ds, qual, ds_absolute_maximum);
+       WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
+       tracer->ds.context->bts_master = NULL;
 
-       error = ((end - base) / ds_cfg.sizeof_rec[qual]);
-       if (pos)
-               *pos = error;
- out:
-       ds_put_context(context);
-       return error;
-}
+       /* Make sure tracing stopped and the tracer is not in use. */
+       if (task && (task != current))
+               wait_task_context_switch(task);
 
-int ds_get_bts_end(struct task_struct *task, size_t *pos)
-{
-       return ds_get_end(task, pos, ds_bts);
+       ds_put_context(tracer->ds.context);
+       put_tracer(task);
+
+       kfree(tracer);
 }
 
-int ds_get_pebs_end(struct task_struct *task, size_t *pos)
+void ds_release_bts(struct bts_tracer *tracer)
 {
-       return ds_get_end(task, pos, ds_pebs);
+       might_sleep();
+
+       if (!tracer)
+               return;
+
+       ds_suspend_bts(tracer);
+       ds_free_bts(tracer);
 }
 
-static int ds_access(struct task_struct *task, size_t index,
-                    const void **record, enum ds_qualifier qual)
+int ds_release_bts_noirq(struct bts_tracer *tracer)
 {
-       struct ds_context *context;
-       unsigned long base, idx;
+       struct task_struct *task;
+       unsigned long irq;
        int error;
 
-       if (!record)
-               return -EINVAL;
+       if (!tracer)
+               return 0;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
+       task = tracer->ds.context->task;
 
-       base = ds_get(context->ds, qual, ds_buffer_base);
-       idx = base + (index * ds_cfg.sizeof_rec[qual]);
+       local_irq_save(irq);
 
-       error = -EINVAL;
-       if (idx > ds_get(context->ds, qual, ds_absolute_maximum))
+       error = -EPERM;
+       if (!task &&
+           (tracer->ds.context->cpu != smp_processor_id()))
                goto out;
 
-       *record = (const void *)idx;
-       error = ds_cfg.sizeof_rec[qual];
+       error = -EPERM;
+       if (task && (task != current))
+               goto out;
+
+       ds_suspend_bts_noirq(tracer);
+       ds_free_bts(tracer);
+
+       error = 0;
  out:
-       ds_put_context(context);
+       local_irq_restore(irq);
        return error;
 }
 
-int ds_access_bts(struct task_struct *task, size_t index, const void **record)
+static void update_task_debugctlmsr(struct task_struct *task,
+                                   unsigned long debugctlmsr)
 {
-       return ds_access(task, index, record, ds_bts);
+       task->thread.debugctlmsr = debugctlmsr;
+
+       get_cpu();
+       if (task == current)
+               update_debugctlmsr(debugctlmsr);
+       put_cpu();
 }
 
-int ds_access_pebs(struct task_struct *task, size_t index, const void **record)
+void ds_suspend_bts(struct bts_tracer *tracer)
 {
-       return ds_access(task, index, record, ds_pebs);
+       struct task_struct *task;
+       unsigned long debugctlmsr;
+       int cpu;
+
+       if (!tracer)
+               return;
+
+       tracer->flags = 0;
+
+       task = tracer->ds.context->task;
+       cpu  = tracer->ds.context->cpu;
+
+       WARN_ON(!task && irqs_disabled());
+
+       debugctlmsr = (task ?
+                      task->thread.debugctlmsr :
+                      get_debugctlmsr_on_cpu(cpu));
+       debugctlmsr &= ~BTS_CONTROL;
+
+       if (task)
+               update_task_debugctlmsr(task, debugctlmsr);
+       else
+               update_debugctlmsr_on_cpu(cpu, debugctlmsr);
 }
 
-static int ds_write(struct task_struct *task, const void *record, size_t size,
-                   enum ds_qualifier qual, int force)
+int ds_suspend_bts_noirq(struct bts_tracer *tracer)
 {
-       struct ds_context *context;
-       int error;
+       struct task_struct *task;
+       unsigned long debugctlmsr, irq;
+       int cpu, error = 0;
 
-       if (!record)
-               return -EINVAL;
+       if (!tracer)
+               return 0;
+
+       tracer->flags = 0;
+
+       task = tracer->ds.context->task;
+       cpu  = tracer->ds.context->cpu;
+
+       local_irq_save(irq);
 
        error = -EPERM;
-       context = ds_get_context(task);
-       if (!context)
+       if (!task && (cpu != smp_processor_id()))
                goto out;
 
-       if (!force) {
-               error = ds_validate_access(context, qual);
-               if (error < 0)
-                       goto out;
-       }
+       debugctlmsr = (task ?
+                      task->thread.debugctlmsr :
+                      get_debugctlmsr());
+       debugctlmsr &= ~BTS_CONTROL;
+
+       if (task)
+               update_task_debugctlmsr(task, debugctlmsr);
+       else
+               update_debugctlmsr(debugctlmsr);
 
        error = 0;
-       while (size) {
-               unsigned long base, index, end, write_end, int_th;
-               unsigned long write_size, adj_write_size;
+ out:
+       local_irq_restore(irq);
+       return error;
+}
 
-               /*
-                * write as much as possible without producing an
-                * overflow interrupt.
-                *
-                * interrupt_threshold must either be
-                * - bigger than absolute_maximum or
-                * - point to a record between buffer_base and absolute_maximum
-                *
-                * index points to a valid record.
-                */
-               base   = ds_get(context->ds, qual, ds_buffer_base);
-               index  = ds_get(context->ds, qual, ds_index);
-               end    = ds_get(context->ds, qual, ds_absolute_maximum);
-               int_th = ds_get(context->ds, qual, ds_interrupt_threshold);
+static unsigned long ds_bts_control(struct bts_tracer *tracer)
+{
+       unsigned long control;
 
-               write_end = min(end, int_th);
+       control = ds_cfg.ctl[dsf_bts];
+       if (!(tracer->trace.ds.flags & BTS_KERNEL))
+               control |= ds_cfg.ctl[dsf_bts_kernel];
+       if (!(tracer->trace.ds.flags & BTS_USER))
+               control |= ds_cfg.ctl[dsf_bts_user];
 
-               /* if we are already beyond the interrupt threshold,
-                * we fill the entire buffer */
-               if (write_end <= index)
-                       write_end = end;
+       return control;
+}
 
-               if (write_end <= index)
-                       goto out;
+void ds_resume_bts(struct bts_tracer *tracer)
+{
+       struct task_struct *task;
+       unsigned long debugctlmsr;
+       int cpu;
 
-               write_size = min((unsigned long) size, write_end - index);
-               memcpy((void *)index, record, write_size);
+       if (!tracer)
+               return;
 
-               record = (const char *)record + write_size;
-               size  -= write_size;
-               error += write_size;
+       tracer->flags = tracer->trace.ds.flags;
 
-               adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
-               adj_write_size *= ds_cfg.sizeof_rec[qual];
+       task = tracer->ds.context->task;
+       cpu  = tracer->ds.context->cpu;
 
-               /* zero out trailing bytes */
-               memset((char *)index + write_size, 0,
-                      adj_write_size - write_size);
-               index += adj_write_size;
+       WARN_ON(!task && irqs_disabled());
 
-               if (index >= end)
-                       index = base;
-               ds_set(context->ds, qual, ds_index, index);
+       debugctlmsr = (task ?
+                      task->thread.debugctlmsr :
+                      get_debugctlmsr_on_cpu(cpu));
+       debugctlmsr |= ds_bts_control(tracer);
 
-               if (index >= int_th)
-                       ds_overflow(task, context, qual);
-       }
+       if (task)
+               update_task_debugctlmsr(task, debugctlmsr);
+       else
+               update_debugctlmsr_on_cpu(cpu, debugctlmsr);
+}
+
+int ds_resume_bts_noirq(struct bts_tracer *tracer)
+{
+       struct task_struct *task;
+       unsigned long debugctlmsr, irq;
+       int cpu, error = 0;
+
+       if (!tracer)
+               return 0;
+
+       tracer->flags = tracer->trace.ds.flags;
+
+       task = tracer->ds.context->task;
+       cpu  = tracer->ds.context->cpu;
+
+       local_irq_save(irq);
 
+       error = -EPERM;
+       if (!task && (cpu != smp_processor_id()))
+               goto out;
+
+       debugctlmsr = (task ?
+                      task->thread.debugctlmsr :
+                      get_debugctlmsr());
+       debugctlmsr |= ds_bts_control(tracer);
+
+       if (task)
+               update_task_debugctlmsr(task, debugctlmsr);
+       else
+               update_debugctlmsr(debugctlmsr);
+
+       error = 0;
  out:
-       ds_put_context(context);
+       local_irq_restore(irq);
        return error;
 }
 
-int ds_write_bts(struct task_struct *task, const void *record, size_t size)
+static void ds_free_pebs(struct pebs_tracer *tracer)
 {
-       return ds_write(task, record, size, ds_bts, /* force = */ 0);
-}
+       struct task_struct *task;
 
-int ds_write_pebs(struct task_struct *task, const void *record, size_t size)
-{
-       return ds_write(task, record, size, ds_pebs, /* force = */ 0);
-}
+       task = tracer->ds.context->task;
 
-int ds_unchecked_write_bts(struct task_struct *task,
-                          const void *record, size_t size)
-{
-       return ds_write(task, record, size, ds_bts, /* force = */ 1);
+       WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
+       tracer->ds.context->pebs_master = NULL;
+
+       ds_put_context(tracer->ds.context);
+       put_tracer(task);
+
+       kfree(tracer);
 }
 
-int ds_unchecked_write_pebs(struct task_struct *task,
-                           const void *record, size_t size)
+void ds_release_pebs(struct pebs_tracer *tracer)
 {
-       return ds_write(task, record, size, ds_pebs, /* force = */ 1);
+       might_sleep();
+
+       if (!tracer)
+               return;
+
+       ds_suspend_pebs(tracer);
+       ds_free_pebs(tracer);
 }
 
-static int ds_reset_or_clear(struct task_struct *task,
-                            enum ds_qualifier qual, int clear)
+int ds_release_pebs_noirq(struct pebs_tracer *tracer)
 {
-       struct ds_context *context;
-       unsigned long base, end;
+       struct task_struct *task;
+       unsigned long irq;
        int error;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, qual);
-       if (error < 0)
-               goto out;
+       if (!tracer)
+               return 0;
+
+       task = tracer->ds.context->task;
 
-       base = ds_get(context->ds, qual, ds_buffer_base);
-       end  = ds_get(context->ds, qual, ds_absolute_maximum);
+       local_irq_save(irq);
+
+       error = -EPERM;
+       if (!task &&
+           (tracer->ds.context->cpu != smp_processor_id()))
+               goto out;
 
-       if (clear)
-               memset((void *)base, 0, end - base);
+       error = -EPERM;
+       if (task && (task != current))
+               goto out;
 
-       ds_set(context->ds, qual, ds_index, base);
+       ds_suspend_pebs_noirq(tracer);
+       ds_free_pebs(tracer);
 
        error = 0;
  out:
-       ds_put_context(context);
+       local_irq_restore(irq);
        return error;
 }
 
-int ds_reset_bts(struct task_struct *task)
+void ds_suspend_pebs(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_bts, /* clear = */ 0);
+
 }
 
-int ds_reset_pebs(struct task_struct *task)
+int ds_suspend_pebs_noirq(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_pebs, /* clear = */ 0);
+       return 0;
 }
 
-int ds_clear_bts(struct task_struct *task)
+void ds_resume_pebs(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_bts, /* clear = */ 1);
+
 }
 
-int ds_clear_pebs(struct task_struct *task)
+int ds_resume_pebs_noirq(struct pebs_tracer *tracer)
 {
-       return ds_reset_or_clear(task, ds_pebs, /* clear = */ 1);
+       return 0;
 }
 
-int ds_get_pebs_reset(struct task_struct *task, u64 *value)
+const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
 {
-       struct ds_context *context;
-       int error;
+       if (!tracer)
+               return NULL;
+
+       ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
+       return &tracer->trace;
+}
 
-       if (!value)
+const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
+{
+       if (!tracer)
+               return NULL;
+
+       ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
+
+       tracer->trace.counters = ds_cfg.nr_counter_reset;
+       memcpy(tracer->trace.counter_reset,
+              tracer->ds.context->ds +
+              (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field),
+              ds_cfg.nr_counter_reset * PEBS_RESET_FIELD_SIZE);
+
+       return &tracer->trace;
+}
+
+int ds_reset_bts(struct bts_tracer *tracer)
+{
+       if (!tracer)
                return -EINVAL;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, ds_pebs);
-       if (error < 0)
-               goto out;
+       tracer->trace.ds.top = tracer->trace.ds.begin;
 
-       *value = *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8));
+       ds_set(tracer->ds.context->ds, ds_bts, ds_index,
+              (unsigned long)tracer->trace.ds.top);
 
-       error = 0;
- out:
-       ds_put_context(context);
-       return error;
+       return 0;
 }
 
-int ds_set_pebs_reset(struct task_struct *task, u64 value)
+int ds_reset_pebs(struct pebs_tracer *tracer)
 {
-       struct ds_context *context;
-       int error;
+       if (!tracer)
+               return -EINVAL;
 
-       context = ds_get_context(task);
-       error = ds_validate_access(context, ds_pebs);
-       if (error < 0)
-               goto out;
+       tracer->trace.ds.top = tracer->trace.ds.begin;
 
-       *(u64 *)(context->ds + (ds_cfg.sizeof_field * 8)) = value;
+       ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
+              (unsigned long)tracer->trace.ds.top);
 
-       error = 0;
- out:
-       ds_put_context(context);
-       return error;
+       return 0;
+}
+
+int ds_set_pebs_reset(struct pebs_tracer *tracer,
+                     unsigned int counter, u64 value)
+{
+       if (!tracer)
+               return -EINVAL;
+
+       if (ds_cfg.nr_counter_reset < counter)
+               return -EINVAL;
+
+       *(u64 *)(tracer->ds.context->ds +
+                (NUM_DS_PTR_FIELDS * ds_cfg.sizeof_ptr_field) +
+                (counter * PEBS_RESET_FIELD_SIZE)) = value;
+
+       return 0;
 }
 
-static const struct ds_configuration ds_cfg_var = {
-       .sizeof_ds    = sizeof(long) * 12,
-       .sizeof_field = sizeof(long),
-       .sizeof_rec[ds_bts]   = sizeof(long) * 3,
-       .sizeof_rec[ds_pebs]  = sizeof(long) * 10
+static const struct ds_configuration ds_cfg_netburst = {
+       .name = "Netburst",
+       .ctl[dsf_bts]           = (1 << 2) | (1 << 3),
+       .ctl[dsf_bts_kernel]    = (1 << 5),
+       .ctl[dsf_bts_user]      = (1 << 6),
+       .nr_counter_reset       = 1,
+};
+static const struct ds_configuration ds_cfg_pentium_m = {
+       .name = "Pentium M",
+       .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
+       .nr_counter_reset       = 1,
 };
-static const struct ds_configuration ds_cfg_64 = {
-       .sizeof_ds    = 8 * 12,
-       .sizeof_field = 8,
-       .sizeof_rec[ds_bts]   = 8 * 3,
-       .sizeof_rec[ds_pebs]  = 8 * 10
+static const struct ds_configuration ds_cfg_core2_atom = {
+       .name = "Core 2/Atom",
+       .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
+       .ctl[dsf_bts_kernel]    = (1 << 9),
+       .ctl[dsf_bts_user]      = (1 << 10),
+       .nr_counter_reset       = 1,
+};
+static const struct ds_configuration ds_cfg_core_i7 = {
+       .name = "Core i7",
+       .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
+       .ctl[dsf_bts_kernel]    = (1 << 9),
+       .ctl[dsf_bts_user]      = (1 << 10),
+       .nr_counter_reset       = 4,
 };
 
-static inline void
-ds_configure(const struct ds_configuration *cfg)
+static void
+ds_configure(const struct ds_configuration *cfg,
+            struct cpuinfo_x86 *cpu)
 {
+       unsigned long nr_pebs_fields = 0;
+
+       printk(KERN_INFO "[ds] using %s configuration\n", cfg->name);
+
+#ifdef __i386__
+       nr_pebs_fields = 10;
+#else
+       nr_pebs_fields = 18;
+#endif
+
+       /*
+        * Starting with version 2, architectural performance
+        * monitoring supports a format specifier.
+        */
+       if ((cpuid_eax(0xa) & 0xff) > 1) {
+               unsigned long perf_capabilities, format;
+
+               rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_capabilities);
+
+               format = (perf_capabilities >> 8) & 0xf;
+
+               switch (format) {
+               case 0:
+                       nr_pebs_fields = 18;
+                       break;
+               case 1:
+                       nr_pebs_fields = 22;
+                       break;
+               default:
+                       printk(KERN_INFO
+                              "[ds] unknown PEBS format: %lu\n", format);
+                       nr_pebs_fields = 0;
+                       break;
+               }
+       }
+
+       memset(&ds_cfg, 0, sizeof(ds_cfg));
        ds_cfg = *cfg;
+
+       ds_cfg.sizeof_ptr_field =
+               (cpu_has(cpu, X86_FEATURE_DTES64) ? 8 : 4);
+
+       ds_cfg.sizeof_rec[ds_bts]  = ds_cfg.sizeof_ptr_field * 3;
+       ds_cfg.sizeof_rec[ds_pebs] = ds_cfg.sizeof_ptr_field * nr_pebs_fields;
+
+       if (!cpu_has(cpu, X86_FEATURE_BTS)) {
+               ds_cfg.sizeof_rec[ds_bts] = 0;
+               printk(KERN_INFO "[ds] bts not available\n");
+       }
+       if (!cpu_has(cpu, X86_FEATURE_PEBS)) {
+               ds_cfg.sizeof_rec[ds_pebs] = 0;
+               printk(KERN_INFO "[ds] pebs not available\n");
+       }
+
+       printk(KERN_INFO "[ds] sizes: address: %u bit, ",
+              8 * ds_cfg.sizeof_ptr_field);
+       printk("bts/pebs record: %u/%u bytes\n",
+              ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);
+
+       WARN_ON_ONCE(MAX_PEBS_COUNTERS < ds_cfg.nr_counter_reset);
 }
 
 void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
 {
+       /* Only configure the first cpu. Others are identical. */
+       if (ds_cfg.name)
+               return;
+
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
-               case 0xD:
-               case 0xE: /* Pentium M */
-                       ds_configure(&ds_cfg_var);
+               case 0x9:
+               case 0xd: /* Pentium M */
+                       ds_configure(&ds_cfg_pentium_m, c);
+                       break;
+               case 0xf:
+               case 0x17: /* Core2 */
+               case 0x1c: /* Atom */
+                       ds_configure(&ds_cfg_core2_atom, c);
                        break;
-               case 0xF: /* Core2 */
-        case 0x1C: /* Atom */
-                       ds_configure(&ds_cfg_64);
+               case 0x1a: /* Core i7 */
+                       ds_configure(&ds_cfg_core_i7, c);
                        break;
                default:
-                       /* sorry, don't know about them */
+                       /* Sorry, don't know about them. */
                        break;
                }
                break;
-       case 0xF:
+       case 0xf:
                switch (c->x86_model) {
                case 0x0:
                case 0x1:
                case 0x2: /* Netburst */
-                       ds_configure(&ds_cfg_var);
+                       ds_configure(&ds_cfg_netburst, c);
                        break;
                default:
-                       /* sorry, don't know about them */
+                       /* Sorry, don't know about them. */
                        break;
                }
                break;
        default:
-               /* sorry, don't know about them */
+               /* Sorry, don't know about them. */
                break;
        }
 }
 
-void ds_free(struct ds_context *context)
+static inline void ds_take_timestamp(struct ds_context *context,
+                                    enum bts_qualifier qualifier,
+                                    struct task_struct *task)
+{
+       struct bts_tracer *tracer = context->bts_master;
+       struct bts_struct ts;
+
+       /* Prevent compilers from reading the tracer pointer twice. */
+       barrier();
+
+       if (!tracer || !(tracer->flags & BTS_TIMESTAMPS))
+               return;
+
+       memset(&ts, 0, sizeof(ts));
+       ts.qualifier            = qualifier;
+       ts.variant.event.clock  = trace_clock_global();
+       ts.variant.event.pid    = task->pid;
+
+       bts_write(tracer, &ts);
+}
+
+/*
+ * Change the DS configuration from tracing prev to tracing next.
+ */
+void ds_switch_to(struct task_struct *prev, struct task_struct *next)
 {
-       /* This is called when the task owning the parameter context
-        * is dying. There should not be any user of that context left
-        * to disturb us, anymore. */
-       unsigned long leftovers = context->count;
-       while (leftovers--)
-               ds_put_context(context);
+       struct ds_context *prev_ctx     = prev->thread.ds_ctx;
+       struct ds_context *next_ctx     = next->thread.ds_ctx;
+       unsigned long debugctlmsr       = next->thread.debugctlmsr;
+
+       /* Make sure all data is read before we start. */
+       barrier();
+
+       if (prev_ctx) {
+               update_debugctlmsr(0);
+
+               ds_take_timestamp(prev_ctx, bts_task_departs, prev);
+       }
+
+       if (next_ctx) {
+               ds_take_timestamp(next_ctx, bts_task_arrives, next);
+
+               wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
+       }
+
+       update_debugctlmsr(debugctlmsr);
+}
+
+static __init int ds_selftest(void)
+{
+       if (ds_cfg.sizeof_rec[ds_bts]) {
+               int error;
+
+               error = ds_selftest_bts();
+               if (error) {
+                       WARN(1, "[ds] selftest failed. disabling bts.\n");
+                       ds_cfg.sizeof_rec[ds_bts] = 0;
+               }
+       }
+
+       if (ds_cfg.sizeof_rec[ds_pebs]) {
+               int error;
+
+               error = ds_selftest_pebs();
+               if (error) {
+                       WARN(1, "[ds] selftest failed. disabling pebs.\n");
+                       ds_cfg.sizeof_rec[ds_pebs] = 0;
+               }
+       }
+
+       return 0;
 }
-#endif /* CONFIG_X86_DS */
+device_initcall(ds_selftest);