*
* Copyright (C) 2007 Alan Stern
* Copyright (C) IBM Corporation, 2009
+ * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Thanks to Ingo Molnar for his many suggestions.
+ *
+ * Authors: Alan Stern <stern@rowland.harvard.edu>
+ * K.Prasad <prasad@linux.vnet.ibm.com>
+ * Frederic Weisbecker <fweisbec@gmail.com>
*/
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
-#include <asm/hw_breakpoint.h>
-#include <asm/processor.h>
+#include <linux/hw_breakpoint.h>
-#ifdef CONFIG_X86
-#include <asm/debugreg.h>
-#endif
/*
- * Spinlock that protects all (un)register operations over kernel/user-space
- * breakpoint requests
+ * Constraints data
*/
-static DEFINE_SPINLOCK(hw_breakpoint_lock);
-/* Array of kernel-space breakpoint structures */
-struct hw_breakpoint *hbp_kernel[HBP_NUM];
+/* Number of pinned cpu breakpoints in a cpu */
+static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
-/*
- * Per-processor copy of hbp_kernel[]. Used only when hbp_kernel is being
- * modified but we need the older copy to handle any hbp exceptions. It will
- * sync with hbp_kernel[] value after updation is done through IPIs.
- */
-DEFINE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]);
+/* Number of pinned task breakpoints in a cpu */
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
-/*
- * Kernel breakpoints grow downwards, starting from HBP_NUM
- * 'hbp_kernel_pos' denotes lowest numbered breakpoint register occupied for
- * kernel-space request. We will initialise it here and not in an __init
- * routine because load_debug_registers(), which uses this variable can be
- * called very early during CPU initialisation.
- */
-unsigned int hbp_kernel_pos = HBP_NUM;
+/* Number of non-pinned cpu/task breakpoints in a cpu */
+static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
-/*
- * An array containing refcount of threads using a given bkpt register
- * Accesses are synchronised by acquiring hw_breakpoint_lock
- */
-unsigned int hbp_user_refcount[HBP_NUM];
+/* Gather the total number of pinned and un-pinned breakpoints in a cpuset */
+struct bp_busy_slots {
+ unsigned int pinned;
+ unsigned int flexible;
+};
+
+/* Serialize accesses to the above constraints */
+static DEFINE_MUTEX(nr_bp_mutex);
/*
- * Load the debug registers during startup of a CPU.
+ * Report the maximum number of pinned breakpoints a task
+ * has on this cpu
*/
-void load_debug_registers(void)
+static unsigned int max_task_bp_pinned(int cpu)
+{
+ int i;
+ unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+
+ for (i = HBP_NUM - 1; i >= 0; i--) {
+ if (tsk_pinned[i] > 0)
+ return i + 1;
+ }
+
+ return 0;
+}
+
+static int task_bp_pinned(struct task_struct *tsk)
{
+ struct perf_event_context *ctx = tsk->perf_event_ctxp;
+ struct list_head *list;
+ struct perf_event *bp;
unsigned long flags;
- struct task_struct *tsk = current;
+ int count = 0;
+
+ if (WARN_ONCE(!ctx, "No perf context for this task"))
+ return 0;
- spin_lock_bh(&hw_breakpoint_lock);
+ list = &ctx->event_list;
- /* Prevent IPIs for new kernel breakpoint updates */
- local_irq_save(flags);
- arch_update_kernel_hw_breakpoint(NULL);
- local_irq_restore(flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
- if (test_tsk_thread_flag(tsk, TIF_DEBUG))
- arch_install_thread_hw_breakpoint(tsk);
+ /*
+ * The current breakpoint counter is not included in the list
+ * at the open() callback time
+ */
+ list_for_each_entry(bp, list, event_entry) {
+ if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+ count++;
+ }
- spin_unlock_bh(&hw_breakpoint_lock);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return count;
}
/*
- * Erase all the hardware breakpoint info associated with a thread.
- *
- * If tsk != current then tsk must not be usable (for example, a
- * child being cleaned up from a failed fork).
+ * Report the number of pinned/un-pinned breakpoints we have in
+ * a given cpu (cpu > -1) or in all of them (cpu = -1).
*/
-void flush_thread_hw_breakpoint(struct task_struct *tsk)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
- int i;
- struct thread_struct *thread = &(tsk->thread);
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
- spin_lock_bh(&hw_breakpoint_lock);
+ if (cpu >= 0) {
+ slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
+ if (!tsk)
+ slots->pinned += max_task_bp_pinned(cpu);
+ else
+ slots->pinned += task_bp_pinned(tsk);
+ slots->flexible = per_cpu(nr_bp_flexible, cpu);
- /* The thread no longer has any breakpoints associated with it */
- clear_tsk_thread_flag(tsk, TIF_DEBUG);
- for (i = 0; i < HBP_NUM; i++) {
- if (thread->hbp[i]) {
- hbp_user_refcount[i]--;
- kfree(thread->hbp[i]);
- thread->hbp[i] = NULL;
- }
+ return;
}
- arch_flush_thread_hw_breakpoint(tsk);
+ for_each_online_cpu(cpu) {
+ unsigned int nr;
+
+ nr = per_cpu(nr_cpu_bp_pinned, cpu);
+ if (!tsk)
+ nr += max_task_bp_pinned(cpu);
+ else
+ nr += task_bp_pinned(tsk);
+
+ if (nr > slots->pinned)
+ slots->pinned = nr;
+
+ nr = per_cpu(nr_bp_flexible, cpu);
- /* Actually uninstall the breakpoints if necessary */
- if (tsk == current)
- arch_uninstall_thread_hw_breakpoint();
- spin_unlock_bh(&hw_breakpoint_lock);
+ if (nr > slots->flexible)
+ slots->flexible = nr;
+ }
}
/*
- * Copy the hardware breakpoint info from a thread to its cloned child.
+ * Add/remove a pinned breakpoint for the given task in our per-cpu constraint table
*/
-int copy_thread_hw_breakpoint(struct task_struct *tsk,
- struct task_struct *child, unsigned long clone_flags)
+static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
- /*
- * We will assume that breakpoint settings are not inherited
- * and the child starts out with no debug registers set.
- * But what about CLONE_PTRACE?
- */
- clear_tsk_thread_flag(child, TIF_DEBUG);
+ unsigned int *tsk_pinned;
+ int count = 0;
+
+ count = task_bp_pinned(tsk);
+
+ tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+ if (enable) {
+ tsk_pinned[count]++;
+ if (count > 0)
+ tsk_pinned[count-1]--;
+ } else {
+ tsk_pinned[count]--;
+ if (count > 0)
+ tsk_pinned[count-1]++;
+ }
+}
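
/*
 * Worked example of the bookkeeping above: tsk_pinned[n] counts how many
 * tasks currently have n + 1 pinned breakpoints on this cpu. A task that
 * already has 2 pinned breakpoints is accounted in tsk_pinned[1]; enabling
 * a third one makes task_bp_pinned() return 2, hence tsk_pinned[2]++ and
 * tsk_pinned[1]--: the task moves from the "2 breakpoints" bucket to the
 * "3 breakpoints" bucket, and max_task_bp_pinned() now reports 3.
 */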
- /* We will call flush routine since the debugregs are not inherited */
- arch_flush_thread_hw_breakpoint(child);
+/*
+ * Add/remove the given breakpoint in our constraint table
+ */
+static void toggle_bp_slot(struct perf_event *bp, bool enable)
+{
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
+
+ /* Pinned counter task profiling */
+ if (tsk) {
+ if (cpu >= 0) {
+ toggle_bp_task_slot(tsk, cpu, enable);
+ return;
+ }
- return 0;
+ for_each_online_cpu(cpu)
+ toggle_bp_task_slot(tsk, cpu, enable);
+ return;
+ }
+
+ /* Pinned counter cpu profiling */
+ if (enable)
+ per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
+ else
+ per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}
-static int __register_user_hw_breakpoint(int pos, struct task_struct *tsk,
- struct hw_breakpoint *bp)
+/*
+ * Constraints to check before allowing this new breakpoint counter:
+ *
+ * == Non-pinned counter == (Considered as pinned for now)
+ *
+ * - If attached to a single cpu, check:
+ *
+ * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
+ * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
+ *
+ * -> If there are already non-pinned counters in this cpu, it means
+ * there is already a free slot for them.
+ * Otherwise, we check that the maximum number of per-task
+ * breakpoints (for this cpu) plus the number of per-cpu breakpoints
+ * (for this cpu) doesn't cover all the registers.
+ *
+ * - If attached to every cpu, check:
+ *
+ * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
+ * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
+ *
+ * -> This is roughly the same, except we check the number of per-cpu
+ * breakpoints for every cpu and keep the max one. Same for the per-task
+ * breakpoints.
+ *
+ *
+ * == Pinned counter ==
+ *
+ * - If attached to a single cpu, check:
+ *
+ * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
+ * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
+ *
+ * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
+ * at least one register (or they will never be fed).
+ *
+ * - If attached to every cpu, check:
+ *
+ * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
+ * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
+ */
+static int __reserve_bp_slot(struct perf_event *bp)
{
- struct thread_struct *thread = &(tsk->thread);
- int rc;
+ struct bp_busy_slots slots = {0};
+
+ fetch_bp_busy_slots(&slots, bp);
- /* Do not overcommit. Fail if kernel has used the hbp registers */
- if (pos >= hbp_kernel_pos)
+ /* Flexible counters need to keep at least one slot */
+ if (slots.pinned + (!!slots.flexible) == HBP_NUM)
return -ENOSPC;
- rc = arch_validate_hwbkpt_settings(bp, tsk);
- if (rc)
- return rc;
+ toggle_bp_slot(bp, true);
- thread->hbp[pos] = bp;
- hbp_user_refcount[pos]++;
+ return 0;
+}
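
/*
 * A numeric sketch of the check above, assuming HBP_NUM == 4 (as on x86):
 * if a cpu already has 2 pinned per-cpu breakpoints and its busiest task
 * has 1 pinned breakpoint, fetch_bp_busy_slots() yields slots.pinned == 3.
 * With one flexible counter also present, slots.flexible != 0, so
 * 3 + 1 == HBP_NUM and the new pinned breakpoint is refused with -ENOSPC:
 * the flexible counter must keep at least one debug register to run in.
 */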
- arch_update_user_hw_breakpoint(pos, tsk);
- /*
- * Does it need to be installed right now?
- * Otherwise it will get installed the next time tsk runs
- */
- if (tsk == current)
- arch_install_thread_hw_breakpoint(tsk);
+int reserve_bp_slot(struct perf_event *bp)
+{
+ int ret;
+
+ mutex_lock(&nr_bp_mutex);
+
+ ret = __reserve_bp_slot(bp);
+
+ mutex_unlock(&nr_bp_mutex);
- return rc;
+ return ret;
+}
+
+static void __release_bp_slot(struct perf_event *bp)
+{
+ toggle_bp_slot(bp, false);
+}
+
+void release_bp_slot(struct perf_event *bp)
+{
+ mutex_lock(&nr_bp_mutex);
+
+ __release_bp_slot(bp);
+
+ mutex_unlock(&nr_bp_mutex);
}
/*
- * Modify the address of a hbp register already in use by the task
- * Do not invoke this in-lieu of a __unregister_user_hw_breakpoint()
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release breakpoint slot functions.
*/
-static int __modify_user_hw_breakpoint(int pos, struct task_struct *tsk,
- struct hw_breakpoint *bp)
+int dbg_reserve_bp_slot(struct perf_event *bp)
{
- struct thread_struct *thread = &(tsk->thread);
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
- if ((pos >= hbp_kernel_pos) || (arch_validate_hwbkpt_settings(bp, tsk)))
- return -EINVAL;
-
- if (thread->hbp[pos] == NULL)
- return -EINVAL;
+ return __reserve_bp_slot(bp);
+}
- thread->hbp[pos] = bp;
- /*
- * 'pos' must be that of a hbp register already used by 'tsk'
- * Otherwise arch_modify_user_hw_breakpoint() will fail
- */
- arch_update_user_hw_breakpoint(pos, tsk);
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
- if (tsk == current)
- arch_install_thread_hw_breakpoint(tsk);
+ __release_bp_slot(bp);
return 0;
}
-static void __unregister_user_hw_breakpoint(int pos, struct task_struct *tsk)
+int register_perf_hw_breakpoint(struct perf_event *bp)
{
- hbp_user_refcount[pos]--;
- tsk->thread.hbp[pos] = NULL;
+ int ret;
+
+ ret = reserve_bp_slot(bp);
+ if (ret)
+ return ret;
+
+ /*
+ * Ptrace breakpoints can be temporary perf events only
+ * meant to reserve a slot. In this case, they are created disabled and
+ * we don't want to check the params right now (as we put a null addr).
+ * But perf tools create events as disabled and we want to check
+ * the params for them.
+ * This is a quick hack that will be removed soon, once we remove
+ * the tmp breakpoints from ptrace.
+ */
+ if (!bp->attr.disabled || !bp->overflow_handler)
+ ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
- arch_update_user_hw_breakpoint(pos, tsk);
+ /* if arch_validate_hwbkpt_settings() fails then release bp slot */
+ if (ret)
+ release_bp_slot(bp);
- if (tsk == current)
- arch_install_thread_hw_breakpoint(tsk);
+ return ret;
}
/**
* register_user_hw_breakpoint - register a hardware breakpoint for user space
+ * @attr: breakpoint attributes
+ * @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
- *
*/
-int register_user_hw_breakpoint(struct task_struct *tsk,
- struct hw_breakpoint *bp)
+struct perf_event *
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ struct task_struct *tsk)
{
- struct thread_struct *thread = &(tsk->thread);
- int i, rc = -ENOSPC;
-
- spin_lock_bh(&hw_breakpoint_lock);
-
- for (i = 0; i < hbp_kernel_pos; i++) {
- if (!thread->hbp[i]) {
- rc = __register_user_hw_breakpoint(i, tsk, bp);
- break;
- }
- }
- if (!rc)
- set_tsk_thread_flag(tsk, TIF_DEBUG);
-
- spin_unlock_bh(&hw_breakpoint_lock);
- return rc;
+ return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
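
/*
 * A minimal usage sketch of the API above, assuming the hw_breakpoint_init()
 * helper and the HW_BREAKPOINT_* flags from <linux/hw_breakpoint.h>; the
 * helper name, handler and address below are illustrative.
 */
static struct perf_event *
watch_task_write(struct task_struct *tsk, unsigned long addr,
		 perf_overflow_handler_t handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);		/* PERF_TYPE_BREAKPOINT defaults */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes */

	/* Returns an ERR_PTR() if no slot is free or the settings are invalid */
	return register_user_hw_breakpoint(&attr, handler, tsk);
}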
/**
* modify_user_hw_breakpoint - modify a user-space hardware breakpoint
+ * @bp: the breakpoint structure to modify
+ * @attr: new breakpoint attributes
+ * @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @bp: the breakpoint structure to unregister
- *
*/
-int modify_user_hw_breakpoint(struct task_struct *tsk, struct hw_breakpoint *bp)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- struct thread_struct *thread = &(tsk->thread);
- int i, ret = -ENOENT;
+ u64 old_addr = bp->attr.bp_addr;
+ u64 old_len = bp->attr.bp_len;
+ int old_type = bp->attr.bp_type;
+ int err = 0;
- spin_lock_bh(&hw_breakpoint_lock);
- for (i = 0; i < hbp_kernel_pos; i++) {
- if (bp == thread->hbp[i]) {
- ret = __modify_user_hw_breakpoint(i, tsk, bp);
- break;
- }
+ perf_event_disable(bp);
+
+ bp->attr.bp_addr = attr->bp_addr;
+ bp->attr.bp_type = attr->bp_type;
+ bp->attr.bp_len = attr->bp_len;
+
+ if (attr->disabled)
+ goto end;
+
+ err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ if (!err)
+ perf_event_enable(bp);
+
+ if (err) {
+ bp->attr.bp_addr = old_addr;
+ bp->attr.bp_type = old_type;
+ bp->attr.bp_len = old_len;
+ if (!bp->attr.disabled)
+ perf_event_enable(bp);
+
+ return err;
}
- spin_unlock_bh(&hw_breakpoint_lock);
- return ret;
+
+end:
+ bp->attr.disabled = attr->disabled;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
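
/*
 * Sketch: move an existing user breakpoint to a new address. The helper
 * name and new_addr are illustrative; all other attributes are carried
 * over from the breakpoint's current settings.
 */
static int move_user_breakpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* copy the current settings */

	attr.bp_addr = new_addr;

	/* Re-validates the new settings and re-enables the counter on success */
	return modify_user_hw_breakpoint(bp, &attr);
}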
/**
- * unregister_user_hw_breakpoint - unregister a user-space hardware breakpoint
- * @tsk: pointer to 'task_struct' of the process to which the address belongs
+ * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
* @bp: the breakpoint structure to unregister
- *
*/
-void unregister_user_hw_breakpoint(struct task_struct *tsk,
- struct hw_breakpoint *bp)
+void unregister_hw_breakpoint(struct perf_event *bp)
{
- struct thread_struct *thread = &(tsk->thread);
- int i, pos = -1, hbp_counter = 0;
-
- spin_lock_bh(&hw_breakpoint_lock);
- for (i = 0; i < hbp_kernel_pos; i++) {
- if (thread->hbp[i])
- hbp_counter++;
- if (bp == thread->hbp[i])
- pos = i;
- }
- if (pos >= 0) {
- __unregister_user_hw_breakpoint(pos, tsk);
- hbp_counter--;
- }
- if (!hbp_counter)
- clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
- spin_unlock_bh(&hw_breakpoint_lock);
+ if (!bp)
+ return;
+ perf_event_release_kernel(bp);
}
-EXPORT_SYMBOL_GPL(unregister_user_hw_breakpoint);
+EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
- * register_kernel_hw_breakpoint - register a hardware breakpoint for kernel space
- * @bp: the breakpoint structure to register
- *
- * @bp.info->name or @bp.info->address, @bp.info->len, @bp.info->type and
- * @bp->triggered must be set properly before invocation
+ * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
+ * @attr: breakpoint attributes
+ * @triggered: callback to trigger when we hit the breakpoint
*
+ * @return a set of per_cpu pointers to perf events
*/
-int register_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+struct perf_event * __percpu *
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered)
{
- int rc;
+ struct perf_event * __percpu *cpu_events, **pevent, *bp;
+ long err;
+ int cpu;
- rc = arch_validate_hwbkpt_settings(bp, NULL);
- if (rc)
- return rc;
+ cpu_events = alloc_percpu(typeof(*cpu_events));
+ if (!cpu_events)
+ return (void __percpu __force *)ERR_PTR(-ENOMEM);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(cpu_events, cpu);
+ bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
+
+ *pevent = bp;
+
+ if (IS_ERR(bp)) {
+ err = PTR_ERR(bp);
+ goto fail;
+ }
+ }
+ put_online_cpus();
- spin_lock_bh(&hw_breakpoint_lock);
+ return cpu_events;
- rc = -ENOSPC;
- /* Check if we are over-committing */
- if ((hbp_kernel_pos > 0) && (!hbp_user_refcount[hbp_kernel_pos-1])) {
- hbp_kernel_pos--;
- hbp_kernel[hbp_kernel_pos] = bp;
- on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
- rc = 0;
+fail:
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(cpu_events, cpu);
+ if (IS_ERR(*pevent))
+ break;
+ unregister_hw_breakpoint(*pevent);
}
+ put_online_cpus();
- spin_unlock_bh(&hw_breakpoint_lock);
- return rc;
+ free_percpu(cpu_events);
+ return (void __percpu __force *)ERR_PTR(err);
}
-EXPORT_SYMBOL_GPL(register_kernel_hw_breakpoint);
+EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
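
/*
 * Sketch of a kernel-wide breakpoint: one perf event per online cpu, all
 * watching the same address. The symbol name and handler are illustrative;
 * hw_breakpoint_init() and kallsyms_lookup_name() are assumed available.
 * Teardown would call unregister_wide_hw_breakpoint(wide_bp).
 */
static struct perf_event * __percpu *wide_bp;

static int watch_pid_max(perf_overflow_handler_t handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("pid_max");
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;

	wide_bp = register_wide_hw_breakpoint(&attr, handler);
	if (IS_ERR((void __force *)wide_bp))
		return PTR_ERR((void __force *)wide_bp);

	return 0;
}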
/**
- * unregister_kernel_hw_breakpoint - unregister a HW breakpoint for kernel space
- * @bp: the breakpoint structure to unregister
- *
- * Uninstalls and unregisters @bp.
+ * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
+ * @cpu_events: the per cpu set of events to unregister
*/
-void unregister_kernel_hw_breakpoint(struct hw_breakpoint *bp)
+void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
- int i, j;
-
- spin_lock_bh(&hw_breakpoint_lock);
+ int cpu;
+ struct perf_event **pevent;
- /* Find the 'bp' in our list of breakpoints for kernel */
- for (i = hbp_kernel_pos; i < HBP_NUM; i++)
- if (bp == hbp_kernel[i])
- break;
-
- /* Check if we did not find a match for 'bp'. If so return early */
- if (i == HBP_NUM) {
- spin_unlock_bh(&hw_breakpoint_lock);
- return;
+ for_each_possible_cpu(cpu) {
+ pevent = per_cpu_ptr(cpu_events, cpu);
+ unregister_hw_breakpoint(*pevent);
}
-
- /*
- * We'll shift the breakpoints one-level above to compact if
- * unregistration creates a hole
- */
- for (j = i; j > hbp_kernel_pos; j--)
- hbp_kernel[j] = hbp_kernel[j-1];
-
- hbp_kernel[hbp_kernel_pos] = NULL;
- on_each_cpu(arch_update_kernel_hw_breakpoint, NULL, 1);
- hbp_kernel_pos++;
-
- spin_unlock_bh(&hw_breakpoint_lock);
+ free_percpu(cpu_events);
}
-EXPORT_SYMBOL_GPL(unregister_kernel_hw_breakpoint);
+EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
.notifier_call = hw_breakpoint_exceptions_notify,
};

static int __init init_hw_breakpoint(void)
{
return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
-
core_initcall(init_hw_breakpoint);
+
+
+struct pmu perf_ops_bp = {
+ .enable = arch_install_hw_breakpoint,
+ .disable = arch_uninstall_hw_breakpoint,
+ .read = hw_breakpoint_pmu_read,
+};