* Copyright (C) 2007 Alan Stern
* Copyright (C) IBM Corporation, 2009
* Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
+ *
+ * Thanks to Ingo Molnar for his many suggestions.
+ *
+ * Authors: Alan Stern <stern@rowland.harvard.edu>
+ * K.Prasad <prasad@linux.vnet.ibm.com>
+ * Frederic Weisbecker <fweisbec@gmail.com>
*/
/*
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/hw_breakpoint.h>
-#include <asm/processor.h>
-#ifdef CONFIG_X86
-#include <asm/debugreg.h>
-#endif
+/*
+ * Constraints data
+ */
+
+/* Number of pinned cpu breakpoints in a cpu */
+static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
-static atomic_t bp_slot;
+/*
+ * Histogram of pinned task breakpoints in a cpu:
+ * tsk_pinned[n] is the number of tasks having n+1 breakpoints
+ */
+static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
-int reserve_bp_slot(struct perf_event *bp)
+/* Number of non-pinned cpu/task breakpoints in a cpu */
+static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
+
+static int nr_slots[TYPE_MAX];
+
+static int constraints_initialized;
+
+/* Gather the total number of pinned and non-pinned breakpoints in a cpu set */
+struct bp_busy_slots {
+ unsigned int pinned;
+ unsigned int flexible;
+};
+
+/* Serialize accesses to the above constraints */
+static DEFINE_MUTEX(nr_bp_mutex);
+
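+/*
+ * Weight of a breakpoint: the number of slots it consumes in the
+ * constraint tables.  The default is one slot per breakpoint; an
+ * architecture may override this __weak helper, e.g. if a single
+ * request can consume several debug registers.
+ */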
+__weak int hw_breakpoint_weight(struct perf_event *bp)
{
- if (atomic_inc_return(&bp_slot) == HBP_NUM) {
- atomic_dec(&bp_slot);
+ return 1;
+}
- return -ENOSPC;
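+/* Pick which constraint table (data or instruction) this breakpoint counts against */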
+static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
+{
+ if (bp->attr.bp_type & HW_BREAKPOINT_RW)
+ return TYPE_DATA;
+
+ return TYPE_INST;
+}
+
+/*
+ * Report the maximum number of pinned breakpoints any task
+ * has on this cpu
+ */
+static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
+{
+ int i;
+ unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
+
+ for (i = nr_slots[type] - 1; i >= 0; i--) {
+ if (tsk_pinned[i] > 0)
+ return i + 1;
}
return 0;
}
-void release_bp_slot(struct perf_event *bp)
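+/*
+ * Count the breakpoints of the given type already attached to @tsk,
+ * summing their weights.  The breakpoint currently being registered
+ * is not yet on the task's event list.
+ */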
+static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+{
+ struct perf_event_context *ctx = tsk->perf_event_ctxp;
+ struct list_head *list;
+ struct perf_event *bp;
+ unsigned long flags;
+ int count = 0;
+
+ if (WARN_ONCE(!ctx, "No perf context for this task"))
+ return 0;
+
+ list = &ctx->event_list;
+
+ raw_spin_lock_irqsave(&ctx->lock, flags);
+
+ /*
+ * The current breakpoint counter is not included in the list
+ * at the open() callback time
+ */
+ list_for_each_entry(bp, list, event_entry) {
+ if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+ if (find_slot_idx(bp) == type)
+ count += hw_breakpoint_weight(bp);
+ }
+
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return count;
+}
+
+/*
+ * Report the number of pinned/un-pinned breakpoints we have in
+ * a given cpu (cpu > -1) or in all of them (cpu = -1).
+ */
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+ enum bp_type_idx type)
+{
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
+
+ if (cpu >= 0) {
+ slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
+ if (!tsk)
+ slots->pinned += max_task_bp_pinned(cpu, type);
+ else
+ slots->pinned += task_bp_pinned(tsk, type);
+ slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
+
+ return;
+ }
+
+ for_each_online_cpu(cpu) {
+ unsigned int nr;
+
+ nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+ if (!tsk)
+ nr += max_task_bp_pinned(cpu, type);
+ else
+ nr += task_bp_pinned(tsk, type);
+
+ if (nr > slots->pinned)
+ slots->pinned = nr;
+
+ nr = per_cpu(nr_bp_flexible[type], cpu);
+
+ if (nr > slots->flexible)
+ slots->flexible = nr;
+ }
+}
+
+/*
+ * For now, continue to consider flexible as pinned, until we can
+ * ensure no flexible event can ever be scheduled before a pinned event
+ * on the same cpu.
+ */
+static void
+fetch_this_slot(struct bp_busy_slots *slots, int weight)
+{
+ slots->pinned += weight;
+}
+
+/*
+ * Add/remove a pinned breakpoint for the given task in our constraint table
+ */
+static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+ enum bp_type_idx type, int weight)
+{
+ unsigned int *tsk_pinned;
+ int old_count = 0;
+ int old_idx = 0;
+ int idx = 0;
+
+ old_count = task_bp_pinned(tsk, type);
+ old_idx = old_count - 1;
+ idx = old_idx + weight;
+
+ tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
+ if (enable) {
+ tsk_pinned[idx]++;
+ if (old_count > 0)
+ tsk_pinned[old_idx]--;
+ } else {
+ tsk_pinned[idx]--;
+ if (old_count > 0)
+ tsk_pinned[old_idx]++;
+ }
+}
+
+/*
+ * Add/remove the given breakpoint in our constraint table
+ */
+static void
+toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+ int weight)
+{
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
+
+ /* Pinned counter task profiling */
+ if (tsk) {
+ if (cpu >= 0) {
+ toggle_bp_task_slot(tsk, cpu, enable, type, weight);
+ return;
+ }
+
+ for_each_online_cpu(cpu)
+ toggle_bp_task_slot(tsk, cpu, enable, type, weight);
+ return;
+ }
+
+ /* Pinned counter cpu profiling */
+ if (enable)
+ per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+ else
+ per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+}
+
+/*
+ * Constraints to check before allowing this new breakpoint counter:
+ *
+ * == Non-pinned counter == (Considered as pinned for now)
+ *
+ * - If attached to a single cpu, check:
+ *
+ * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
+ * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
+ *
+ * -> If there are already non-pinned counters in this cpu, it means
+ * there is already a free slot for them.
+ * Otherwise, we check that the maximum number of per task
+ * breakpoints (for this cpu) plus the number of per cpu breakpoints
+ * (for this cpu) doesn't use up every register.
+ *
+ * - If attached to every cpu, check:
+ *
+ * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
+ * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
+ *
+ * -> This is roughly the same, except we check the number of per cpu
+ * bp for every cpu and we keep the max one. Same for the per task
+ * breakpoints.
+ *
+ *
+ * == Pinned counter ==
+ *
+ * - If attached to a single cpu, check:
+ *
+ * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
+ * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
+ *
+ * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
+ * at least one register (or they will never be fed).
+ *
+ * - If attached to every cpu, check:
+ *
+ * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
+ * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
+ */
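+/*
+ * Illustrative example (hypothetical numbers): assume nr_slots[TYPE_DATA] = 4,
+ * a cpu with 2 cpu-pinned data breakpoints, no flexible ones, and a task that
+ * already holds 1 pinned data breakpoint there.  Registering another pinned
+ * data breakpoint of weight 1 for that task on that cpu gives
+ * slots.pinned = 2 + 1 + 1 = 4, which still fits; a further one would need
+ * 5 > 4 slots and be refused with -ENOSPC.
+ */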
+static int __reserve_bp_slot(struct perf_event *bp)
{
- atomic_dec(&bp_slot);
+ struct bp_busy_slots slots = {0};
+ enum bp_type_idx type;
+ int weight;
+
+ /* We couldn't initialize breakpoint constraints on boot */
+ if (!constraints_initialized)
+ return -ENOMEM;
+
+ /* Basic checks */
+ if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
+ bp->attr.bp_type == HW_BREAKPOINT_INVALID)
+ return -EINVAL;
+
+ type = find_slot_idx(bp);
+ weight = hw_breakpoint_weight(bp);
+
+ fetch_bp_busy_slots(&slots, bp, type);
+ fetch_this_slot(&slots, weight);
+
+ /* Flexible counters need to keep at least one slot */
+ if (slots.pinned + (!!slots.flexible) > nr_slots[type])
+ return -ENOSPC;
+
+ toggle_bp_slot(bp, true, type, weight);
+
+ return 0;
}
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int reserve_bp_slot(struct perf_event *bp)
{
int ret;
- ret = reserve_bp_slot(bp);
- if (ret)
- return ret;
+ mutex_lock(&nr_bp_mutex);
+
+ ret = __reserve_bp_slot(bp);
- if (!bp->attr.disabled)
- ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ mutex_unlock(&nr_bp_mutex);
return ret;
}
-int register_perf_hw_breakpoint(struct perf_event *bp)
+static void __release_bp_slot(struct perf_event *bp)
{
- bp->callback = perf_bp_event;
+ enum bp_type_idx type;
+ int weight;
- return __register_perf_hw_breakpoint(bp);
+ type = find_slot_idx(bp);
+ weight = hw_breakpoint_weight(bp);
+ toggle_bp_slot(bp, false, type, weight);
+}
+
+void release_bp_slot(struct perf_event *bp)
+{
+ mutex_lock(&nr_bp_mutex);
+
+ __release_bp_slot(bp);
+
+ mutex_unlock(&nr_bp_mutex);
}
/*
- * Register a breakpoint bound to a task and a given cpu.
- * If cpu is -1, the breakpoint is active for the task in every cpu
- * If the task is -1, the breakpoint is active for every tasks in the given
- * cpu.
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release breakpoint slot functions.
*/
-static struct perf_event *
-register_user_hw_breakpoint_cpu(unsigned long addr,
- int len,
- int type,
- perf_callback_t triggered,
- pid_t pid,
- int cpu,
- bool active)
+int dbg_reserve_bp_slot(struct perf_event *bp)
{
- struct perf_event_attr *attr;
- struct perf_event *bp;
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
- attr = kzalloc(sizeof(*attr), GFP_KERNEL);
- if (!attr)
- return ERR_PTR(-ENOMEM);
+ return __reserve_bp_slot(bp);
+}
- attr->type = PERF_TYPE_BREAKPOINT;
- attr->size = sizeof(*attr);
- attr->bp_addr = addr;
- attr->bp_len = len;
- attr->bp_type = type;
- /*
- * Such breakpoints are used by debuggers to trigger signals when
- * we hit the excepted memory op. We can't miss such events, they
- * must be pinned.
- */
- attr->pinned = 1;
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+ if (mutex_is_locked(&nr_bp_mutex))
+ return -1;
- if (!active)
- attr->disabled = 1;
+ __release_bp_slot(bp);
- bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
- kfree(attr);
+ return 0;
+}
- return bp;
+static int validate_hw_breakpoint(struct perf_event *bp)
+{
+ int ret;
+
+ ret = arch_validate_hwbkpt_settings(bp);
+ if (ret)
+ return ret;
+
+ if (arch_check_bp_in_kernelspace(bp)) {
+ if (bp->attr.exclude_kernel)
+ return -EINVAL;
+ /*
+ * Don't let unprivileged users set a breakpoint in the trap
+ * path to avoid trap recursion attacks.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int register_perf_hw_breakpoint(struct perf_event *bp)
+{
+ int ret;
+
+ ret = reserve_bp_slot(bp);
+ if (ret)
+ return ret;
+
+ ret = validate_hw_breakpoint(bp);
+
+ /* if validate_hw_breakpoint() fails then release bp slot */
+ if (ret)
+ release_bp_slot(bp);
+
+ return ret;
}
/**
* register_user_hw_breakpoint - register a hardware breakpoint for user space
- * @addr: is the memory address that triggers the breakpoint
- * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
- * @type: the type of the access to the memory (read/write/exec)
+ * @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @active: should we activate it while registering it
- *
*/
struct perf_event *
-register_user_hw_breakpoint(unsigned long addr,
- int len,
- int type,
- perf_callback_t triggered,
- struct task_struct *tsk,
- bool active)
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ struct task_struct *tsk)
{
- return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
- tsk->pid, -1, active);
+ return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
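+/*
+ * Illustrative usage sketch, not part of this patch: register a write
+ * breakpoint on a task.  addr, tsk and my_handler are caller-provided
+ * placeholders; this assumes the hw_breakpoint_init() helper from
+ * <linux/hw_breakpoint.h> and a handler matching perf_overflow_handler_t.
+ *
+ *	struct perf_event_attr attr;
+ *	struct perf_event *bp;
+ *
+ *	hw_breakpoint_init(&attr);
+ *	attr.bp_addr = addr;
+ *	attr.bp_len = HW_BREAKPOINT_LEN_4;
+ *	attr.bp_type = HW_BREAKPOINT_W;
+ *
+ *	bp = register_user_hw_breakpoint(&attr, my_handler, tsk);
+ *	if (IS_ERR(bp))
+ *		return PTR_ERR(bp);
+ */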
/**
* modify_user_hw_breakpoint - modify a user-space hardware breakpoint
* @bp: the breakpoint structure to modify
- * @addr: is the memory address that triggers the breakpoint
- * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
- * @type: the type of the access to the memory (read/write/exec)
+ * @attr: new breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
- * @active: should we activate it while registering it
*/
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- unsigned long addr,
- int len,
- int type,
- perf_callback_t triggered,
- struct task_struct *tsk,
- bool active)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- /*
- * FIXME: do it without unregistering
- * - We don't want to lose our slot
- * - If the new bp is incorrect, don't lose the older one
- */
- unregister_hw_breakpoint(bp);
+ u64 old_addr = bp->attr.bp_addr;
+ u64 old_len = bp->attr.bp_len;
+ int old_type = bp->attr.bp_type;
+ int err = 0;
+
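+ /*
+ * Modify the breakpoint in place instead of unregistering and
+ * re-registering it: disable the event, install the new address,
+ * type and length, re-validate, then re-enable unless the new
+ * attributes request it disabled.  On validation failure the old
+ * settings are restored so neither the breakpoint nor its slot
+ * is lost.
+ */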
+ perf_event_disable(bp);
+
+ bp->attr.bp_addr = attr->bp_addr;
+ bp->attr.bp_type = attr->bp_type;
+ bp->attr.bp_len = attr->bp_len;
+
+ if (attr->disabled)
+ goto end;
+
+ err = validate_hw_breakpoint(bp);
+ if (!err)
+ perf_event_enable(bp);
+
+ if (err) {
+ bp->attr.bp_addr = old_addr;
+ bp->attr.bp_type = old_type;
+ bp->attr.bp_len = old_len;
+ if (!bp->attr.disabled)
+ perf_event_enable(bp);
+
+ return err;
+ }
+
+end:
+ bp->attr.disabled = attr->disabled;
- return register_user_hw_breakpoint(addr, len, type, triggered,
- tsk, active);
+ return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
-static struct perf_event *
-register_kernel_hw_breakpoint_cpu(unsigned long addr,
- int len,
- int type,
- perf_callback_t triggered,
- int cpu,
- bool active)
-{
- return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
- -1, cpu, active);
-}
-
/**
* register_wide_hw_breakpoint - register a wide breakpoint in the kernel
- * @addr: is the memory address that triggers the breakpoint
- * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
- * @type: the type of the access to the memory (read/write/exec)
+ * @attr: breakpoint attributes
* @triggered: callback to trigger when we hit the breakpoint
- * @active: should we activate it while registering it
*
* @return a set of per_cpu pointers to perf events
*/
-struct perf_event **
-register_wide_hw_breakpoint(unsigned long addr,
- int len,
- int type,
- perf_callback_t triggered,
- bool active)
+struct perf_event * __percpu *
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered)
{
- struct perf_event **cpu_events, **pevent, *bp;
+ struct perf_event * __percpu *cpu_events, **pevent, *bp;
long err;
int cpu;
cpu_events = alloc_percpu(typeof(*cpu_events));
if (!cpu_events)
- return ERR_PTR(-ENOMEM);
+ return (void __percpu __force *)ERR_PTR(-ENOMEM);
- for_each_possible_cpu(cpu) {
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
- bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
- triggered, cpu, active);
+ bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
*pevent = bp;
- if (IS_ERR(bp) || !bp) {
+ if (IS_ERR(bp)) {
err = PTR_ERR(bp);
goto fail;
}
}
+ put_online_cpus();
return cpu_events;
fail:
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(cpu_events, cpu);
- if (IS_ERR(*pevent) || !*pevent)
+ if (IS_ERR(*pevent))
break;
unregister_hw_breakpoint(*pevent);
}
+ put_online_cpus();
+
free_percpu(cpu_events);
- /* return the error if any */
- return ERR_PTR(err);
+ return (void __percpu __force *)ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
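+/*
+ * Illustrative usage sketch, not part of this patch, in the spirit of
+ * samples/hw_breakpoint/data_breakpoint.c: a kernel-wide read/write
+ * breakpoint on a kernel symbol.  sample_hbp and my_handler are
+ * placeholders.
+ *
+ *	struct perf_event_attr attr;
+ *
+ *	hw_breakpoint_init(&attr);
+ *	attr.bp_addr = kallsyms_lookup_name("pid_max");
+ *	attr.bp_len = HW_BREAKPOINT_LEN_4;
+ *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ *
+ *	sample_hbp = register_wide_hw_breakpoint(&attr, my_handler);
+ *	if (IS_ERR((void __force *)sample_hbp))
+ *		return PTR_ERR((void __force *)sample_hbp);
+ */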
/**
* unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
* @cpu_events: the per cpu set of events to unregister
*/
-void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
+void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
int cpu;
struct perf_event **pevent;
}
free_percpu(cpu_events);
}
-
+EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
.notifier_call = hw_breakpoint_exceptions_notify,
static int __init init_hw_breakpoint(void)
{
+ unsigned int **task_bp_pinned;
+ int cpu, err_cpu;
+ int i;
+
+ for (i = 0; i < TYPE_MAX; i++)
+ nr_slots[i] = hw_breakpoint_slots(i);
+
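+ /* One tsk_pinned histogram per cpu and per type, sized to the slot count */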
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < TYPE_MAX; i++) {
+ task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
+ *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
+ GFP_KERNEL);
+ if (!*task_bp_pinned)
+ goto err_alloc;
+ }
+ }
+
+ constraints_initialized = 1;
+
return register_die_notifier(&hw_breakpoint_exceptions_nb);
+
+ err_alloc:
+ for_each_possible_cpu(err_cpu) {
+ if (err_cpu == cpu)
+ break;
+ for (i = 0; i < TYPE_MAX; i++)
+ kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
+ }
+
+ return -ENOMEM;
}
core_initcall(init_hw_breakpoint);
.enable = arch_install_hw_breakpoint,
.disable = arch_uninstall_hw_breakpoint,
.read = hw_breakpoint_pmu_read,
- .unthrottle = hw_breakpoint_pmu_unthrottle
};