/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[TYPE_MAX][HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
/* Gather the total number of pinned and un-pinned breakpoints in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};
/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}
/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	struct list_head *list;
	struct perf_event *bp;
	unsigned long flags;
	int count = 0;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return 0;

	list = &ctx->event_list;

	raw_spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time.
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			if (find_slot_idx(bp) == type)
				count++;
	}

	raw_spin_unlock_irqrestore(&ctx->lock, flags);

	return count;
}
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			slots->pinned += max_task_bp_pinned(cpu, type);
		else
			slots->pinned += task_bp_pinned(tsk, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
		if (!tsk)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(tsk, type);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
				enum bp_type_idx type)
{
	unsigned int *tsk_pinned;
	int count = 0;

	count = task_bp_pinned(tsk, type);

	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
	if (enable) {
		tsk_pinned[count]++;
		if (count > 0)
			tsk_pinned[count-1]--;
	} else {
		tsk_pinned[count]--;
		if (count > 0)
			tsk_pinned[count-1]++;
	}
}
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable, type);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable, type);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned[type], bp->cpu)--;
}
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
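/*
 * Worked example (illustrative only, assuming an x86-like arch where
 * HBP_NUM is 4): suppose a cpu already carries 2 pinned cpu-wide
 * breakpoints, the busiest task on that cpu pins 1 more, and 1 flexible
 * counter is in use. A new pinned request then sees pinned = 2 + 1 = 3
 * and flexible = 1, so pinned + (!!flexible) == 4 == HBP_NUM and the
 * reservation below fails with -ENOSPC, keeping one register available
 * for the flexible counters.
 */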
static int __reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;

	/* Basic checks */
	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp);
	fetch_bp_busy_slots(&slots, bp, type);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
		return -ENOSPC;

	toggle_bp_slot(bp, true, type);

	return 0;
}
int reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

	return ret;
}
static void __release_bp_slot(struct perf_event *bp)
{
	enum bp_type_idx type;

	type = find_slot_idx(bp);
	toggle_bp_slot(bp, false, type);
}
void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (mutex_is_locked(&nr_bp_mutex))
		return -1;

	__release_bp_slot(bp);

	return 0;
}
static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	ret = validate_hw_breakpoint(bp);

	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
	if (ret)
		release_bp_slot(bp);

	return ret;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
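
/*
 * Example usage (a sketch, not part of this file; 'addr', 'my_handler'
 * and 'tsk' are made-up names for illustration):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;			(address to watch)
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;	(watch 4 bytes)
 *	attr.bp_type = HW_BREAKPOINT_W;		(trigger on writes)
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */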
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (!err)
		perf_event_enable(bp);

	if (err) {
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
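
/*
 * Example usage (a sketch; 'new_addr' is a made-up name): move an existing
 * breakpoint to a new address while keeping its type and length:
 *
 *	struct perf_event_attr attr = bp->attr;
 *	int err;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */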
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;

	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
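
/*
 * Example usage (a sketch modelled on the in-tree hw_breakpoint sample;
 * 'ksym_name' and 'my_wide_handler' are made-up names for illustration):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wide_bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name(ksym_name);
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wide_bp = register_wide_hw_breakpoint(&attr, my_wide_handler);
 *	if (IS_ERR((void __force *)wide_bp))
 *		return PTR_ERR((void __force *)wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */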
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);
struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
};