/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

#include <asm/processor.h>

#ifdef CONFIG_X86
#include <asm/debugreg.h>
#endif

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
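
/*
 * Note for max_task_bp_pinned(): task_bp_pinned[] is a histogram;
 * tsk_pinned[i] counts how many tasks currently own exactly i + 1 pinned
 * breakpoints on this cpu. For example, if one task owns 3 breakpoints
 * here and every other task owns at most 1, then tsk_pinned[2] is
 * non-zero and the function returns 3.
 */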

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu == -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		slots->pinned += max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		nr += max_task_bp_pinned(cpu);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;

	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}
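
/*
 * Example for toggle_bp_task_slot(): a task that already owns two
 * breakpoint counters registers a third one, so count == 2 (the new
 * counter is not yet linked in the context's list). Enabling then does
 * task_bp_pinned[2]++ (one more task owning three breakpoints) and
 * task_bp_pinned[1]-- (one less task owning only two).
 */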

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all the debug registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible counters, if
 *          any, must keep at least one register (or they will never be
 *          scheduled in).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
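
/*
 * Worked example of the check in reserve_bp_slot() below, assuming
 * HBP_NUM == 4 (the x86 case, four debug registers): with 2 cpu-pinned
 * breakpoints, a task owning at most 1 pinned breakpoint, and at least
 * one flexible counter around, slots.pinned + !!slots.flexible computes
 * to 2 + 1 + 1 == 4 == HBP_NUM, so a new pinned breakpoint is refused
 * with -ENOSPC to keep one register for the flexible counters.
 */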
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp->cpu);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}

void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}

int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	if (!bp->attr.disabled)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	bp->callback = perf_bp_event;

	return __register_perf_hw_breakpoint(bp);
}

/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If the task is -1, the breakpoint is active for all tasks on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
				int len,
				int type,
				perf_callback_t triggered,
				pid_t pid,
				int cpu,
				bool active)
{
	struct perf_event_attr *attr;
	struct perf_event *bp;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_addr = addr;
	attr->bp_len = len;
	attr->bp_type = type;
	/*
	 * Such breakpoints are used by debuggers to trigger signals when
	 * we hit the expected memory op. We can't miss such events, they
	 * must be pinned.
	 */
	attr->pinned = 1;

	if (!active)
		attr->disabled = 1;

	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
	kfree(attr);

	return bp;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    struct task_struct *tsk,
			    bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
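
/*
 * Usage sketch (not part of this file): a hypothetical caller watching a
 * one-byte write in a traced task. "my_triggered", "uaddr" and "tsk" are
 * placeholders supplied by the caller.
 *
 *	static void my_triggered(struct perf_event *bp, void *data)
 *	{
 *		printk(KERN_INFO "breakpoint hit in %s\n", current->comm);
 *	}
 *
 *	struct perf_event *bp;
 *
 *	bp = register_user_hw_breakpoint(uaddr, HW_BREAKPOINT_LEN_1,
 *					 HW_BREAKPOINT_W, my_triggered,
 *					 tsk, true);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */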

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
			  unsigned long addr,
			  int len,
			  int type,
			  perf_callback_t triggered,
			  struct task_struct *tsk,
			  bool active)
{
	/*
	 * FIXME: do it without unregistering
	 * - We don't want to lose our slot
	 * - If the new bp is incorrect, don't lose the older one
	 */
	unregister_hw_breakpoint(bp);

	return register_user_hw_breakpoint(addr, len, type, triggered,
					   tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
				  int len,
				  int type,
				  perf_callback_t triggered,
				  int cpu,
				  bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       -1, cpu, active);
}

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes, etc.)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    bool active)
{
	struct perf_event **cpu_events, **pevent, *bp;
	int cpu;
	int err = 0;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
						       triggered, cpu, active);

		*pevent = bp;

		if (IS_ERR(bp) || !bp) {
			err = bp ? PTR_ERR(bp) : -ENOMEM;
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent) || !*pevent)
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
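
/*
 * Usage sketch (not part of this file), in the spirit of
 * samples/hw_breakpoint/data_breakpoint.c: watch 4-byte writes to a
 * kernel symbol on every cpu. "my_triggered" and "ksym_name" are
 * placeholders supplied by the caller.
 *
 *	struct perf_event **wide_bp;
 *	unsigned long addr = kallsyms_lookup_name(ksym_name);
 *
 *	wide_bp = register_wide_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
 *					      HW_BREAKPOINT_W, my_triggered,
 *					      true);
 *	if (IS_ERR(wide_bp))
 *		return PTR_ERR(wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */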

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);

struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};