/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/* Number of pinned cpu breakpoints on a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);

/* Number of pinned task breakpoints on a cpu */
static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);

/* Number of non-pinned cpu/task breakpoints on a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);

/* Gather the total number of pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
/*
 * Report the maximum number of pinned breakpoints a task
 * has on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu)
{
	int i;
	unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);

	for (i = HBP_NUM - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)
			return i + 1;
	}

	return 0;
}
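/*
 * Illustrative example (assuming HBP_NUM == 4, as on x86): task_bp_pinned[i]
 * counts the tasks that carry i + 1 pinned breakpoints on this cpu. With
 * task_bp_pinned = {3, 1, 0, 0}, three tasks own one breakpoint each and one
 * task owns two, so max_task_bp_pinned() returns 2.
 */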
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
{
	if (cpu >= 0) {
		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
		slots->pinned += max_task_bp_pinned(cpu);
		slots->flexible = per_cpu(nr_bp_flexible, cpu);

		return;
	}

	for_each_online_cpu(cpu) {
		unsigned int nr;

		nr = per_cpu(nr_cpu_bp_pinned, cpu);
		nr += max_task_bp_pinned(cpu);

		if (nr > slots->pinned)
			slots->pinned = nr;

		nr = per_cpu(nr_bp_flexible, cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
	}
}
/*
 * Add/remove a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
	int count = 0;
	struct perf_event *bp;
	struct perf_event_context *ctx = tsk->perf_event_ctxp;
	unsigned int *task_bp_pinned;
	struct list_head *list;
	unsigned long flags;

	if (WARN_ONCE(!ctx, "No perf context for this task"))
		return;

	list = &ctx->event_list;
	spin_lock_irqsave(&ctx->lock, flags);

	/*
	 * The current breakpoint counter is not included in the list
	 * at the open() callback time
	 */
	list_for_each_entry(bp, list, event_entry) {
		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
			count++;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
		return;

	task_bp_pinned = per_cpu(task_bp_pinned, cpu);
	if (enable) {
		task_bp_pinned[count]++;
		if (count > 0)
			task_bp_pinned[count-1]--;
	} else {
		task_bp_pinned[count]--;
		if (count > 0)
			task_bp_pinned[count-1]++;
	}
}
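/*
 * Illustrative example: when a second breakpoint is being enabled for a task
 * that already has one counter in its context, the walk above finds
 * count == 1, so the task moves from the one-breakpoint bucket to the
 * two-breakpoint bucket: task_bp_pinned[1]++ and task_bp_pinned[0]--.
 */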
/*
 * Add/remove the given breakpoint in our constraint table
 */
static void toggle_bp_slot(struct perf_event *bp, bool enable)
{
	int cpu = bp->cpu;
	struct task_struct *tsk = bp->ctx->task;

	/* Pinned counter task profiling */
	if (tsk) {
		if (cpu >= 0) {
			toggle_bp_task_slot(tsk, cpu, enable);
			return;
		}

		for_each_online_cpu(cpu)
			toggle_bp_task_slot(tsk, cpu, enable);
		return;
	}

	/* Pinned counter cpu profiling */
	if (enable)
		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
	else
		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
}
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
 */
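/*
 * Worked example of the pinned check (illustrative, assuming HBP_NUM == 4 as
 * on x86): with 2 pinned cpu-wide breakpoints on a cpu, a busiest task owning
 * 1 pinned breakpoint there, and at least one flexible counter, we get
 * slots.pinned = 2 + 1 = 3 and !!slots.flexible = 1. Since 3 + 1 == HBP_NUM,
 * reserve_bp_slot() below returns -ENOSPC: granting one more pinned
 * breakpoint would leave the flexible counters no register to rotate on.
 */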
int reserve_bp_slot(struct perf_event *bp)
{
	struct bp_busy_slots slots = {0};
	int ret = 0;

	mutex_lock(&nr_bp_mutex);

	fetch_bp_busy_slots(&slots, bp->cpu);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
		ret = -ENOSPC;
		goto end;
	}

	toggle_bp_slot(bp, true);

end:
	mutex_unlock(&nr_bp_mutex);

	return ret;
}
void release_bp_slot(struct perf_event *bp)
{
	mutex_lock(&nr_bp_mutex);

	toggle_bp_slot(bp, false);

	mutex_unlock(&nr_bp_mutex);
}
int __register_perf_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = reserve_bp_slot(bp);
	if (ret)
		return ret;

	/*
	 * Ptrace breakpoints can be temporary perf events only
	 * meant to reserve a slot. In this case, the event is created disabled
	 * and we don't want to check the params right now (as we put a null
	 * addr). But perf tools create events as disabled and we want to check
	 * the params for them.
	 * This is a quick hack that will be removed soon, once we remove
	 * the tmp breakpoints from ptrace.
	 */
	if (!bp->attr.disabled || bp->callback == perf_bp_event)
		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);

	return ret;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
	bp->callback = perf_bp_event;

	return __register_perf_hw_breakpoint(bp);
}
/*
 * Register a breakpoint bound to a task and a given cpu.
 * If cpu is -1, the breakpoint is active for the task on every cpu.
 * If pid is -1, the breakpoint is active for every task on the given
 * cpu.
 */
static struct perf_event *
register_user_hw_breakpoint_cpu(unsigned long addr,
				int len,
				int type,
				perf_callback_t triggered,
				pid_t pid,
				int cpu,
				bool active)
{
	struct perf_event_attr *attr;
	struct perf_event *bp;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = PERF_TYPE_BREAKPOINT;
	attr->size = sizeof(*attr);
	attr->bp_addr = addr;
	attr->bp_len = len;
	attr->bp_type = type;
	/*
	 * Such breakpoints are used by debuggers to trigger signals when
	 * we hit the expected memory op. We can't miss such events, they
	 * must be pinned.
	 */
	attr->pinned = 1;

	if (!active)
		attr->disabled = 1;

	bp = perf_event_create_kernel_counter(attr, cpu, pid, triggered);
	kfree(attr);

	return bp;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
register_user_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    struct task_struct *tsk,
			    bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       tsk->pid, -1, active);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
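/*
 * Usage sketch (illustrative; "addr", "tsk" and "my_bp_handler" are
 * hypothetical caller state, and the HW_BREAKPOINT_* constants come from
 * <linux/hw_breakpoint.h>): watch a 4-byte word in a task's address space
 * for writes, active immediately:
 *
 *	struct perf_event *bp;
 *
 *	bp = register_user_hw_breakpoint(addr, HW_BREAKPOINT_LEN_4,
 *					 HW_BREAKPOINT_W, my_bp_handler,
 *					 tsk, true);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */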
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 * @active: should we activate it while registering it
 */
struct perf_event *
modify_user_hw_breakpoint(struct perf_event *bp,
			  unsigned long addr,
			  int len,
			  int type,
			  perf_callback_t triggered,
			  struct task_struct *tsk,
			  bool active)
{
	/*
	 * FIXME: do it without unregistering
	 * - We don't want to lose our slot
	 * - If the new bp is incorrect, don't lose the older one
	 */
	unregister_hw_breakpoint(bp);

	return register_user_hw_breakpoint(addr, len, type, triggered,
					   tsk, active);
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
static struct perf_event *
register_kernel_hw_breakpoint_cpu(unsigned long addr,
				  int len,
				  int type,
				  perf_callback_t triggered,
				  int cpu,
				  bool active)
{
	return register_user_hw_breakpoint_cpu(addr, len, type, triggered,
					       -1, cpu, active);
}
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @addr: is the memory address that triggers the breakpoint
 * @len: the length of the access to the memory (1 byte, 2 bytes etc...)
 * @type: the type of the access to the memory (read/write/exec)
 * @triggered: callback to trigger when we hit the breakpoint
 * @active: should we activate it while registering it
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event **
register_wide_hw_breakpoint(unsigned long addr,
			    int len,
			    int type,
			    perf_callback_t triggered,
			    bool active)
{
	struct perf_event **cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return ERR_PTR(-ENOMEM);

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = register_kernel_hw_breakpoint_cpu(addr, len, type,
					triggered, cpu, active);

		*pevent = bp;

		if (IS_ERR(bp) || !bp) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}

	return cpu_events;

fail:
	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent) || !*pevent)
			break;
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
	/* return the error if any */
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
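/*
 * Usage sketch (illustrative; "some_kernel_var", "wide_bp" and
 * "wide_bp_handler" are made-up names, and the handler is assumed to follow
 * perf_callback_t's (event, data) signature): watch a kernel variable for
 * writes on every cpu, then tear the breakpoints down:
 *
 *	static void wide_bp_handler(struct perf_event *bp, void *data)
 *	{
 *		printk(KERN_INFO "some_kernel_var was written to\n");
 *		dump_stack();
 *	}
 *
 *	wide_bp = register_wide_hw_breakpoint((unsigned long)&some_kernel_var,
 *					      HW_BREAKPOINT_LEN_4,
 *					      HW_BREAKPOINT_W,
 *					      wide_bp_handler, true);
 *	if (IS_ERR(wide_bp))
 *		return PTR_ERR(wide_bp);
 *	...
 *	unregister_wide_hw_breakpoint(wide_bp);
 */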
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
{
	int cpu;
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	}
	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}
core_initcall(init_hw_breakpoint);
struct pmu perf_ops_bp = {
	.enable		= arch_install_hw_breakpoint,
	.disable	= arch_uninstall_hw_breakpoint,
	.read		= hw_breakpoint_pmu_read,
	.unthrottle	= hw_breakpoint_pmu_unthrottle
};