2 * Performance counter core code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
8 * For licensing details see kernel-base/COPYING
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/file.h>
16 #include <linux/poll.h>
17 #include <linux/sysfs.h>
18 #include <linux/ptrace.h>
19 #include <linux/percpu.h>
20 #include <linux/vmstat.h>
21 #include <linux/hardirq.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 #include <linux/syscalls.h>
25 #include <linux/anon_inodes.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/perf_counter.h>
29 #include <asm/irq_regs.h>
32 * Each CPU has a list of per-CPU counters:
34 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
36 int perf_max_counters __read_mostly = 1;
37 static int perf_reserved_percpu __read_mostly;
38 static int perf_overcommit __read_mostly = 1;
41 * Mutex for (sysadmin-configurable) counter reservations:
43 static DEFINE_MUTEX(perf_resource_mutex);
46 * Architecture-provided APIs - weak aliases:
48 extern __weak const struct hw_perf_counter_ops *
49 hw_perf_counter_init(struct perf_counter *counter)
54 u64 __weak hw_perf_save_disable(void) { return 0; }
55 void __weak hw_perf_restore(u64 ctrl) { barrier(); }
56 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
57 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
58 struct perf_cpu_context *cpuctx,
59 struct perf_counter_context *ctx, int cpu)
64 void __weak perf_counter_print_debug(void) { }
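/*
 * The pattern used throughout this file whenever the counter lists are
 * manipulated, sketched here for clarity (existing API usage, not a new
 * interface):
 *
 *	u64 flags = hw_perf_save_disable();
 *	... add/remove/rotate entries on ctx->counter_list ...
 *	hw_perf_restore(flags);
 *
 * hw_perf_save_disable() quiesces the PMU (and thus counter NMIs) so the
 * list manipulation cannot race with counter interrupts; on architectures
 * without hardware support the weak stubs above make it a no-op.
 */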
67 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
69 struct perf_counter *group_leader = counter->group_leader;
72 * Depending on whether it is a standalone or sibling counter,
73 * add it straight to the context's counter list, or to the group
74 * leader's sibling list:
76 if (counter->group_leader == counter)
77 list_add_tail(&counter->list_entry, &ctx->counter_list);
79 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
81 list_add_rcu(&counter->event_entry, &ctx->event_list);
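/*
 * Note that a counter ends up on two lists: ctx->counter_list (via
 * list_entry) is walked by the scheduling code under ctx->lock, while
 * ctx->event_list (via event_entry) is walked under RCU by the
 * software-counter event path below, hence list_add_rcu() here.
 */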
85 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
87 struct perf_counter *sibling, *tmp;
89 list_del_init(&counter->list_entry);
90 list_del_rcu(&counter->event_entry);
93 * If this was a group counter with sibling counters then
94 * upgrade the siblings to singleton counters by adding them
95 * to the context list directly:
97 list_for_each_entry_safe(sibling, tmp,
98 &counter->sibling_list, list_entry) {
100 list_move_tail(&sibling->list_entry, &ctx->counter_list);
101 sibling->group_leader = sibling;
106 counter_sched_out(struct perf_counter *counter,
107 struct perf_cpu_context *cpuctx,
108 struct perf_counter_context *ctx)
110 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
113 counter->state = PERF_COUNTER_STATE_INACTIVE;
114 counter->hw_ops->disable(counter);
117 if (!is_software_counter(counter))
118 cpuctx->active_oncpu--;
120 if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
121 cpuctx->exclusive = 0;
125 group_sched_out(struct perf_counter *group_counter,
126 struct perf_cpu_context *cpuctx,
127 struct perf_counter_context *ctx)
129 struct perf_counter *counter;
131 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
134 counter_sched_out(group_counter, cpuctx, ctx);
137 * Schedule out siblings (if any):
139 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
140 counter_sched_out(counter, cpuctx, ctx);
142 if (group_counter->hw_event.exclusive)
143 cpuctx->exclusive = 0;
147 * Cross CPU call to remove a performance counter
149 * We disable the counter on the hardware level first. After that we
150 * remove it from the context list.
152 static void __perf_counter_remove_from_context(void *info)
154 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
155 struct perf_counter *counter = info;
156 struct perf_counter_context *ctx = counter->ctx;
161 * If this is a task context, we need to check whether it is
162 * the current task context of this CPU. If not, it has been
163 * scheduled out before the smp call arrived.
165 if (ctx->task && cpuctx->task_ctx != ctx)
168 curr_rq_lock_irq_save(&flags);
169 spin_lock(&ctx->lock);
171 counter_sched_out(counter, cpuctx, ctx);
173 counter->task = NULL;
177 * Protect the list operation against NMI by disabling the
178 * counters on a global level. NOP for non-NMI based counters.
180 perf_flags = hw_perf_save_disable();
181 list_del_counter(counter, ctx);
182 hw_perf_restore(perf_flags);
186 * Allow more per-task counters with respect to the reservation:
189 cpuctx->max_pertask =
190 min(perf_max_counters - ctx->nr_counters,
191 perf_max_counters - perf_reserved_percpu);
194 spin_unlock(&ctx->lock);
195 curr_rq_unlock_irq_restore(&flags);
200 * Remove the counter from a task's (or a CPU's) list of counters.
202 * Must be called with counter->mutex and ctx->mutex held.
204 * CPU counters are removed with a smp call. For task counters we only
205 * call when the task is on a CPU.
207 static void perf_counter_remove_from_context(struct perf_counter *counter)
209 struct perf_counter_context *ctx = counter->ctx;
210 struct task_struct *task = ctx->task;
214 * Per-cpu counters are removed via an smp call and
215 * the removal is always successful.
217 smp_call_function_single(counter->cpu,
218 __perf_counter_remove_from_context,
224 task_oncpu_function_call(task, __perf_counter_remove_from_context,
227 spin_lock_irq(&ctx->lock);
229 * If the context is active we need to retry the smp call.
231 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
232 spin_unlock_irq(&ctx->lock);
237 * The lock prevents this context from being scheduled in, so we
238 * can remove the counter safely if the call above did not succeed.
241 if (!list_empty(&counter->list_entry)) {
243 list_del_counter(counter, ctx);
244 counter->task = NULL;
246 spin_unlock_irq(&ctx->lock);
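/*
 * The remove, disable, enable and install operations all follow the same
 * retry scheme seen above: fire a cross-CPU call to do the work on the CPU
 * that currently runs the context, then re-check under ctx->lock whether
 * the call actually did it (the task may have been scheduled out before
 * the IPI arrived) and, if not, either retry the call or perform the
 * update locally now that the context cannot be scheduled in.
 */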
250 * Cross CPU call to disable a performance counter
252 static void __perf_counter_disable(void *info)
254 struct perf_counter *counter = info;
255 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
256 struct perf_counter_context *ctx = counter->ctx;
260 * If this is a per-task counter, we need to check whether this
261 * counter's task is the current task on this cpu.
263 if (ctx->task && cpuctx->task_ctx != ctx)
266 curr_rq_lock_irq_save(&flags);
267 spin_lock(&ctx->lock);
270 * If the counter is on, turn it off.
271 * If it is in error state, leave it in error state.
273 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
274 if (counter == counter->group_leader)
275 group_sched_out(counter, cpuctx, ctx);
277 counter_sched_out(counter, cpuctx, ctx);
278 counter->state = PERF_COUNTER_STATE_OFF;
281 spin_unlock(&ctx->lock);
282 curr_rq_unlock_irq_restore(&flags);
288 static void perf_counter_disable(struct perf_counter *counter)
290 struct perf_counter_context *ctx = counter->ctx;
291 struct task_struct *task = ctx->task;
295 * Disable the counter on the cpu that it's on
297 smp_call_function_single(counter->cpu, __perf_counter_disable,
303 task_oncpu_function_call(task, __perf_counter_disable, counter);
305 spin_lock_irq(&ctx->lock);
307 * If the counter is still active, we need to retry the cross-call.
309 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
310 spin_unlock_irq(&ctx->lock);
315 * Since we have the lock this context can't be scheduled
316 * in, so we can change the state safely.
318 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
319 counter->state = PERF_COUNTER_STATE_OFF;
321 spin_unlock_irq(&ctx->lock);
325 * Disable a counter and all its children.
327 static void perf_counter_disable_family(struct perf_counter *counter)
329 struct perf_counter *child;
331 perf_counter_disable(counter);
334 * Lock the mutex to protect the list of children
336 mutex_lock(&counter->mutex);
337 list_for_each_entry(child, &counter->child_list, child_list)
338 perf_counter_disable(child);
339 mutex_unlock(&counter->mutex);
343 counter_sched_in(struct perf_counter *counter,
344 struct perf_cpu_context *cpuctx,
345 struct perf_counter_context *ctx,
348 if (counter->state <= PERF_COUNTER_STATE_OFF)
351 counter->state = PERF_COUNTER_STATE_ACTIVE;
352 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
354 * The new state must be visible before we turn it on in the hardware:
358 if (counter->hw_ops->enable(counter)) {
359 counter->state = PERF_COUNTER_STATE_INACTIVE;
364 if (!is_software_counter(counter))
365 cpuctx->active_oncpu++;
368 if (counter->hw_event.exclusive)
369 cpuctx->exclusive = 1;
375 * Return 1 for a group consisting entirely of software counters,
376 * 0 if the group contains any hardware counters.
378 static int is_software_only_group(struct perf_counter *leader)
380 struct perf_counter *counter;
382 if (!is_software_counter(leader))
384 list_for_each_entry(counter, &leader->sibling_list, list_entry)
385 if (!is_software_counter(counter))
391 * Work out whether we can put this counter group on the CPU now.
393 static int group_can_go_on(struct perf_counter *counter,
394 struct perf_cpu_context *cpuctx,
398 * Groups consisting entirely of software counters can always go on.
400 if (is_software_only_group(counter))
403 * If an exclusive group is already on, no other hardware
404 * counters can go on.
406 if (cpuctx->exclusive)
409 * If this group is exclusive and there are already
410 * counters on the CPU, it can't go on.
412 if (counter->hw_event.exclusive && cpuctx->active_oncpu)
415 * Otherwise, try to add it if all previous groups were able to go on.
422 * Cross CPU call to install and enable a performance counter
424 static void __perf_install_in_context(void *info)
426 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
427 struct perf_counter *counter = info;
428 struct perf_counter_context *ctx = counter->ctx;
429 struct perf_counter *leader = counter->group_leader;
430 int cpu = smp_processor_id();
436 * If this is a task context, we need to check whether it is
437 * the current task context of this CPU. If not, it has been
438 * scheduled out before the smp call arrived.
440 if (ctx->task && cpuctx->task_ctx != ctx)
443 curr_rq_lock_irq_save(&flags);
444 spin_lock(&ctx->lock);
447 * Protect the list operation against NMI by disabling the
448 * counters on a global level. NOP for non-NMI based counters.
450 perf_flags = hw_perf_save_disable();
452 list_add_counter(counter, ctx);
454 counter->prev_state = PERF_COUNTER_STATE_OFF;
457 * Don't put the counter on if it is disabled or if
458 * it is in a group and the group isn't on.
460 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
461 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
465 * An exclusive counter can't go on if there are already active
466 * hardware counters, and no hardware counter can go on if there
467 * is already an exclusive counter on.
469 if (!group_can_go_on(counter, cpuctx, 1))
472 err = counter_sched_in(counter, cpuctx, ctx, cpu);
476 * This counter couldn't go on. If it is in a group
477 * then we have to pull the whole group off.
478 * If the counter group is pinned then put it in error state.
480 if (leader != counter)
481 group_sched_out(leader, cpuctx, ctx);
482 if (leader->hw_event.pinned)
483 leader->state = PERF_COUNTER_STATE_ERROR;
486 if (!err && !ctx->task && cpuctx->max_pertask)
487 cpuctx->max_pertask--;
490 hw_perf_restore(perf_flags);
492 spin_unlock(&ctx->lock);
493 curr_rq_unlock_irq_restore(&flags);
497 * Attach a performance counter to a context
499 * First we add the counter to the list with the hardware enable bit
500 * in counter->hw_config cleared.
502 * If the counter is attached to a task which is on a CPU we use a smp
503 * call to enable it in the task context. The task might have been
504 * scheduled away, but we check this in the smp call again.
506 * Must be called with ctx->mutex held.
509 perf_install_in_context(struct perf_counter_context *ctx,
510 struct perf_counter *counter,
513 struct task_struct *task = ctx->task;
517 * Per-cpu counters are installed via an smp call and
518 * the install is always successful.
520 smp_call_function_single(cpu, __perf_install_in_context,
525 counter->task = task;
527 task_oncpu_function_call(task, __perf_install_in_context,
530 spin_lock_irq(&ctx->lock);
532 * If the context is active we need to retry the smp call.
534 if (ctx->is_active && list_empty(&counter->list_entry)) {
535 spin_unlock_irq(&ctx->lock);
540 * The lock prevents this context from being scheduled in, so we
541 * can add the counter safely if the call above did not succeed.
544 if (list_empty(&counter->list_entry)) {
545 list_add_counter(counter, ctx);
548 spin_unlock_irq(&ctx->lock);
552 * Cross CPU call to enable a performance counter
554 static void __perf_counter_enable(void *info)
556 struct perf_counter *counter = info;
557 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558 struct perf_counter_context *ctx = counter->ctx;
559 struct perf_counter *leader = counter->group_leader;
564 * If this is a per-task counter, we need to check whether this
565 * counter's task is the current task on this cpu.
567 if (ctx->task && cpuctx->task_ctx != ctx)
570 curr_rq_lock_irq_save(&flags);
571 spin_lock(&ctx->lock);
573 counter->prev_state = counter->state;
574 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
576 counter->state = PERF_COUNTER_STATE_INACTIVE;
579 * If the counter is in a group and isn't the group leader,
580 * then don't put it on unless the group is on.
582 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
585 if (!group_can_go_on(counter, cpuctx, 1))
588 err = counter_sched_in(counter, cpuctx, ctx,
593 * If this counter can't go on and it's part of a
594 * group, then the whole group has to come off.
596 if (leader != counter)
597 group_sched_out(leader, cpuctx, ctx);
598 if (leader->hw_event.pinned)
599 leader->state = PERF_COUNTER_STATE_ERROR;
603 spin_unlock(&ctx->lock);
604 curr_rq_unlock_irq_restore(&flags);
610 static void perf_counter_enable(struct perf_counter *counter)
612 struct perf_counter_context *ctx = counter->ctx;
613 struct task_struct *task = ctx->task;
617 * Enable the counter on the cpu that it's on
619 smp_call_function_single(counter->cpu, __perf_counter_enable,
624 spin_lock_irq(&ctx->lock);
625 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
629 * If the counter is in error state, clear that first.
630 * That way, if we see the counter in error state below, we
631 * know that it has gone back into error state, as distinct
632 * from the task having been scheduled away before the
633 * cross-call arrived.
635 if (counter->state == PERF_COUNTER_STATE_ERROR)
636 counter->state = PERF_COUNTER_STATE_OFF;
639 spin_unlock_irq(&ctx->lock);
640 task_oncpu_function_call(task, __perf_counter_enable, counter);
642 spin_lock_irq(&ctx->lock);
645 * If the context is active and the counter is still off,
646 * we need to retry the cross-call.
648 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
652 * Since we have the lock this context can't be scheduled
653 * in, so we can change the state safely.
655 if (counter->state == PERF_COUNTER_STATE_OFF)
656 counter->state = PERF_COUNTER_STATE_INACTIVE;
658 spin_unlock_irq(&ctx->lock);
662 * Enable a counter and all its children.
664 static void perf_counter_enable_family(struct perf_counter *counter)
666 struct perf_counter *child;
668 perf_counter_enable(counter);
671 * Lock the mutex to protect the list of children
673 mutex_lock(&counter->mutex);
674 list_for_each_entry(child, &counter->child_list, child_list)
675 perf_counter_enable(child);
676 mutex_unlock(&counter->mutex);
679 void __perf_counter_sched_out(struct perf_counter_context *ctx,
680 struct perf_cpu_context *cpuctx)
682 struct perf_counter *counter;
685 spin_lock(&ctx->lock);
687 if (likely(!ctx->nr_counters))
690 flags = hw_perf_save_disable();
691 if (ctx->nr_active) {
692 list_for_each_entry(counter, &ctx->counter_list, list_entry)
693 group_sched_out(counter, cpuctx, ctx);
695 hw_perf_restore(flags);
697 spin_unlock(&ctx->lock);
701 * Called from scheduler to remove the counters of the current task,
702 * with interrupts disabled.
704 * We stop each counter and update the counter value in counter->count.
706 * This does not protect us against NMI, but disable()
707 * sets the disabled bit in the control field of counter _before_
708 * accessing the counter control register. If an NMI hits, then it will
709 * not restart the counter.
711 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
713 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
714 struct perf_counter_context *ctx = &task->perf_counter_ctx;
715 struct pt_regs *regs;
717 if (likely(!cpuctx->task_ctx))
720 regs = task_pt_regs(task);
721 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
722 __perf_counter_sched_out(ctx, cpuctx);
724 cpuctx->task_ctx = NULL;
727 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
729 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
733 group_sched_in(struct perf_counter *group_counter,
734 struct perf_cpu_context *cpuctx,
735 struct perf_counter_context *ctx,
738 struct perf_counter *counter, *partial_group;
741 if (group_counter->state == PERF_COUNTER_STATE_OFF)
744 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
746 return ret < 0 ? ret : 0;
748 group_counter->prev_state = group_counter->state;
749 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
753 * Schedule in siblings as one group (if any):
755 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
756 counter->prev_state = counter->state;
757 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
758 partial_group = counter;
767 * Groups can be scheduled in as one unit only, so undo any
768 * partial group before returning:
770 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
771 if (counter == partial_group)
773 counter_sched_out(counter, cpuctx, ctx);
775 counter_sched_out(group_counter, cpuctx, ctx);
781 __perf_counter_sched_in(struct perf_counter_context *ctx,
782 struct perf_cpu_context *cpuctx, int cpu)
784 struct perf_counter *counter;
788 spin_lock(&ctx->lock);
790 if (likely(!ctx->nr_counters))
793 flags = hw_perf_save_disable();
796 * First go through the list and put on any pinned groups
797 * in order to give them the best chance of going on.
799 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
800 if (counter->state <= PERF_COUNTER_STATE_OFF ||
801 !counter->hw_event.pinned)
803 if (counter->cpu != -1 && counter->cpu != cpu)
806 if (group_can_go_on(counter, cpuctx, 1))
807 group_sched_in(counter, cpuctx, ctx, cpu);
810 * If this pinned group hasn't been scheduled,
811 * put it in error state.
813 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
814 counter->state = PERF_COUNTER_STATE_ERROR;
817 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
819 * Ignore counters in OFF or ERROR state, and
820 * ignore pinned counters since we did them already.
822 if (counter->state <= PERF_COUNTER_STATE_OFF ||
823 counter->hw_event.pinned)
827 * Listen to the 'cpu' scheduling filter constraint
830 if (counter->cpu != -1 && counter->cpu != cpu)
833 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
834 if (group_sched_in(counter, cpuctx, ctx, cpu))
838 hw_perf_restore(flags);
840 spin_unlock(&ctx->lock);
844 * Called from scheduler to add the counters of the current task
845 * with interrupts disabled.
847 * We restore the counter value and then enable it.
849 * This does not protect us against NMI, but enable()
850 * sets the enabled bit in the control field of counter _before_
851 * accessing the counter control register. If an NMI hits, then it will
852 * keep the counter running.
854 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
856 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
857 struct perf_counter_context *ctx = &task->perf_counter_ctx;
859 __perf_counter_sched_in(ctx, cpuctx, cpu);
860 cpuctx->task_ctx = ctx;
863 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
865 struct perf_counter_context *ctx = &cpuctx->ctx;
867 __perf_counter_sched_in(ctx, cpuctx, cpu);
870 int perf_counter_task_disable(void)
872 struct task_struct *curr = current;
873 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
874 struct perf_counter *counter;
879 if (likely(!ctx->nr_counters))
882 curr_rq_lock_irq_save(&flags);
883 cpu = smp_processor_id();
885 /* force the update of the task clock: */
886 __task_delta_exec(curr, 1);
888 perf_counter_task_sched_out(curr, cpu);
890 spin_lock(&ctx->lock);
893 * Disable all the counters:
895 perf_flags = hw_perf_save_disable();
897 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
898 if (counter->state != PERF_COUNTER_STATE_ERROR)
899 counter->state = PERF_COUNTER_STATE_OFF;
902 hw_perf_restore(perf_flags);
904 spin_unlock(&ctx->lock);
906 curr_rq_unlock_irq_restore(&flags);
911 int perf_counter_task_enable(void)
913 struct task_struct *curr = current;
914 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
915 struct perf_counter *counter;
920 if (likely(!ctx->nr_counters))
923 curr_rq_lock_irq_save(&flags);
924 cpu = smp_processor_id();
926 /* force the update of the task clock: */
927 __task_delta_exec(curr, 1);
929 perf_counter_task_sched_out(curr, cpu);
931 spin_lock(&ctx->lock);
934 * Temporarily disable the hardware while we enable all the counters:
936 perf_flags = hw_perf_save_disable();
938 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
939 if (counter->state > PERF_COUNTER_STATE_OFF)
941 counter->state = PERF_COUNTER_STATE_INACTIVE;
942 counter->hw_event.disabled = 0;
944 hw_perf_restore(perf_flags);
946 spin_unlock(&ctx->lock);
948 perf_counter_task_sched_in(curr, cpu);
950 curr_rq_unlock_irq_restore(&flags);
956 * Round-robin a context's counters:
958 static void rotate_ctx(struct perf_counter_context *ctx)
960 struct perf_counter *counter;
963 if (!ctx->nr_counters)
966 spin_lock(&ctx->lock);
968 * Rotate the first entry last (works just fine for group counters too):
970 perf_flags = hw_perf_save_disable();
971 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
972 list_move_tail(&counter->list_entry, &ctx->counter_list);
975 hw_perf_restore(perf_flags);
977 spin_unlock(&ctx->lock);
980 void perf_counter_task_tick(struct task_struct *curr, int cpu)
982 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
983 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
984 const int rotate_percpu = 0;
987 perf_counter_cpu_sched_out(cpuctx);
988 perf_counter_task_sched_out(curr, cpu);
991 rotate_ctx(&cpuctx->ctx);
995 perf_counter_cpu_sched_in(cpuctx, cpu);
996 perf_counter_task_sched_in(curr, cpu);
1000 * Cross CPU call to read the hardware counter
1002 static void __read(void *info)
1004 struct perf_counter *counter = info;
1005 unsigned long flags;
1007 curr_rq_lock_irq_save(&flags);
1008 counter->hw_ops->read(counter);
1009 curr_rq_unlock_irq_restore(&flags);
1012 static u64 perf_counter_read(struct perf_counter *counter)
1015 * If counter is enabled and currently active on a CPU, update the
1016 * value in the counter structure:
1018 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1019 smp_call_function_single(counter->oncpu,
1020 __read, counter, 1);
1023 return atomic64_read(&counter->count);
1026 static void put_context(struct perf_counter_context *ctx)
1029 put_task_struct(ctx->task);
1032 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1034 struct perf_cpu_context *cpuctx;
1035 struct perf_counter_context *ctx;
1036 struct task_struct *task;
1039 * If cpu is not a wildcard then this is a percpu counter:
1042 /* Must be root to operate on a CPU counter: */
1043 if (!capable(CAP_SYS_ADMIN))
1044 return ERR_PTR(-EACCES);
1046 if (cpu < 0 || cpu > num_possible_cpus())
1047 return ERR_PTR(-EINVAL);
1050 * We could be clever and allow attaching a counter to an
1051 * offline CPU and activate it when the CPU comes up, but that's for later.
1054 if (!cpu_isset(cpu, cpu_online_map))
1055 return ERR_PTR(-ENODEV);
1057 cpuctx = &per_cpu(perf_cpu_context, cpu);
1067 task = find_task_by_vpid(pid);
1069 get_task_struct(task);
1073 return ERR_PTR(-ESRCH);
1075 ctx = &task->perf_counter_ctx;
1078 /* Reuse ptrace permission checks for now. */
1079 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1081 return ERR_PTR(-EACCES);
1087 static void free_counter_rcu(struct rcu_head *head)
1089 struct perf_counter *counter;
1091 counter = container_of(head, struct perf_counter, rcu_head);
1095 static void free_counter(struct perf_counter *counter)
1097 if (counter->destroy)
1098 counter->destroy(counter);
1100 call_rcu(&counter->rcu_head, free_counter_rcu);
1104 * Called when the last reference to the file is gone.
1106 static int perf_release(struct inode *inode, struct file *file)
1108 struct perf_counter *counter = file->private_data;
1109 struct perf_counter_context *ctx = counter->ctx;
1111 file->private_data = NULL;
1113 mutex_lock(&ctx->mutex);
1114 mutex_lock(&counter->mutex);
1116 perf_counter_remove_from_context(counter);
1118 mutex_unlock(&counter->mutex);
1119 mutex_unlock(&ctx->mutex);
1121 free_counter(counter);
1128 * Read the performance counter - simple non-blocking version for now
1131 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1135 if (count < sizeof(cntval))
1139 * Return end-of-file for a read on a counter that is in
1140 * error state (i.e. because it was pinned but it couldn't be
1141 * scheduled on to the CPU at some point).
1143 if (counter->state == PERF_COUNTER_STATE_ERROR)
1146 mutex_lock(&counter->mutex);
1147 cntval = perf_counter_read(counter);
1148 mutex_unlock(&counter->mutex);
1150 return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
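/*
 * Seen from userspace, a counter fd is read as a single u64; a minimal
 * sketch (error handling omitted):
 *
 *	u64 count;
 *	read(fd, &count, sizeof(count));
 *
 * A counter in error state reads as end-of-file, per the comment above.
 */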
1154 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1156 struct perf_counter *counter = file->private_data;
1158 return perf_read_hw(counter, buf, count);
1161 static unsigned int perf_poll(struct file *file, poll_table *wait)
1163 struct perf_counter *counter = file->private_data;
1164 struct perf_mmap_data *data;
1165 unsigned int events;
1168 data = rcu_dereference(counter->data);
1170 events = atomic_xchg(&data->wakeup, 0);
1175 poll_wait(file, &counter->waitq, wait);
1180 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1182 struct perf_counter *counter = file->private_data;
1186 case PERF_COUNTER_IOC_ENABLE:
1187 perf_counter_enable_family(counter);
1189 case PERF_COUNTER_IOC_DISABLE:
1190 perf_counter_disable_family(counter);
1198 static void __perf_counter_update_userpage(struct perf_counter *counter,
1199 struct perf_mmap_data *data)
1201 struct perf_counter_mmap_page *userpg = data->user_page;
1204 * Disable preemption so as to not let the corresponding user-space
1205 * spin too long if we get preempted.
1210 userpg->index = counter->hw.idx;
1211 userpg->offset = atomic64_read(&counter->count);
1212 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1213 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1215 userpg->data_head = atomic_read(&data->head);
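/*
 * The fields written above are what a self-monitoring task reads from the
 * first mmap()ed page: ->index and ->offset are meant to be combined with
 * a direct read of the hardware counter (RDPMC on x86, for instance) to
 * obtain the up-to-date count, and ->data_head tells userspace how far the
 * kernel has written into the data pages that follow.  A rough userspace
 * sketch (names illustrative only):
 *
 *	struct perf_counter_mmap_page *pc = base;   (first mapped page)
 *	u64 head = pc->data_head;
 *	... consume ring-buffer bytes below head ...
 */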
1221 void perf_counter_update_userpage(struct perf_counter *counter)
1223 struct perf_mmap_data *data;
1226 data = rcu_dereference(counter->data);
1228 __perf_counter_update_userpage(counter, data);
1232 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1234 struct perf_counter *counter = vma->vm_file->private_data;
1235 struct perf_mmap_data *data;
1236 int ret = VM_FAULT_SIGBUS;
1239 data = rcu_dereference(counter->data);
1243 if (vmf->pgoff == 0) {
1244 vmf->page = virt_to_page(data->user_page);
1246 int nr = vmf->pgoff - 1;
1248 if ((unsigned)nr > data->nr_pages)
1251 vmf->page = virt_to_page(data->data_pages[nr]);
1253 get_page(vmf->page);
1261 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1263 struct perf_mmap_data *data;
1267 WARN_ON(atomic_read(&counter->mmap_count));
1269 size = sizeof(struct perf_mmap_data);
1270 size += nr_pages * sizeof(void *);
1272 data = kzalloc(size, GFP_KERNEL);
1276 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1277 if (!data->user_page)
1278 goto fail_user_page;
1280 for (i = 0; i < nr_pages; i++) {
1281 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1282 if (!data->data_pages[i])
1283 goto fail_data_pages;
1286 data->nr_pages = nr_pages;
1288 rcu_assign_pointer(counter->data, data);
1293 for (i--; i >= 0; i--)
1294 free_page((unsigned long)data->data_pages[i]);
1296 free_page((unsigned long)data->user_page);
1305 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1307 struct perf_mmap_data *data = container_of(rcu_head,
1308 struct perf_mmap_data, rcu_head);
1311 free_page((unsigned long)data->user_page);
1312 for (i = 0; i < data->nr_pages; i++)
1313 free_page((unsigned long)data->data_pages[i]);
1317 static void perf_mmap_data_free(struct perf_counter *counter)
1319 struct perf_mmap_data *data = counter->data;
1321 WARN_ON(atomic_read(&counter->mmap_count));
1323 rcu_assign_pointer(counter->data, NULL);
1324 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1327 static void perf_mmap_open(struct vm_area_struct *vma)
1329 struct perf_counter *counter = vma->vm_file->private_data;
1331 atomic_inc(&counter->mmap_count);
1334 static void perf_mmap_close(struct vm_area_struct *vma)
1336 struct perf_counter *counter = vma->vm_file->private_data;
1338 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1339 &counter->mmap_mutex)) {
1340 perf_mmap_data_free(counter);
1341 mutex_unlock(&counter->mmap_mutex);
1345 static struct vm_operations_struct perf_mmap_vmops = {
1346 .open = perf_mmap_open,
1347 .close = perf_mmap_close,
1348 .fault = perf_mmap_fault,
1351 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1353 struct perf_counter *counter = file->private_data;
1354 unsigned long vma_size;
1355 unsigned long nr_pages;
1356 unsigned long locked, lock_limit;
1359 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1362 vma_size = vma->vm_end - vma->vm_start;
1363 nr_pages = (vma_size / PAGE_SIZE) - 1;
1365 if (nr_pages == 0 || !is_power_of_2(nr_pages))
1368 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1371 if (vma->vm_pgoff != 0)
1374 locked = vma_size >> PAGE_SHIFT;
1375 locked += vma->vm_mm->locked_vm;
1377 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1378 lock_limit >>= PAGE_SHIFT;
1380 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
1383 mutex_lock(&counter->mmap_mutex);
1384 if (atomic_inc_not_zero(&counter->mmap_count))
1387 WARN_ON(counter->data);
1388 ret = perf_mmap_data_alloc(counter, nr_pages);
1390 atomic_set(&counter->mmap_count, 1);
1392 mutex_unlock(&counter->mmap_mutex);
1394 vma->vm_flags &= ~VM_MAYWRITE;
1395 vma->vm_flags |= VM_RESERVED;
1396 vma->vm_ops = &perf_mmap_vmops;
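/*
 * Layout expected from userspace: one control page followed by a
 * power-of-two number of data pages, mapped shared and read-only.
 * A hedged sketch of the corresponding mmap() call (fd comes from the
 * perf_counter_open syscall):
 *
 *	size_t len = (1 + nr_data_pages) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 */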
1401 static const struct file_operations perf_fops = {
1402 .release = perf_release,
1405 .unlocked_ioctl = perf_ioctl,
1406 .compat_ioctl = perf_ioctl,
1414 struct perf_output_handle {
1415 struct perf_counter *counter;
1416 struct perf_mmap_data *data;
1417 unsigned int offset;
1421 static int perf_output_begin(struct perf_output_handle *handle,
1422 struct perf_counter *counter, unsigned int size)
1424 struct perf_mmap_data *data;
1425 unsigned int offset, head;
1428 data = rcu_dereference(counter->data);
1432 if (!data->nr_pages)
1436 offset = head = atomic_read(&data->head);
1438 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1440 handle->counter = counter;
1441 handle->data = data;
1442 handle->offset = offset;
1443 handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1453 static void perf_output_copy(struct perf_output_handle *handle,
1454 void *buf, unsigned int len)
1456 unsigned int pages_mask;
1457 unsigned int offset;
1461 offset = handle->offset;
1462 pages_mask = handle->data->nr_pages - 1;
1463 pages = handle->data->data_pages;
1466 unsigned int page_offset;
1469 nr = (offset >> PAGE_SHIFT) & pages_mask;
1470 page_offset = offset & (PAGE_SIZE - 1);
1471 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1473 memcpy(pages[nr] + page_offset, buf, size);
1480 handle->offset = offset;
1483 static void perf_output_end(struct perf_output_handle *handle, int nmi)
1485 if (handle->wakeup) {
1486 (void)atomic_xchg(&handle->data->wakeup, POLL_IN);
1487 __perf_counter_update_userpage(handle->counter, handle->data);
1489 handle->counter->wakeup_pending = 1;
1490 set_perf_counter_pending();
1492 wake_up(&handle->counter->waitq);
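/*
 * Output is a three-step affair: perf_output_begin() reserves space by
 * advancing data->head in a cmpxchg loop (so regular and NMI context can
 * nest safely), perf_output_copy() fills the reserved bytes page by page,
 * and perf_output_end() wakes any poll()ers once a page boundary has been
 * crossed, deferring the wakeup through set_perf_counter_pending() when
 * called from NMI context.
 */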
1497 static int perf_output_write(struct perf_counter *counter, int nmi,
1498 void *buf, ssize_t size)
1500 struct perf_output_handle handle;
1503 ret = perf_output_begin(&handle, counter, size);
1507 perf_output_copy(&handle, buf, size);
1508 perf_output_end(&handle, nmi);
1514 static void perf_output_simple(struct perf_counter *counter,
1515 int nmi, struct pt_regs *regs)
1519 entry = instruction_pointer(regs);
1521 perf_output_write(counter, nmi, &entry, sizeof(entry));
1524 struct group_entry {
1529 static void perf_output_group(struct perf_counter *counter, int nmi)
1531 struct perf_counter *leader, *sub;
1533 leader = counter->group_leader;
1534 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1535 struct group_entry entry;
1538 sub->hw_ops->read(sub);
1540 entry.event = sub->hw_event.config;
1541 entry.counter = atomic64_read(&sub->count);
1543 perf_output_write(counter, nmi, &entry, sizeof(entry));
1547 void perf_counter_output(struct perf_counter *counter,
1548 int nmi, struct pt_regs *regs)
1550 switch (counter->hw_event.record_type) {
1551 case PERF_RECORD_SIMPLE:
1554 case PERF_RECORD_IRQ:
1555 perf_output_simple(counter, nmi, regs);
1558 case PERF_RECORD_GROUP:
1559 perf_output_group(counter, nmi);
1565 * Generic software counter infrastructure
1568 static void perf_swcounter_update(struct perf_counter *counter)
1570 struct hw_perf_counter *hwc = &counter->hw;
1575 prev = atomic64_read(&hwc->prev_count);
1576 now = atomic64_read(&hwc->count);
1577 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
1582 atomic64_add(delta, &counter->count);
1583 atomic64_sub(delta, &hwc->period_left);
1586 static void perf_swcounter_set_period(struct perf_counter *counter)
1588 struct hw_perf_counter *hwc = &counter->hw;
1589 s64 left = atomic64_read(&hwc->period_left);
1590 s64 period = hwc->irq_period;
1592 if (unlikely(left <= -period)) {
1594 atomic64_set(&hwc->period_left, left);
1597 if (unlikely(left <= 0)) {
1599 atomic64_add(period, &hwc->period_left);
1602 atomic64_set(&hwc->prev_count, -left);
1603 atomic64_set(&hwc->count, -left);
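/*
 * Seeding both hw->prev_count and hw->count with -left means that
 * atomic64_add_negative() in perf_swcounter_add() turns non-negative
 * exactly when the remaining period has elapsed, at which point
 * perf_swcounter_overflow() emits a sample and re-arms the period.
 */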
1606 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1608 struct perf_counter *counter;
1609 struct pt_regs *regs;
1611 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
1612 counter->hw_ops->read(counter);
1614 regs = get_irq_regs();
1616 * In case we exclude kernel IPs or are somehow not in interrupt
1617 * context, provide the next best thing, the user IP.
1619 if ((counter->hw_event.exclude_kernel || !regs) &&
1620 !counter->hw_event.exclude_user)
1621 regs = task_pt_regs(current);
1624 perf_counter_output(counter, 0, regs);
1626 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
1628 return HRTIMER_RESTART;
1631 static void perf_swcounter_overflow(struct perf_counter *counter,
1632 int nmi, struct pt_regs *regs)
1634 perf_swcounter_update(counter);
1635 perf_swcounter_set_period(counter);
1636 perf_counter_output(counter, nmi, regs);
1639 static int perf_swcounter_match(struct perf_counter *counter,
1640 enum perf_event_types type,
1641 u32 event, struct pt_regs *regs)
1643 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1646 if (perf_event_raw(&counter->hw_event))
1649 if (perf_event_type(&counter->hw_event) != type)
1652 if (perf_event_id(&counter->hw_event) != event)
1655 if (counter->hw_event.exclude_user && user_mode(regs))
1658 if (counter->hw_event.exclude_kernel && !user_mode(regs))
1664 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
1665 int nmi, struct pt_regs *regs)
1667 int neg = atomic64_add_negative(nr, &counter->hw.count);
1668 if (counter->hw.irq_period && !neg)
1669 perf_swcounter_overflow(counter, nmi, regs);
1672 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
1673 enum perf_event_types type, u32 event,
1674 u64 nr, int nmi, struct pt_regs *regs)
1676 struct perf_counter *counter;
1678 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1682 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1683 if (perf_swcounter_match(counter, type, event, regs))
1684 perf_swcounter_add(counter, nr, nmi, regs);
1689 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
1692 return &cpuctx->recursion[3];
1695 return &cpuctx->recursion[2];
1698 return &cpuctx->recursion[1];
1700 return &cpuctx->recursion[0];
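/*
 * One recursion slot per execution level (task, softirq, hardirq, NMI):
 * if a software event fires while one is already being processed at the
 * same level, __perf_swcounter_event() drops it instead of recursing into
 * the counter lists again.
 */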
1703 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1704 u64 nr, int nmi, struct pt_regs *regs)
1706 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1707 int *recursion = perf_swcounter_recursion_context(cpuctx);
1715 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1716 if (cpuctx->task_ctx) {
1717 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1725 put_cpu_var(perf_cpu_context);
1728 void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1730 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
1733 static void perf_swcounter_read(struct perf_counter *counter)
1735 perf_swcounter_update(counter);
1738 static int perf_swcounter_enable(struct perf_counter *counter)
1740 perf_swcounter_set_period(counter);
1744 static void perf_swcounter_disable(struct perf_counter *counter)
1746 perf_swcounter_update(counter);
1749 static const struct hw_perf_counter_ops perf_ops_generic = {
1750 .enable = perf_swcounter_enable,
1751 .disable = perf_swcounter_disable,
1752 .read = perf_swcounter_read,
1756 * Software counter: cpu wall time clock
1759 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1761 int cpu = raw_smp_processor_id();
1765 now = cpu_clock(cpu);
1766 prev = atomic64_read(&counter->hw.prev_count);
1767 atomic64_set(&counter->hw.prev_count, now);
1768 atomic64_add(now - prev, &counter->count);
1771 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1773 struct hw_perf_counter *hwc = &counter->hw;
1774 int cpu = raw_smp_processor_id();
1776 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
1777 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1778 hwc->hrtimer.function = perf_swcounter_hrtimer;
1779 if (hwc->irq_period) {
1780 __hrtimer_start_range_ns(&hwc->hrtimer,
1781 ns_to_ktime(hwc->irq_period), 0,
1782 HRTIMER_MODE_REL, 0);
1788 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1790 hrtimer_cancel(&counter->hw.hrtimer);
1791 cpu_clock_perf_counter_update(counter);
1794 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1796 cpu_clock_perf_counter_update(counter);
1799 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1800 .enable = cpu_clock_perf_counter_enable,
1801 .disable = cpu_clock_perf_counter_disable,
1802 .read = cpu_clock_perf_counter_read,
1806 * Software counter: task time clock
1810 * Called from within the scheduler:
1812 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1814 struct task_struct *curr = counter->task;
1817 delta = __task_delta_exec(curr, update);
1819 return curr->se.sum_exec_runtime + delta;
1822 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1827 prev = atomic64_read(&counter->hw.prev_count);
1829 atomic64_set(&counter->hw.prev_count, now);
1833 atomic64_add(delta, &counter->count);
1836 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1838 struct hw_perf_counter *hwc = &counter->hw;
1840 atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
1841 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1842 hwc->hrtimer.function = perf_swcounter_hrtimer;
1843 if (hwc->irq_period) {
1844 __hrtimer_start_range_ns(&hwc->hrtimer,
1845 ns_to_ktime(hwc->irq_period), 0,
1846 HRTIMER_MODE_REL, 0);
1852 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1854 hrtimer_cancel(&counter->hw.hrtimer);
1855 task_clock_perf_counter_update(counter,
1856 task_clock_perf_counter_val(counter, 0));
1859 static void task_clock_perf_counter_read(struct perf_counter *counter)
1861 task_clock_perf_counter_update(counter,
1862 task_clock_perf_counter_val(counter, 1));
1865 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1866 .enable = task_clock_perf_counter_enable,
1867 .disable = task_clock_perf_counter_disable,
1868 .read = task_clock_perf_counter_read,
1872 * Software counter: cpu migrations
1875 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1877 struct task_struct *curr = counter->ctx->task;
1880 return curr->se.nr_migrations;
1881 return cpu_nr_migrations(smp_processor_id());
1884 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1889 prev = atomic64_read(&counter->hw.prev_count);
1890 now = get_cpu_migrations(counter);
1892 atomic64_set(&counter->hw.prev_count, now);
1896 atomic64_add(delta, &counter->count);
1899 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1901 cpu_migrations_perf_counter_update(counter);
1904 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1906 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1907 atomic64_set(&counter->hw.prev_count,
1908 get_cpu_migrations(counter));
1912 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1914 cpu_migrations_perf_counter_update(counter);
1917 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1918 .enable = cpu_migrations_perf_counter_enable,
1919 .disable = cpu_migrations_perf_counter_disable,
1920 .read = cpu_migrations_perf_counter_read,
1923 #ifdef CONFIG_EVENT_PROFILE
1924 void perf_tpcounter_event(int event_id)
1926 struct pt_regs *regs = get_irq_regs();
1929 regs = task_pt_regs(current);
1931 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
1934 extern int ftrace_profile_enable(int);
1935 extern void ftrace_profile_disable(int);
1937 static void tp_perf_counter_destroy(struct perf_counter *counter)
1939 ftrace_profile_disable(perf_event_id(&counter->hw_event));
1942 static const struct hw_perf_counter_ops *
1943 tp_perf_counter_init(struct perf_counter *counter)
1945 int event_id = perf_event_id(&counter->hw_event);
1948 ret = ftrace_profile_enable(event_id);
1952 counter->destroy = tp_perf_counter_destroy;
1953 counter->hw.irq_period = counter->hw_event.irq_period;
1955 return &perf_ops_generic;
1958 static const struct hw_perf_counter_ops *
1959 tp_perf_counter_init(struct perf_counter *counter)
1965 static const struct hw_perf_counter_ops *
1966 sw_perf_counter_init(struct perf_counter *counter)
1968 struct perf_counter_hw_event *hw_event = &counter->hw_event;
1969 const struct hw_perf_counter_ops *hw_ops = NULL;
1970 struct hw_perf_counter *hwc = &counter->hw;
1973 * Software counters (currently) can't in general distinguish
1974 * between user, kernel and hypervisor events.
1975 * However, context switches and cpu migrations are considered
1976 * to be kernel events, and page faults are never hypervisor
1979 switch (perf_event_id(&counter->hw_event)) {
1980 case PERF_COUNT_CPU_CLOCK:
1981 hw_ops = &perf_ops_cpu_clock;
1983 if (hw_event->irq_period && hw_event->irq_period < 10000)
1984 hw_event->irq_period = 10000;
1986 case PERF_COUNT_TASK_CLOCK:
1988 * If the user instantiates this as a per-cpu counter,
1989 * use the cpu_clock counter instead.
1991 if (counter->ctx->task)
1992 hw_ops = &perf_ops_task_clock;
1994 hw_ops = &perf_ops_cpu_clock;
1996 if (hw_event->irq_period && hw_event->irq_period < 10000)
1997 hw_event->irq_period = 10000;
1999 case PERF_COUNT_PAGE_FAULTS:
2000 case PERF_COUNT_PAGE_FAULTS_MIN:
2001 case PERF_COUNT_PAGE_FAULTS_MAJ:
2002 case PERF_COUNT_CONTEXT_SWITCHES:
2003 hw_ops = &perf_ops_generic;
2005 case PERF_COUNT_CPU_MIGRATIONS:
2006 if (!counter->hw_event.exclude_kernel)
2007 hw_ops = &perf_ops_cpu_migrations;
2012 hwc->irq_period = hw_event->irq_period;
2018 * Allocate and initialize a counter structure
2020 static struct perf_counter *
2021 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2023 struct perf_counter_context *ctx,
2024 struct perf_counter *group_leader,
2027 const struct hw_perf_counter_ops *hw_ops;
2028 struct perf_counter *counter;
2030 counter = kzalloc(sizeof(*counter), gfpflags);
2035 * Single counters are their own group leaders, with an
2036 * empty sibling list:
2039 group_leader = counter;
2041 mutex_init(&counter->mutex);
2042 INIT_LIST_HEAD(&counter->list_entry);
2043 INIT_LIST_HEAD(&counter->event_entry);
2044 INIT_LIST_HEAD(&counter->sibling_list);
2045 init_waitqueue_head(&counter->waitq);
2047 mutex_init(&counter->mmap_mutex);
2049 INIT_LIST_HEAD(&counter->child_list);
2052 counter->hw_event = *hw_event;
2053 counter->wakeup_pending = 0;
2054 counter->group_leader = group_leader;
2055 counter->hw_ops = NULL;
2058 counter->state = PERF_COUNTER_STATE_INACTIVE;
2059 if (hw_event->disabled)
2060 counter->state = PERF_COUNTER_STATE_OFF;
2064 if (perf_event_raw(hw_event)) {
2065 hw_ops = hw_perf_counter_init(counter);
2069 switch (perf_event_type(hw_event)) {
2070 case PERF_TYPE_HARDWARE:
2071 hw_ops = hw_perf_counter_init(counter);
2074 case PERF_TYPE_SOFTWARE:
2075 hw_ops = sw_perf_counter_init(counter);
2078 case PERF_TYPE_TRACEPOINT:
2079 hw_ops = tp_perf_counter_init(counter);
2088 counter->hw_ops = hw_ops;
2094 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
2096 * @hw_event_uptr: event type attributes for monitoring/sampling
2099 * @group_fd: group leader counter fd
2101 SYSCALL_DEFINE5(perf_counter_open,
2102 const struct perf_counter_hw_event __user *, hw_event_uptr,
2103 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2105 struct perf_counter *counter, *group_leader;
2106 struct perf_counter_hw_event hw_event;
2107 struct perf_counter_context *ctx;
2108 struct file *counter_file = NULL;
2109 struct file *group_file = NULL;
2110 int fput_needed = 0;
2111 int fput_needed2 = 0;
2114 /* for future expandability... */
2118 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2122 * Get the target context (task or percpu):
2124 ctx = find_get_context(pid, cpu);
2126 return PTR_ERR(ctx);
2129 * Look up the group leader (we will attach this counter to it):
2131 group_leader = NULL;
2132 if (group_fd != -1) {
2134 group_file = fget_light(group_fd, &fput_needed);
2136 goto err_put_context;
2137 if (group_file->f_op != &perf_fops)
2138 goto err_put_context;
2140 group_leader = group_file->private_data;
2142 * Do not allow a recursive hierarchy (this new sibling
2143 * becoming part of another group-sibling):
2145 if (group_leader->group_leader != group_leader)
2146 goto err_put_context;
2148 * Do not allow attaching to a group in a different
2149 * task or CPU context:
2151 if (group_leader->ctx != ctx)
2152 goto err_put_context;
2154 * Only a group leader can be exclusive or pinned
2156 if (hw_event.exclusive || hw_event.pinned)
2157 goto err_put_context;
2161 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2164 goto err_put_context;
2166 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2168 goto err_free_put_context;
2170 counter_file = fget_light(ret, &fput_needed2);
2172 goto err_free_put_context;
2174 counter->filp = counter_file;
2175 mutex_lock(&ctx->mutex);
2176 perf_install_in_context(ctx, counter, cpu);
2177 mutex_unlock(&ctx->mutex);
2179 fput_light(counter_file, fput_needed2);
2182 fput_light(group_file, fput_needed);
2186 err_free_put_context:
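/*
 * A rough userspace usage sketch for the syscall above (the syscall number
 * macro and the event setup are illustrative assumptions):
 *
 *	struct perf_counter_hw_event ev = { ... };
 *	int fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
 *
 * pid == 0 counts the calling task and pid > 0 another task (subject to
 * the ptrace check in find_get_context()); cpu == -1 lets the counter
 * follow the task while cpu >= 0 makes it a per-CPU counter (root only);
 * group_fd == -1 starts a new group; flags is reserved.
 */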
2196 * Initialize the perf_counter context in a task_struct:
2199 __perf_counter_init_context(struct perf_counter_context *ctx,
2200 struct task_struct *task)
2202 memset(ctx, 0, sizeof(*ctx));
2203 spin_lock_init(&ctx->lock);
2204 mutex_init(&ctx->mutex);
2205 INIT_LIST_HEAD(&ctx->counter_list);
2206 INIT_LIST_HEAD(&ctx->event_list);
2211 * inherit a counter from parent task to child task:
2213 static struct perf_counter *
2214 inherit_counter(struct perf_counter *parent_counter,
2215 struct task_struct *parent,
2216 struct perf_counter_context *parent_ctx,
2217 struct task_struct *child,
2218 struct perf_counter *group_leader,
2219 struct perf_counter_context *child_ctx)
2221 struct perf_counter *child_counter;
2224 * Instead of creating recursive hierarchies of counters,
2225 * we link inherited counters back to the original parent,
2226 * which has a filp for sure, which we use as the reference count:
2229 if (parent_counter->parent)
2230 parent_counter = parent_counter->parent;
2232 child_counter = perf_counter_alloc(&parent_counter->hw_event,
2233 parent_counter->cpu, child_ctx,
2234 group_leader, GFP_KERNEL);
2239 * Link it up in the child's context:
2241 child_counter->task = child;
2242 list_add_counter(child_counter, child_ctx);
2243 child_ctx->nr_counters++;
2245 child_counter->parent = parent_counter;
2247 * inherit into child's child as well:
2249 child_counter->hw_event.inherit = 1;
2252 * Get a reference to the parent filp - we will fput it
2253 * when the child counter exits. This is safe to do because
2254 * we are in the parent and we know that the filp still
2255 * exists and has a nonzero count:
2257 atomic_long_inc(&parent_counter->filp->f_count);
2260 * Link this into the parent counter's child list
2262 mutex_lock(&parent_counter->mutex);
2263 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2266 * Make the child state follow the state of the parent counter,
2267 * not its hw_event.disabled bit. We hold the parent's mutex,
2268 * so we won't race with perf_counter_{en,dis}able_family.
2270 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2271 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2273 child_counter->state = PERF_COUNTER_STATE_OFF;
2275 mutex_unlock(&parent_counter->mutex);
2277 return child_counter;
2280 static int inherit_group(struct perf_counter *parent_counter,
2281 struct task_struct *parent,
2282 struct perf_counter_context *parent_ctx,
2283 struct task_struct *child,
2284 struct perf_counter_context *child_ctx)
2286 struct perf_counter *leader;
2287 struct perf_counter *sub;
2289 leader = inherit_counter(parent_counter, parent, parent_ctx,
2290 child, NULL, child_ctx);
2293 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2294 if (!inherit_counter(sub, parent, parent_ctx,
2295 child, leader, child_ctx))
2301 static void sync_child_counter(struct perf_counter *child_counter,
2302 struct perf_counter *parent_counter)
2304 u64 parent_val, child_val;
2306 parent_val = atomic64_read(&parent_counter->count);
2307 child_val = atomic64_read(&child_counter->count);
2310 * Add back the child's count to the parent's count:
2312 atomic64_add(child_val, &parent_counter->count);
2315 * Remove this counter from the parent's list
2317 mutex_lock(&parent_counter->mutex);
2318 list_del_init(&child_counter->child_list);
2319 mutex_unlock(&parent_counter->mutex);
2322 * Release the parent counter, if this was the last reference to it.
2325 fput(parent_counter->filp);
2329 __perf_counter_exit_task(struct task_struct *child,
2330 struct perf_counter *child_counter,
2331 struct perf_counter_context *child_ctx)
2333 struct perf_counter *parent_counter;
2334 struct perf_counter *sub, *tmp;
2337 * If we do not self-reap then we have to wait for the
2338 * child task to unschedule (it will happen for sure),
2339 * so that its counter is at its final count. (This
2340 * condition triggers rarely - child tasks usually get
2341 * off their CPU before the parent has a chance to
2342 * get this far into the reaping action)
2344 if (child != current) {
2345 wait_task_inactive(child, 0);
2346 list_del_init(&child_counter->list_entry);
2348 struct perf_cpu_context *cpuctx;
2349 unsigned long flags;
2353 * Disable and unlink this counter.
2355 * Be careful about zapping the list - IRQ/NMI context
2356 * could still be processing it:
2358 curr_rq_lock_irq_save(&flags);
2359 perf_flags = hw_perf_save_disable();
2361 cpuctx = &__get_cpu_var(perf_cpu_context);
2363 group_sched_out(child_counter, cpuctx, child_ctx);
2365 list_del_init(&child_counter->list_entry);
2367 child_ctx->nr_counters--;
2369 hw_perf_restore(perf_flags);
2370 curr_rq_unlock_irq_restore(&flags);
2373 parent_counter = child_counter->parent;
2375 * It can happen that parent exits first, and has counters
2376 * that are still around due to the child reference. These
2377 * counters need to be zapped - but otherwise linger.
2379 if (parent_counter) {
2380 sync_child_counter(child_counter, parent_counter);
2381 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2384 sync_child_counter(sub, sub->parent);
2388 free_counter(child_counter);
2393 * When a child task exits, feed back counter values to parent counters.
2395 * Note: we may be running in child context, but the PID is not hashed
2396 * anymore so new counters will not be added.
2398 void perf_counter_exit_task(struct task_struct *child)
2400 struct perf_counter *child_counter, *tmp;
2401 struct perf_counter_context *child_ctx;
2403 child_ctx = &child->perf_counter_ctx;
2405 if (likely(!child_ctx->nr_counters))
2408 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2410 __perf_counter_exit_task(child, child_counter, child_ctx);
2414 * Initialize the perf_counter context in task_struct
2416 void perf_counter_init_task(struct task_struct *child)
2418 struct perf_counter_context *child_ctx, *parent_ctx;
2419 struct perf_counter *counter;
2420 struct task_struct *parent = current;
2422 child_ctx = &child->perf_counter_ctx;
2423 parent_ctx = &parent->perf_counter_ctx;
2425 __perf_counter_init_context(child_ctx, child);
2428 * This is executed from the parent task context, so inherit
2429 * counters that have been marked for cloning:
2432 if (likely(!parent_ctx->nr_counters))
2436 * Lock the parent list. No need to lock the child - not PID
2437 * hashed yet and not running, so nobody can access it.
2439 mutex_lock(&parent_ctx->mutex);
2442 * We don't have to disable NMIs - we are only looking at
2443 * the list, not manipulating it:
2445 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2446 if (!counter->hw_event.inherit)
2449 if (inherit_group(counter, parent,
2450 parent_ctx, child, child_ctx))
2454 mutex_unlock(&parent_ctx->mutex);
2457 static void __cpuinit perf_counter_init_cpu(int cpu)
2459 struct perf_cpu_context *cpuctx;
2461 cpuctx = &per_cpu(perf_cpu_context, cpu);
2462 __perf_counter_init_context(&cpuctx->ctx, NULL);
2464 mutex_lock(&perf_resource_mutex);
2465 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2466 mutex_unlock(&perf_resource_mutex);
2468 hw_perf_counter_setup(cpu);
2471 #ifdef CONFIG_HOTPLUG_CPU
2472 static void __perf_counter_exit_cpu(void *info)
2474 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2475 struct perf_counter_context *ctx = &cpuctx->ctx;
2476 struct perf_counter *counter, *tmp;
2478 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2479 __perf_counter_remove_from_context(counter);
2481 static void perf_counter_exit_cpu(int cpu)
2483 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2484 struct perf_counter_context *ctx = &cpuctx->ctx;
2486 mutex_lock(&ctx->mutex);
2487 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2488 mutex_unlock(&ctx->mutex);
2491 static inline void perf_counter_exit_cpu(int cpu) { }
2494 static int __cpuinit
2495 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2497 unsigned int cpu = (long)hcpu;
2501 case CPU_UP_PREPARE:
2502 case CPU_UP_PREPARE_FROZEN:
2503 perf_counter_init_cpu(cpu);
2506 case CPU_DOWN_PREPARE:
2507 case CPU_DOWN_PREPARE_FROZEN:
2508 perf_counter_exit_cpu(cpu);
2518 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2519 .notifier_call = perf_cpu_notify,
2522 static int __init perf_counter_init(void)
2524 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2525 (void *)(long)smp_processor_id());
2526 register_cpu_notifier(&perf_cpu_nb);
2530 early_initcall(perf_counter_init);
2532 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2534 return sprintf(buf, "%d\n", perf_reserved_percpu);
2538 perf_set_reserve_percpu(struct sysdev_class *class,
2542 struct perf_cpu_context *cpuctx;
2546 err = strict_strtoul(buf, 10, &val);
2549 if (val > perf_max_counters)
2552 mutex_lock(&perf_resource_mutex);
2553 perf_reserved_percpu = val;
2554 for_each_online_cpu(cpu) {
2555 cpuctx = &per_cpu(perf_cpu_context, cpu);
2556 spin_lock_irq(&cpuctx->ctx.lock);
2557 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2558 perf_max_counters - perf_reserved_percpu);
2559 cpuctx->max_pertask = mpt;
2560 spin_unlock_irq(&cpuctx->ctx.lock);
2562 mutex_unlock(&perf_resource_mutex);
2567 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2569 return sprintf(buf, "%d\n", perf_overcommit);
2573 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2578 err = strict_strtoul(buf, 10, &val);
2584 mutex_lock(&perf_resource_mutex);
2585 perf_overcommit = val;
2586 mutex_unlock(&perf_resource_mutex);
2591 static SYSDEV_CLASS_ATTR(
2594 perf_show_reserve_percpu,
2595 perf_set_reserve_percpu
2598 static SYSDEV_CLASS_ATTR(
2601 perf_show_overcommit,
2605 static struct attribute *perfclass_attrs[] = {
2606 &attr_reserve_percpu.attr,
2607 &attr_overcommit.attr,
2611 static struct attribute_group perfclass_attr_group = {
2612 .attrs = perfclass_attrs,
2613 .name = "perf_counters",
2616 static int __init perf_counter_sysfs_init(void)
2618 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2619 &perfclass_attr_group);
2621 device_initcall(perf_counter_sysfs_init);