2 * Performance events core code:
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/sysfs.h>
21 #include <linux/dcache.h>
22 #include <linux/percpu.h>
23 #include <linux/ptrace.h>
24 #include <linux/vmstat.h>
25 #include <linux/vmalloc.h>
26 #include <linux/hardirq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 #include <linux/syscalls.h>
30 #include <linux/anon_inodes.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/perf_event.h>
33 #include <linux/ftrace_event.h>
34 #include <linux/hw_breakpoint.h>
36 #include <asm/irq_regs.h>
39 * Each CPU has a list of per CPU events:
41 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
43 int perf_max_events __read_mostly = 1;
44 static int perf_reserved_percpu __read_mostly;
45 static int perf_overcommit __read_mostly = 1;
47 static atomic_t nr_events __read_mostly;
48 static atomic_t nr_mmap_events __read_mostly;
49 static atomic_t nr_comm_events __read_mostly;
50 static atomic_t nr_task_events __read_mostly;
53 * perf event paranoia level:
54 * -1 - not paranoid at all
55 * 0 - disallow raw tracepoint access for unpriv
56 * 1 - disallow cpu events for unpriv
57 * 2 - disallow kernel profiling for unpriv
59 int sysctl_perf_event_paranoid __read_mostly = 1;
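/*
 * Editorial note (not in the original source): this knob is exposed as
 * /proc/sys/kernel/perf_event_paranoid, so for example
 *
 *	echo 2 > /proc/sys/kernel/perf_event_paranoid
 *
 * restricts unprivileged users to profiling their own user-space code only.
 */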
61 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
64 * max perf event sample rate
66 int sysctl_perf_event_sample_rate __read_mostly = 100000;
68 static atomic64_t perf_event_id;
71 * Lock for (sysadmin-configurable) event reservations:
73 static DEFINE_SPINLOCK(perf_resource_lock);
76 * Architecture provided APIs - weak aliases:
78 extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
83 void __weak hw_perf_disable(void) { barrier(); }
84 void __weak hw_perf_enable(void) { barrier(); }
86 void __weak perf_event_print_debug(void) { }
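/*
 * Editorial sketch of the intended pairing, inferred from the per-CPU
 * counter below: perf_disable()/perf_enable() nest per CPU, so callers may
 * bracket critical sections as
 *
 *	perf_disable();
 *	... manipulate event lists / PMU state ...
 *	perf_enable();
 *
 * and only the outermost perf_enable() actually re-enables the hardware PMU.
 */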
88 static DEFINE_PER_CPU(int, perf_disable_count);
90 void perf_disable(void)
92 if (!__get_cpu_var(perf_disable_count)++)
96 void perf_enable(void)
98 if (!--__get_cpu_var(perf_disable_count))
102 static void get_ctx(struct perf_event_context *ctx)
104 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
107 static void free_ctx(struct rcu_head *head)
109 struct perf_event_context *ctx;
111 ctx = container_of(head, struct perf_event_context, rcu_head);
115 static void put_ctx(struct perf_event_context *ctx)
117 if (atomic_dec_and_test(&ctx->refcount)) {
119 put_ctx(ctx->parent_ctx);
121 put_task_struct(ctx->task);
122 call_rcu(&ctx->rcu_head, free_ctx);
126 static void unclone_ctx(struct perf_event_context *ctx)
128 if (ctx->parent_ctx) {
129 put_ctx(ctx->parent_ctx);
130 ctx->parent_ctx = NULL;
135 * If we inherit events we want to return the parent event id
138 static u64 primary_event_id(struct perf_event *event)
143 id = event->parent->id;
149 * Get the perf_event_context for a task and lock it.
150 * This has to cope with the fact that until it is locked,
151 * the context could get moved to another task.
153 static struct perf_event_context *
154 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
156 struct perf_event_context *ctx;
160 ctx = rcu_dereference(task->perf_event_ctxp);
163 * If this context is a clone of another, it might
164 * get swapped for another underneath us by
165 * perf_event_task_sched_out, though the
166 * rcu_read_lock() protects us from any context
167 * getting freed. Lock the context and check if it
168 * got swapped before we could get the lock, and retry
169 * if so. If we locked the right context, then it
170 * can't get swapped on us any more.
172 raw_spin_lock_irqsave(&ctx->lock, *flags);
173 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
174 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
178 if (!atomic_inc_not_zero(&ctx->refcount)) {
179 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
188 * Get the context for a task and increment its pin_count so it
189 * can't get swapped to another task. This also increments its
190 * reference count so that the context can't get freed.
192 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
194 struct perf_event_context *ctx;
197 ctx = perf_lock_task_context(task, &flags);
200 raw_spin_unlock_irqrestore(&ctx->lock, flags);
205 static void perf_unpin_context(struct perf_event_context *ctx)
209 raw_spin_lock_irqsave(&ctx->lock, flags);
211 raw_spin_unlock_irqrestore(&ctx->lock, flags);
215 static inline u64 perf_clock(void)
217 return cpu_clock(raw_smp_processor_id());
221 * Update the record of the current time in a context.
223 static void update_context_time(struct perf_event_context *ctx)
225 u64 now = perf_clock();
227 ctx->time += now - ctx->timestamp;
228 ctx->timestamp = now;
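/*
 * Editorial note: ctx->time only advances while the context is scheduled in
 * (ctx->timestamp is reset in ctx_sched_in()), so the tstamp_* and
 * total_time_* fields below are expressed in "context time", not wall time.
 */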
232 * Update the total_time_enabled and total_time_running fields for an event.
234 static void update_event_times(struct perf_event *event)
236 struct perf_event_context *ctx = event->ctx;
239 if (event->state < PERF_EVENT_STATE_INACTIVE ||
240 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
246 run_end = event->tstamp_stopped;
248 event->total_time_enabled = run_end - event->tstamp_enabled;
250 if (event->state == PERF_EVENT_STATE_INACTIVE)
251 run_end = event->tstamp_stopped;
255 event->total_time_running = run_end - event->tstamp_running;
259 * Update total_time_enabled and total_time_running for all events in a group.
261 static void update_group_times(struct perf_event *leader)
263 struct perf_event *event;
265 update_event_times(leader);
266 list_for_each_entry(event, &leader->sibling_list, group_entry)
267 update_event_times(event);
270 static struct list_head *
271 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
273 if (event->attr.pinned)
274 return &ctx->pinned_groups;
276 return &ctx->flexible_groups;
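/*
 * Editorial note: pinned groups are scheduled in before flexible groups
 * (see ctx_sched_in() below) and are never rotated; only flexible_groups
 * is round-robined by rotate_ctx().
 */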
280 * Add an event to the lists for its context.
281 * Must be called with ctx->mutex and ctx->lock held.
284 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
286 struct perf_event *group_leader = event->group_leader;
289 * Depending on whether it is a standalone or sibling event,
290 * add it straight to the context's event list, or to the group
291 * leader's sibling list:
293 if (group_leader == event) {
294 struct list_head *list;
296 if (is_software_event(event))
297 event->group_flags |= PERF_GROUP_SOFTWARE;
299 list = ctx_group_list(event, ctx);
300 list_add_tail(&event->group_entry, list);
302 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
303 !is_software_event(event))
304 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
306 list_add_tail(&event->group_entry, &group_leader->sibling_list);
307 group_leader->nr_siblings++;
310 list_add_rcu(&event->event_entry, &ctx->event_list);
312 if (event->attr.inherit_stat)
317 * Remove an event from the lists for its context.
318 * Must be called with ctx->mutex and ctx->lock held.
321 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
323 if (list_empty(&event->group_entry))
326 if (event->attr.inherit_stat)
329 list_del_init(&event->group_entry);
330 list_del_rcu(&event->event_entry);
332 if (event->group_leader != event)
333 event->group_leader->nr_siblings--;
335 update_group_times(event);
338 * If event was in error state, then keep it
339 * that way, otherwise bogus counts will be
340 * returned on read(). The only way to get out
341 * of error state is by explicit re-enabling
344 if (event->state > PERF_EVENT_STATE_OFF)
345 event->state = PERF_EVENT_STATE_OFF;
349 perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
351 struct perf_event *sibling, *tmp;
354 * If this was a group event with sibling events then
355 * upgrade the siblings to singleton events by adding them
356 * to the context list directly:
358 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
359 struct list_head *list;
361 list = ctx_group_list(event, ctx);
362 list_move_tail(&sibling->group_entry, list);
363 sibling->group_leader = sibling;
365 /* Inherit group flags from the previous leader */
366 sibling->group_flags = event->group_flags;
371 event_sched_out(struct perf_event *event,
372 struct perf_cpu_context *cpuctx,
373 struct perf_event_context *ctx)
375 if (event->state != PERF_EVENT_STATE_ACTIVE)
378 event->state = PERF_EVENT_STATE_INACTIVE;
379 if (event->pending_disable) {
380 event->pending_disable = 0;
381 event->state = PERF_EVENT_STATE_OFF;
383 event->tstamp_stopped = ctx->time;
384 event->pmu->disable(event);
387 if (!is_software_event(event))
388 cpuctx->active_oncpu--;
390 if (event->attr.exclusive || !cpuctx->active_oncpu)
391 cpuctx->exclusive = 0;
395 group_sched_out(struct perf_event *group_event,
396 struct perf_cpu_context *cpuctx,
397 struct perf_event_context *ctx)
399 struct perf_event *event;
401 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
404 event_sched_out(group_event, cpuctx, ctx);
407 * Schedule out siblings (if any):
409 list_for_each_entry(event, &group_event->sibling_list, group_entry)
410 event_sched_out(event, cpuctx, ctx);
412 if (group_event->attr.exclusive)
413 cpuctx->exclusive = 0;
417 * Cross CPU call to remove a performance event
419 * We disable the event on the hardware level first. After that we
420 * remove it from the context list.
422 static void __perf_event_remove_from_context(void *info)
424 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
425 struct perf_event *event = info;
426 struct perf_event_context *ctx = event->ctx;
429 * If this is a task context, we need to check whether it is
430 * the current task context of this cpu. If not it has been
431 * scheduled out before the smp call arrived.
433 if (ctx->task && cpuctx->task_ctx != ctx)
436 raw_spin_lock(&ctx->lock);
438 * Protect the list operation against NMI by disabling the
439 * events on a global level.
443 event_sched_out(event, cpuctx, ctx);
445 list_del_event(event, ctx);
449 * Allow more per task events with respect to the
452 cpuctx->max_pertask =
453 min(perf_max_events - ctx->nr_events,
454 perf_max_events - perf_reserved_percpu);
458 raw_spin_unlock(&ctx->lock);
463 * Remove the event from a task's (or a CPU's) list of events.
465 * Must be called with ctx->mutex held.
467 * CPU events are removed with a smp call. For task events we only
468 * call when the task is on a CPU.
470 * If event->ctx is a cloned context, callers must make sure that
471 * every task struct that event->ctx->task could possibly point to
472 * remains valid. This is OK when called from perf_release since
473 * that only calls us on the top-level context, which can't be a clone.
474 * When called from perf_event_exit_task, it's OK because the
475 * context has been detached from its task.
477 static void perf_event_remove_from_context(struct perf_event *event)
479 struct perf_event_context *ctx = event->ctx;
480 struct task_struct *task = ctx->task;
484 * Per cpu events are removed via an smp call and
485 * the removal is always successful.
487 smp_call_function_single(event->cpu,
488 __perf_event_remove_from_context,
494 task_oncpu_function_call(task, __perf_event_remove_from_context,
497 raw_spin_lock_irq(&ctx->lock);
499 * If the context is active we need to retry the smp call.
501 if (ctx->nr_active && !list_empty(&event->group_entry)) {
502 raw_spin_unlock_irq(&ctx->lock);
507 * The lock prevents this context from being scheduled in, so we
508 * can remove the event safely if the call above did not
511 if (!list_empty(&event->group_entry))
512 list_del_event(event, ctx);
513 raw_spin_unlock_irq(&ctx->lock);
517 * Cross CPU call to disable a performance event
519 static void __perf_event_disable(void *info)
521 struct perf_event *event = info;
522 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
523 struct perf_event_context *ctx = event->ctx;
526 * If this is a per-task event, we need to check whether this
527 * event's task is the current task on this cpu.
529 if (ctx->task && cpuctx->task_ctx != ctx)
532 raw_spin_lock(&ctx->lock);
535 * If the event is on, turn it off.
536 * If it is in error state, leave it in error state.
538 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
539 update_context_time(ctx);
540 update_group_times(event);
541 if (event == event->group_leader)
542 group_sched_out(event, cpuctx, ctx);
544 event_sched_out(event, cpuctx, ctx);
545 event->state = PERF_EVENT_STATE_OFF;
548 raw_spin_unlock(&ctx->lock);
554 * If event->ctx is a cloned context, callers must make sure that
555 * every task struct that event->ctx->task could possibly point to
556 * remains valid. This condition is satisfied when called through
557 * perf_event_for_each_child or perf_event_for_each because they
558 * hold the top-level event's child_mutex, so any descendant that
559 * goes to exit will block in sync_child_event.
560 * When called from perf_pending_event it's OK because event->ctx
561 * is the current context on this CPU and preemption is disabled,
562 * hence we can't get into perf_event_task_sched_out for this context.
564 void perf_event_disable(struct perf_event *event)
566 struct perf_event_context *ctx = event->ctx;
567 struct task_struct *task = ctx->task;
571 * Disable the event on the cpu that it's on
573 smp_call_function_single(event->cpu, __perf_event_disable,
579 task_oncpu_function_call(task, __perf_event_disable, event);
581 raw_spin_lock_irq(&ctx->lock);
583 * If the event is still active, we need to retry the cross-call.
585 if (event->state == PERF_EVENT_STATE_ACTIVE) {
586 raw_spin_unlock_irq(&ctx->lock);
591 * Since we have the lock this context can't be scheduled
592 * in, so we can change the state safely.
594 if (event->state == PERF_EVENT_STATE_INACTIVE) {
595 update_group_times(event);
596 event->state = PERF_EVENT_STATE_OFF;
599 raw_spin_unlock_irq(&ctx->lock);
603 event_sched_in(struct perf_event *event,
604 struct perf_cpu_context *cpuctx,
605 struct perf_event_context *ctx)
607 if (event->state <= PERF_EVENT_STATE_OFF)
610 event->state = PERF_EVENT_STATE_ACTIVE;
611 event->oncpu = smp_processor_id();
613 * The new state must be visible before we turn it on in the hardware:
617 if (event->pmu->enable(event)) {
618 event->state = PERF_EVENT_STATE_INACTIVE;
623 event->tstamp_running += ctx->time - event->tstamp_stopped;
625 if (!is_software_event(event))
626 cpuctx->active_oncpu++;
629 if (event->attr.exclusive)
630 cpuctx->exclusive = 1;
636 group_sched_in(struct perf_event *group_event,
637 struct perf_cpu_context *cpuctx,
638 struct perf_event_context *ctx)
640 struct perf_event *event, *partial_group = NULL;
641 const struct pmu *pmu = group_event->pmu;
645 if (group_event->state == PERF_EVENT_STATE_OFF)
648 /* Check if a group transaction is available */
655 if (event_sched_in(group_event, cpuctx, ctx))
659 * Schedule in siblings as one group (if any):
661 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
662 if (event_sched_in(event, cpuctx, ctx)) {
663 partial_group = event;
671 ret = pmu->commit_txn(pmu);
673 pmu->cancel_txn(pmu);
679 pmu->cancel_txn(pmu);
682 * Groups can be scheduled in as one unit only, so undo any
683 * partial group before returning:
685 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
686 if (event == partial_group)
688 event_sched_out(event, cpuctx, ctx);
690 event_sched_out(group_event, cpuctx, ctx);
696 * Work out whether we can put this event group on the CPU now.
698 static int group_can_go_on(struct perf_event *event,
699 struct perf_cpu_context *cpuctx,
703 * Groups consisting entirely of software events can always go on.
705 if (event->group_flags & PERF_GROUP_SOFTWARE)
708 * If an exclusive group is already on, no other hardware
711 if (cpuctx->exclusive)
714 * If this group is exclusive and there are already
715 * events on the CPU, it can't go on.
717 if (event->attr.exclusive && cpuctx->active_oncpu)
720 * Otherwise, try to add it if all previous groups were able
726 static void add_event_to_ctx(struct perf_event *event,
727 struct perf_event_context *ctx)
729 list_add_event(event, ctx);
730 event->tstamp_enabled = ctx->time;
731 event->tstamp_running = ctx->time;
732 event->tstamp_stopped = ctx->time;
736 * Cross CPU call to install and enable a performance event
738 * Must be called with ctx->mutex held
740 static void __perf_install_in_context(void *info)
742 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
743 struct perf_event *event = info;
744 struct perf_event_context *ctx = event->ctx;
745 struct perf_event *leader = event->group_leader;
749 * If this is a task context, we need to check whether it is
750 * the current task context of this cpu. If not it has been
751 * scheduled out before the smp call arrived.
752 * Or possibly this is the right context but it isn't
753 * on this cpu because it had no events.
755 if (ctx->task && cpuctx->task_ctx != ctx) {
756 if (cpuctx->task_ctx || ctx->task != current)
758 cpuctx->task_ctx = ctx;
761 raw_spin_lock(&ctx->lock);
763 update_context_time(ctx);
766 * Protect the list operation against NMI by disabling the
767 * events on a global level. NOP for non NMI based events.
771 add_event_to_ctx(event, ctx);
773 if (event->cpu != -1 && event->cpu != smp_processor_id())
777 * Don't put the event on if it is disabled or if
778 * it is in a group and the group isn't on.
780 if (event->state != PERF_EVENT_STATE_INACTIVE ||
781 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
785 * An exclusive event can't go on if there are already active
786 * hardware events, and no hardware event can go on if there
787 * is already an exclusive event on.
789 if (!group_can_go_on(event, cpuctx, 1))
792 err = event_sched_in(event, cpuctx, ctx);
796 * This event couldn't go on. If it is in a group
797 * then we have to pull the whole group off.
798 * If the event group is pinned then put it in error state.
801 group_sched_out(leader, cpuctx, ctx);
802 if (leader->attr.pinned) {
803 update_group_times(leader);
804 leader->state = PERF_EVENT_STATE_ERROR;
808 if (!err && !ctx->task && cpuctx->max_pertask)
809 cpuctx->max_pertask--;
814 raw_spin_unlock(&ctx->lock);
818 * Attach a performance event to a context
820 * First we add the event to the list with the hardware enable bit
821 * in event->hw_config cleared.
823 * If the event is attached to a task which is on a CPU we use a smp
824 * call to enable it in the task context. The task might have been
825 * scheduled away, but we check this in the smp call again.
827 * Must be called with ctx->mutex held.
830 perf_install_in_context(struct perf_event_context *ctx,
831 struct perf_event *event,
834 struct task_struct *task = ctx->task;
838 * Per cpu events are installed via an smp call and
839 * the install is always successful.
841 smp_call_function_single(cpu, __perf_install_in_context,
847 task_oncpu_function_call(task, __perf_install_in_context,
850 raw_spin_lock_irq(&ctx->lock);
852 * we need to retry the smp call.
854 if (ctx->is_active && list_empty(&event->group_entry)) {
855 raw_spin_unlock_irq(&ctx->lock);
860 * The lock prevents this context from being scheduled in, so we
861 * can add the event safely if the call above did not
864 if (list_empty(&event->group_entry))
865 add_event_to_ctx(event, ctx);
866 raw_spin_unlock_irq(&ctx->lock);
870 * Put an event into inactive state and update time fields.
871 * Enabling the leader of a group effectively enables all
872 * the group members that aren't explicitly disabled, so we
873 * have to update their ->tstamp_enabled also.
874 * Note: this works for group members as well as group leaders
875 * since the non-leader members' sibling_lists will be empty.
877 static void __perf_event_mark_enabled(struct perf_event *event,
878 struct perf_event_context *ctx)
880 struct perf_event *sub;
882 event->state = PERF_EVENT_STATE_INACTIVE;
883 event->tstamp_enabled = ctx->time - event->total_time_enabled;
884 list_for_each_entry(sub, &event->sibling_list, group_entry)
885 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
886 sub->tstamp_enabled =
887 ctx->time - sub->total_time_enabled;
891 * Cross CPU call to enable a performance event
893 static void __perf_event_enable(void *info)
895 struct perf_event *event = info;
896 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
897 struct perf_event_context *ctx = event->ctx;
898 struct perf_event *leader = event->group_leader;
902 * If this is a per-task event, we need to check whether this
903 * event's task is the current task on this cpu.
905 if (ctx->task && cpuctx->task_ctx != ctx) {
906 if (cpuctx->task_ctx || ctx->task != current)
908 cpuctx->task_ctx = ctx;
911 raw_spin_lock(&ctx->lock);
913 update_context_time(ctx);
915 if (event->state >= PERF_EVENT_STATE_INACTIVE)
917 __perf_event_mark_enabled(event, ctx);
919 if (event->cpu != -1 && event->cpu != smp_processor_id())
923 * If the event is in a group and isn't the group leader,
924 * then don't put it on unless the group is on.
926 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
929 if (!group_can_go_on(event, cpuctx, 1)) {
934 err = group_sched_in(event, cpuctx, ctx);
936 err = event_sched_in(event, cpuctx, ctx);
942 * If this event can't go on and it's part of a
943 * group, then the whole group has to come off.
946 group_sched_out(leader, cpuctx, ctx);
947 if (leader->attr.pinned) {
948 update_group_times(leader);
949 leader->state = PERF_EVENT_STATE_ERROR;
954 raw_spin_unlock(&ctx->lock);
960 * If event->ctx is a cloned context, callers must make sure that
961 * every task struct that event->ctx->task could possibly point to
962 * remains valid. This condition is satisfied when called through
963 * perf_event_for_each_child or perf_event_for_each as described
964 * for perf_event_disable.
966 void perf_event_enable(struct perf_event *event)
968 struct perf_event_context *ctx = event->ctx;
969 struct task_struct *task = ctx->task;
973 * Enable the event on the cpu that it's on
975 smp_call_function_single(event->cpu, __perf_event_enable,
980 raw_spin_lock_irq(&ctx->lock);
981 if (event->state >= PERF_EVENT_STATE_INACTIVE)
985 * If the event is in error state, clear that first.
986 * That way, if we see the event in error state below, we
987 * know that it has gone back into error state, as distinct
988 * from the task having been scheduled away before the
989 * cross-call arrived.
991 if (event->state == PERF_EVENT_STATE_ERROR)
992 event->state = PERF_EVENT_STATE_OFF;
995 raw_spin_unlock_irq(&ctx->lock);
996 task_oncpu_function_call(task, __perf_event_enable, event);
998 raw_spin_lock_irq(&ctx->lock);
1001 * If the context is active and the event is still off,
1002 * we need to retry the cross-call.
1004 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1008 * Since we have the lock this context can't be scheduled
1009 * in, so we can change the state safely.
1011 if (event->state == PERF_EVENT_STATE_OFF)
1012 __perf_event_mark_enabled(event, ctx);
1015 raw_spin_unlock_irq(&ctx->lock);
1018 static int perf_event_refresh(struct perf_event *event, int refresh)
1021 * not supported on inherited events
1023 if (event->attr.inherit)
1026 atomic_add(refresh, &event->event_limit);
1027 perf_event_enable(event);
1033 EVENT_FLEXIBLE = 0x1,
1034 EVENT_PINNED = 0x2,
1035 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1038 static void ctx_sched_out(struct perf_event_context *ctx,
1039 struct perf_cpu_context *cpuctx,
1040 enum event_type_t event_type)
1042 struct perf_event *event;
1044 raw_spin_lock(&ctx->lock);
1046 if (likely(!ctx->nr_events))
1048 update_context_time(ctx);
1051 if (!ctx->nr_active)
1054 if (event_type & EVENT_PINNED)
1055 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1056 group_sched_out(event, cpuctx, ctx);
1058 if (event_type & EVENT_FLEXIBLE)
1059 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1060 group_sched_out(event, cpuctx, ctx);
1065 raw_spin_unlock(&ctx->lock);
1069 * Test whether two contexts are equivalent, i.e. whether they
1070 * have both been cloned from the same version of the same context
1071 * and they both have the same number of enabled events.
1072 * If the number of enabled events is the same, then the set
1073 * of enabled events should be the same, because these are both
1074 * inherited contexts, therefore we can't access individual events
1075 * in them directly with an fd; we can only enable/disable all
1076 * events via prctl, or enable/disable all events in a family
1077 * via ioctl, which will have the same effect on both contexts.
1079 static int context_equiv(struct perf_event_context *ctx1,
1080 struct perf_event_context *ctx2)
1082 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1083 && ctx1->parent_gen == ctx2->parent_gen
1084 && !ctx1->pin_count && !ctx2->pin_count;
1087 static void __perf_event_sync_stat(struct perf_event *event,
1088 struct perf_event *next_event)
1092 if (!event->attr.inherit_stat)
1096 * Update the event value, we cannot use perf_event_read()
1097 * because we're in the middle of a context switch and have IRQs
1098 * disabled, which upsets smp_call_function_single(), however
1099 * we know the event must be on the current CPU, therefore we
1100 * don't need to use it.
1102 switch (event->state) {
1103 case PERF_EVENT_STATE_ACTIVE:
1104 event->pmu->read(event);
1107 case PERF_EVENT_STATE_INACTIVE:
1108 update_event_times(event);
1116 * In order to keep per-task stats reliable we need to flip the event
1117 * values when we flip the contexts.
1119 value = atomic64_read(&next_event->count);
1120 value = atomic64_xchg(&event->count, value);
1121 atomic64_set(&next_event->count, value);
1123 swap(event->total_time_enabled, next_event->total_time_enabled);
1124 swap(event->total_time_running, next_event->total_time_running);
1127 * Since we swizzled the values, update the user visible data too.
1129 perf_event_update_userpage(event);
1130 perf_event_update_userpage(next_event);
1133 #define list_next_entry(pos, member) \
1134 list_entry(pos->member.next, typeof(*pos), member)
1136 static void perf_event_sync_stat(struct perf_event_context *ctx,
1137 struct perf_event_context *next_ctx)
1139 struct perf_event *event, *next_event;
1144 update_context_time(ctx);
1146 event = list_first_entry(&ctx->event_list,
1147 struct perf_event, event_entry);
1149 next_event = list_first_entry(&next_ctx->event_list,
1150 struct perf_event, event_entry);
1152 while (&event->event_entry != &ctx->event_list &&
1153 &next_event->event_entry != &next_ctx->event_list) {
1155 __perf_event_sync_stat(event, next_event);
1157 event = list_next_entry(event, event_entry);
1158 next_event = list_next_entry(next_event, event_entry);
1163 * Called from scheduler to remove the events of the current task,
1164 * with interrupts disabled.
1166 * We stop each event and update the event value in event->count.
1168 * This does not protect us against NMI, but disable()
1169 * sets the disabled bit in the control field of event _before_
1170 * accessing the event control register. If an NMI hits, then it will
1171 * not restart the event.
1173 void perf_event_task_sched_out(struct task_struct *task,
1174 struct task_struct *next)
1176 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1177 struct perf_event_context *ctx = task->perf_event_ctxp;
1178 struct perf_event_context *next_ctx;
1179 struct perf_event_context *parent;
1182 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1184 if (likely(!ctx || !cpuctx->task_ctx))
1188 parent = rcu_dereference(ctx->parent_ctx);
1189 next_ctx = next->perf_event_ctxp;
1190 if (parent && next_ctx &&
1191 rcu_dereference(next_ctx->parent_ctx) == parent) {
1193 * Looks like the two contexts are clones, so we might be
1194 * able to optimize the context switch. We lock both
1195 * contexts and check that they are clones under the
1196 * lock (including re-checking that neither has been
1197 * uncloned in the meantime). It doesn't matter which
1198 * order we take the locks because no other cpu could
1199 * be trying to lock both of these tasks.
1201 raw_spin_lock(&ctx->lock);
1202 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1203 if (context_equiv(ctx, next_ctx)) {
1205 * XXX do we need a memory barrier of sorts
1206 * wrt to rcu_dereference() of perf_event_ctxp
1208 task->perf_event_ctxp = next_ctx;
1209 next->perf_event_ctxp = ctx;
1211 next_ctx->task = task;
1214 perf_event_sync_stat(ctx, next_ctx);
1216 raw_spin_unlock(&next_ctx->lock);
1217 raw_spin_unlock(&ctx->lock);
1222 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1223 cpuctx->task_ctx = NULL;
1227 static void task_ctx_sched_out(struct perf_event_context *ctx,
1228 enum event_type_t event_type)
1230 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1232 if (!cpuctx->task_ctx)
1235 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1238 ctx_sched_out(ctx, cpuctx, event_type);
1239 cpuctx->task_ctx = NULL;
1243 * Called with IRQs disabled
1245 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1247 task_ctx_sched_out(ctx, EVENT_ALL);
1251 * Called with IRQs disabled
1253 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1254 enum event_type_t event_type)
1256 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1260 ctx_pinned_sched_in(struct perf_event_context *ctx,
1261 struct perf_cpu_context *cpuctx)
1263 struct perf_event *event;
1265 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1266 if (event->state <= PERF_EVENT_STATE_OFF)
1268 if (event->cpu != -1 && event->cpu != smp_processor_id())
1271 if (group_can_go_on(event, cpuctx, 1))
1272 group_sched_in(event, cpuctx, ctx);
1275 * If this pinned group hasn't been scheduled,
1276 * put it in error state.
1278 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1279 update_group_times(event);
1280 event->state = PERF_EVENT_STATE_ERROR;
1286 ctx_flexible_sched_in(struct perf_event_context *ctx,
1287 struct perf_cpu_context *cpuctx)
1289 struct perf_event *event;
1292 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1293 /* Ignore events in OFF or ERROR state */
1294 if (event->state <= PERF_EVENT_STATE_OFF)
1297 * Listen to the 'cpu' scheduling filter constraint
1300 if (event->cpu != -1 && event->cpu != smp_processor_id())
1303 if (group_can_go_on(event, cpuctx, can_add_hw))
1304 if (group_sched_in(event, cpuctx, ctx))
1310 ctx_sched_in(struct perf_event_context *ctx,
1311 struct perf_cpu_context *cpuctx,
1312 enum event_type_t event_type)
1314 raw_spin_lock(&ctx->lock);
1316 if (likely(!ctx->nr_events))
1319 ctx->timestamp = perf_clock();
1324 * First go through the list and put on any pinned groups
1325 * in order to give them the best chance of going on.
1327 if (event_type & EVENT_PINNED)
1328 ctx_pinned_sched_in(ctx, cpuctx);
1330 /* Then walk through the lower prio flexible groups */
1331 if (event_type & EVENT_FLEXIBLE)
1332 ctx_flexible_sched_in(ctx, cpuctx);
1336 raw_spin_unlock(&ctx->lock);
1339 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1340 enum event_type_t event_type)
1342 struct perf_event_context *ctx = &cpuctx->ctx;
1344 ctx_sched_in(ctx, cpuctx, event_type);
1347 static void task_ctx_sched_in(struct task_struct *task,
1348 enum event_type_t event_type)
1350 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1351 struct perf_event_context *ctx = task->perf_event_ctxp;
1355 if (cpuctx->task_ctx == ctx)
1357 ctx_sched_in(ctx, cpuctx, event_type);
1358 cpuctx->task_ctx = ctx;
1361 * Called from scheduler to add the events of the current task
1362 * with interrupts disabled.
1364 * We restore the event value and then enable it.
1366 * This does not protect us against NMI, but enable()
1367 * sets the enabled bit in the control field of event _before_
1368 * accessing the event control register. If an NMI hits, then it will
1369 * keep the event running.
1371 void perf_event_task_sched_in(struct task_struct *task)
1373 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1374 struct perf_event_context *ctx = task->perf_event_ctxp;
1379 if (cpuctx->task_ctx == ctx)
1385 * We want to keep the following priority order:
1386 * cpu pinned (that don't need to move), task pinned,
1387 * cpu flexible, task flexible.
1389 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1391 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1392 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1393 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1395 cpuctx->task_ctx = ctx;
1400 #define MAX_INTERRUPTS (~0ULL)
1402 static void perf_log_throttle(struct perf_event *event, int enable);
1404 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1406 u64 frequency = event->attr.sample_freq;
1407 u64 sec = NSEC_PER_SEC;
1408 u64 divisor, dividend;
1410 int count_fls, nsec_fls, frequency_fls, sec_fls;
1412 count_fls = fls64(count);
1413 nsec_fls = fls64(nsec);
1414 frequency_fls = fls64(frequency);
1418 * We got @count in @nsec, with a target of sample_freq HZ
1419 * the target period becomes:
1421 *            @count * 10^9
1422 * period = -------------------
1423 *          @nsec * sample_freq
1428 * Reduce accuracy by one bit such that @a and @b converge
1429 * to a similar magnitude.
1431 #define REDUCE_FLS(a, b) \
1433 if (a##_fls > b##_fls) { \
1443 * Reduce accuracy until either term fits in a u64, then proceed with
1444 * the other, so that finally we can do a u64/u64 division.
1446 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1447 REDUCE_FLS(nsec, frequency);
1448 REDUCE_FLS(sec, count);
1451 if (count_fls + sec_fls > 64) {
1452 divisor = nsec * frequency;
1454 while (count_fls + sec_fls > 64) {
1455 REDUCE_FLS(count, sec);
1459 dividend = count * sec;
1461 dividend = count * sec;
1463 while (nsec_fls + frequency_fls > 64) {
1464 REDUCE_FLS(nsec, frequency);
1468 divisor = nsec * frequency;
1471 return div64_u64(dividend, divisor);
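/*
 * Worked example (editorial, not in the original): with a target of
 * sample_freq = 1000 HZ and an event that counted 2,000,000 over the last
 * 10 ms (nsec = 10^7), the result is
 *
 *	period = 2*10^6 * 10^9 / (10^7 * 1000) = 200,000
 *
 * i.e. the event ticks at 2*10^8 per second, so one sample every
 * 200,000 counts yields roughly 1000 samples/sec.
 */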
1474 static void perf_event_stop(struct perf_event *event)
1476 if (!event->pmu->stop)
1477 return event->pmu->disable(event);
1479 return event->pmu->stop(event);
1482 static int perf_event_start(struct perf_event *event)
1484 if (!event->pmu->start)
1485 return event->pmu->enable(event);
1487 return event->pmu->start(event);
1490 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1492 struct hw_perf_event *hwc = &event->hw;
1493 u64 period, sample_period;
1496 period = perf_calculate_period(event, nsec, count);
1498 delta = (s64)(period - hwc->sample_period);
1499 delta = (delta + 7) / 8; /* low pass filter */
1501 sample_period = hwc->sample_period + delta;
1506 hwc->sample_period = sample_period;
1508 if (atomic64_read(&hwc->period_left) > 8*sample_period) {
1510 perf_event_stop(event);
1511 atomic64_set(&hwc->period_left, 0);
1512 perf_event_start(event);
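/*
 * Editorial example of the damping above: if the current sample_period is
 * 100,000 and perf_calculate_period() suggests 180,000, delta is 80,000,
 * the low-pass filter applies (80,000 + 7) / 8 = 10,000, and the period
 * only moves to 110,000 on this tick; repeated ticks converge on the target.
 */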
1517 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1519 struct perf_event *event;
1520 struct hw_perf_event *hwc;
1521 u64 interrupts, now;
1524 raw_spin_lock(&ctx->lock);
1525 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1526 if (event->state != PERF_EVENT_STATE_ACTIVE)
1529 if (event->cpu != -1 && event->cpu != smp_processor_id())
1534 interrupts = hwc->interrupts;
1535 hwc->interrupts = 0;
1538 * unthrottle events on the tick
1540 if (interrupts == MAX_INTERRUPTS) {
1541 perf_log_throttle(event, 1);
1543 event->pmu->unthrottle(event);
1547 if (!event->attr.freq || !event->attr.sample_freq)
1551 event->pmu->read(event);
1552 now = atomic64_read(&event->count);
1553 delta = now - hwc->freq_count_stamp;
1554 hwc->freq_count_stamp = now;
1557 perf_adjust_period(event, TICK_NSEC, delta);
1560 raw_spin_unlock(&ctx->lock);
1564 * Round-robin a context's events:
1566 static void rotate_ctx(struct perf_event_context *ctx)
1568 raw_spin_lock(&ctx->lock);
1570 /* Rotate the first entry of the non-pinned groups to the end */
1571 list_rotate_left(&ctx->flexible_groups);
1573 raw_spin_unlock(&ctx->lock);
1576 void perf_event_task_tick(struct task_struct *curr)
1578 struct perf_cpu_context *cpuctx;
1579 struct perf_event_context *ctx;
1582 if (!atomic_read(&nr_events))
1585 cpuctx = &__get_cpu_var(perf_cpu_context);
1586 if (cpuctx->ctx.nr_events &&
1587 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1590 ctx = curr->perf_event_ctxp;
1591 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1594 perf_ctx_adjust_freq(&cpuctx->ctx);
1596 perf_ctx_adjust_freq(ctx);
1602 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1604 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1606 rotate_ctx(&cpuctx->ctx);
1610 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1612 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1616 static int event_enable_on_exec(struct perf_event *event,
1617 struct perf_event_context *ctx)
1619 if (!event->attr.enable_on_exec)
1622 event->attr.enable_on_exec = 0;
1623 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1626 __perf_event_mark_enabled(event, ctx);
1632 * Enable all of a task's events that have been marked enable-on-exec.
1633 * This expects task == current.
1635 static void perf_event_enable_on_exec(struct task_struct *task)
1637 struct perf_event_context *ctx;
1638 struct perf_event *event;
1639 unsigned long flags;
1643 local_irq_save(flags);
1644 ctx = task->perf_event_ctxp;
1645 if (!ctx || !ctx->nr_events)
1648 __perf_event_task_sched_out(ctx);
1650 raw_spin_lock(&ctx->lock);
1652 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1653 ret = event_enable_on_exec(event, ctx);
1658 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1659 ret = event_enable_on_exec(event, ctx);
1665 * Unclone this context if we enabled any event.
1670 raw_spin_unlock(&ctx->lock);
1672 perf_event_task_sched_in(task);
1674 local_irq_restore(flags);
1678 * Cross CPU call to read the hardware event
1680 static void __perf_event_read(void *info)
1682 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1683 struct perf_event *event = info;
1684 struct perf_event_context *ctx = event->ctx;
1687 * If this is a task context, we need to check whether it is
1688 * the current task context of this cpu. If not it has been
1689 * scheduled out before the smp call arrived. In that case
1690 * event->count would have been updated to a recent sample
1691 * when the event was scheduled out.
1693 if (ctx->task && cpuctx->task_ctx != ctx)
1696 raw_spin_lock(&ctx->lock);
1697 update_context_time(ctx);
1698 update_event_times(event);
1699 raw_spin_unlock(&ctx->lock);
1701 event->pmu->read(event);
1704 static u64 perf_event_read(struct perf_event *event)
1707 * If event is enabled and currently active on a CPU, update the
1708 * value in the event structure:
1710 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1711 smp_call_function_single(event->oncpu,
1712 __perf_event_read, event, 1);
1713 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1714 struct perf_event_context *ctx = event->ctx;
1715 unsigned long flags;
1717 raw_spin_lock_irqsave(&ctx->lock, flags);
1718 update_context_time(ctx);
1719 update_event_times(event);
1720 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1723 return atomic64_read(&event->count);
1727 * Initialize the perf_event context in a task_struct:
1730 __perf_event_init_context(struct perf_event_context *ctx,
1731 struct task_struct *task)
1733 raw_spin_lock_init(&ctx->lock);
1734 mutex_init(&ctx->mutex);
1735 INIT_LIST_HEAD(&ctx->pinned_groups);
1736 INIT_LIST_HEAD(&ctx->flexible_groups);
1737 INIT_LIST_HEAD(&ctx->event_list);
1738 atomic_set(&ctx->refcount, 1);
1742 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1744 struct perf_event_context *ctx;
1745 struct perf_cpu_context *cpuctx;
1746 struct task_struct *task;
1747 unsigned long flags;
1750 if (pid == -1 && cpu != -1) {
1751 /* Must be root to operate on a CPU event: */
1752 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1753 return ERR_PTR(-EACCES);
1755 if (cpu < 0 || cpu >= nr_cpumask_bits)
1756 return ERR_PTR(-EINVAL);
1759 * We could be clever and allow attaching an event to an
1760 * offline CPU and activate it when the CPU comes up, but
1763 if (!cpu_online(cpu))
1764 return ERR_PTR(-ENODEV);
1766 cpuctx = &per_cpu(perf_cpu_context, cpu);
1777 task = find_task_by_vpid(pid);
1779 get_task_struct(task);
1783 return ERR_PTR(-ESRCH);
1786 * Can't attach events to a dying task.
1789 if (task->flags & PF_EXITING)
1792 /* Reuse ptrace permission checks for now. */
1794 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1798 ctx = perf_lock_task_context(task, &flags);
1801 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1805 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1809 __perf_event_init_context(ctx, task);
1811 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1813 * We raced with some other task; use
1814 * the context they set.
1819 get_task_struct(task);
1822 put_task_struct(task);
1826 put_task_struct(task);
1827 return ERR_PTR(err);
1830 static void perf_event_free_filter(struct perf_event *event);
1832 static void free_event_rcu(struct rcu_head *head)
1834 struct perf_event *event;
1836 event = container_of(head, struct perf_event, rcu_head);
1838 put_pid_ns(event->ns);
1839 perf_event_free_filter(event);
1843 static void perf_pending_sync(struct perf_event *event);
1845 static void free_event(struct perf_event *event)
1847 perf_pending_sync(event);
1849 if (!event->parent) {
1850 atomic_dec(&nr_events);
1851 if (event->attr.mmap)
1852 atomic_dec(&nr_mmap_events);
1853 if (event->attr.comm)
1854 atomic_dec(&nr_comm_events);
1855 if (event->attr.task)
1856 atomic_dec(&nr_task_events);
1859 if (event->output) {
1860 fput(event->output->filp);
1861 event->output = NULL;
1865 event->destroy(event);
1867 put_ctx(event->ctx);
1868 call_rcu(&event->rcu_head, free_event_rcu);
1871 int perf_event_release_kernel(struct perf_event *event)
1873 struct perf_event_context *ctx = event->ctx;
1876 * Remove from the PMU, can't get re-enabled since we got
1877 * here because the last ref went.
1879 perf_event_disable(event);
1881 WARN_ON_ONCE(ctx->parent_ctx);
1883 * There are two ways this annotation is useful:
1885 * 1) there is a lock recursion from perf_event_exit_task
1886 * see the comment there.
1888 * 2) there is a lock-inversion with mmap_sem through
1889 * perf_event_read_group(), which takes faults while
1890 * holding ctx->mutex, however this is called after
1891 * the last filedesc died, so there is no possibility
1892 * to trigger the AB-BA case.
1894 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
1895 raw_spin_lock_irq(&ctx->lock);
1896 list_del_event(event, ctx);
1897 perf_destroy_group(event, ctx);
1898 raw_spin_unlock_irq(&ctx->lock);
1899 mutex_unlock(&ctx->mutex);
1901 mutex_lock(&event->owner->perf_event_mutex);
1902 list_del_init(&event->owner_entry);
1903 mutex_unlock(&event->owner->perf_event_mutex);
1904 put_task_struct(event->owner);
1910 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1913 * Called when the last reference to the file is gone.
1915 static int perf_release(struct inode *inode, struct file *file)
1917 struct perf_event *event = file->private_data;
1919 file->private_data = NULL;
1921 return perf_event_release_kernel(event);
1924 static int perf_event_read_size(struct perf_event *event)
1926 int entry = sizeof(u64); /* value */
1930 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1931 size += sizeof(u64);
1933 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1934 size += sizeof(u64);
1936 if (event->attr.read_format & PERF_FORMAT_ID)
1937 entry += sizeof(u64);
1939 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1940 nr += event->group_leader->nr_siblings;
1941 size += sizeof(u64);
1949 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
1951 struct perf_event *child;
1957 mutex_lock(&event->child_mutex);
1958 total += perf_event_read(event);
1959 *enabled += event->total_time_enabled +
1960 atomic64_read(&event->child_total_time_enabled);
1961 *running += event->total_time_running +
1962 atomic64_read(&event->child_total_time_running);
1964 list_for_each_entry(child, &event->child_list, child_list) {
1965 total += perf_event_read(child);
1966 *enabled += child->total_time_enabled;
1967 *running += child->total_time_running;
1969 mutex_unlock(&event->child_mutex);
1973 EXPORT_SYMBOL_GPL(perf_event_read_value);
1975 static int perf_event_read_group(struct perf_event *event,
1976 u64 read_format, char __user *buf)
1978 struct perf_event *leader = event->group_leader, *sub;
1979 int n = 0, size = 0, ret = -EFAULT;
1980 struct perf_event_context *ctx = leader->ctx;
1982 u64 count, enabled, running;
1984 mutex_lock(&ctx->mutex);
1985 count = perf_event_read_value(leader, &enabled, &running);
1987 values[n++] = 1 + leader->nr_siblings;
1988 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1989 values[n++] = enabled;
1990 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1991 values[n++] = running;
1992 values[n++] = count;
1993 if (read_format & PERF_FORMAT_ID)
1994 values[n++] = primary_event_id(leader);
1996 size = n * sizeof(u64);
1998 if (copy_to_user(buf, values, size))
2003 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2006 values[n++] = perf_event_read_value(sub, &enabled, &running);
2007 if (read_format & PERF_FORMAT_ID)
2008 values[n++] = primary_event_id(sub);
2010 size = n * sizeof(u64);
2012 if (copy_to_user(buf + ret, values, size)) {
2020 mutex_unlock(&ctx->mutex);
2025 static int perf_event_read_one(struct perf_event *event,
2026 u64 read_format, char __user *buf)
2028 u64 enabled, running;
2032 values[n++] = perf_event_read_value(event, &enabled, &running);
2033 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2034 values[n++] = enabled;
2035 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2036 values[n++] = running;
2037 if (read_format & PERF_FORMAT_ID)
2038 values[n++] = primary_event_id(event);
2040 if (copy_to_user(buf, values, n * sizeof(u64)))
2043 return n * sizeof(u64);
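/*
 * Editorial summary of the read() layout produced above: without
 * PERF_FORMAT_GROUP a read returns
 *
 *	{ value, [time_enabled], [time_running], [id] }
 *
 * and with PERF_FORMAT_GROUP it returns
 *
 *	{ nr, [time_enabled], [time_running], { value, [id] } * nr }
 *
 * where the bracketed fields are present iff the corresponding
 * PERF_FORMAT_* bits are set in attr.read_format.
 */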
2047 * Read the performance event - simple non-blocking version for now
2050 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2052 u64 read_format = event->attr.read_format;
2056 * Return end-of-file for a read on an event that is in
2057 * error state (i.e. because it was pinned but it couldn't be
2058 * scheduled on to the CPU at some point).
2060 if (event->state == PERF_EVENT_STATE_ERROR)
2063 if (count < perf_event_read_size(event))
2066 WARN_ON_ONCE(event->ctx->parent_ctx);
2067 if (read_format & PERF_FORMAT_GROUP)
2068 ret = perf_event_read_group(event, read_format, buf);
2070 ret = perf_event_read_one(event, read_format, buf);
2076 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2078 struct perf_event *event = file->private_data;
2080 return perf_read_hw(event, buf, count);
2083 static unsigned int perf_poll(struct file *file, poll_table *wait)
2085 struct perf_event *event = file->private_data;
2086 struct perf_mmap_data *data;
2087 unsigned int events = POLLHUP;
2090 data = rcu_dereference(event->data);
2092 events = atomic_xchg(&data->poll, 0);
2095 poll_wait(file, &event->waitq, wait);
2100 static void perf_event_reset(struct perf_event *event)
2102 (void)perf_event_read(event);
2103 atomic64_set(&event->count, 0);
2104 perf_event_update_userpage(event);
2108 * Holding the top-level event's child_mutex means that any
2109 * descendant process that has inherited this event will block
2110 * in sync_child_event if it goes to exit, thus satisfying the
2111 * task existence requirements of perf_event_enable/disable.
2113 static void perf_event_for_each_child(struct perf_event *event,
2114 void (*func)(struct perf_event *))
2116 struct perf_event *child;
2118 WARN_ON_ONCE(event->ctx->parent_ctx);
2119 mutex_lock(&event->child_mutex);
2121 list_for_each_entry(child, &event->child_list, child_list)
2123 mutex_unlock(&event->child_mutex);
2126 static void perf_event_for_each(struct perf_event *event,
2127 void (*func)(struct perf_event *))
2129 struct perf_event_context *ctx = event->ctx;
2130 struct perf_event *sibling;
2132 WARN_ON_ONCE(ctx->parent_ctx);
2133 mutex_lock(&ctx->mutex);
2134 event = event->group_leader;
2136 perf_event_for_each_child(event, func);
2138 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2139 perf_event_for_each_child(event, func);
2140 mutex_unlock(&ctx->mutex);
2143 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2145 struct perf_event_context *ctx = event->ctx;
2150 if (!event->attr.sample_period)
2153 size = copy_from_user(&value, arg, sizeof(value));
2154 if (size != sizeof(value))
2160 raw_spin_lock_irq(&ctx->lock);
2161 if (event->attr.freq) {
2162 if (value > sysctl_perf_event_sample_rate) {
2167 event->attr.sample_freq = value;
2169 event->attr.sample_period = value;
2170 event->hw.sample_period = value;
2173 raw_spin_unlock_irq(&ctx->lock);
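/*
 * Usage sketch (an editorial assumption about a typical caller, not part of
 * this file): user space adjusts the period/frequency of an open event with
 *
 *	u64 new_period = 200000;
 *	ioctl(event_fd, PERF_EVENT_IOC_PERIOD, &new_period);
 *
 * which reaches this function through perf_ioctl() below.
 */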
2178 static int perf_event_set_output(struct perf_event *event, int output_fd);
2179 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2181 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2183 struct perf_event *event = file->private_data;
2184 void (*func)(struct perf_event *);
2188 case PERF_EVENT_IOC_ENABLE:
2189 func = perf_event_enable;
2191 case PERF_EVENT_IOC_DISABLE:
2192 func = perf_event_disable;
2194 case PERF_EVENT_IOC_RESET:
2195 func = perf_event_reset;
2198 case PERF_EVENT_IOC_REFRESH:
2199 return perf_event_refresh(event, arg);
2201 case PERF_EVENT_IOC_PERIOD:
2202 return perf_event_period(event, (u64 __user *)arg);
2204 case PERF_EVENT_IOC_SET_OUTPUT:
2205 return perf_event_set_output(event, arg);
2207 case PERF_EVENT_IOC_SET_FILTER:
2208 return perf_event_set_filter(event, (void __user *)arg);
2214 if (flags & PERF_IOC_FLAG_GROUP)
2215 perf_event_for_each(event, func);
2217 perf_event_for_each_child(event, func);
2222 int perf_event_task_enable(void)
2224 struct perf_event *event;
2226 mutex_lock(¤t->perf_event_mutex);
2227 list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
2228 perf_event_for_each_child(event, perf_event_enable);
2229 mutex_unlock(¤t->perf_event_mutex);
2234 int perf_event_task_disable(void)
2236 struct perf_event *event;
2238 mutex_lock(¤t->perf_event_mutex);
2239 list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
2240 perf_event_for_each_child(event, perf_event_disable);
2241 mutex_unlock(¤t->perf_event_mutex);
2246 #ifndef PERF_EVENT_INDEX_OFFSET
2247 # define PERF_EVENT_INDEX_OFFSET 0
2250 static int perf_event_index(struct perf_event *event)
2252 if (event->state != PERF_EVENT_STATE_ACTIVE)
2255 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
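/*
 * Editorial note: a return value of 0 means "not currently scheduled on a
 * hardware counter"; otherwise this is the counter index (offset by
 * PERF_EVENT_INDEX_OFFSET) published in the mmap()ed user page so user
 * space can read the counter directly.
 */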
2259 * Callers need to ensure there can be no nesting of this function, otherwise
2260 * the seqlock logic goes bad. We can not serialize this because the arch
2261 * code calls this from NMI context.
2263 void perf_event_update_userpage(struct perf_event *event)
2265 struct perf_event_mmap_page *userpg;
2266 struct perf_mmap_data *data;
2269 data = rcu_dereference(event->data);
2273 userpg = data->user_page;
2276 * Disable preemption so as to not let the corresponding user-space
2277 * spin too long if we get preempted.
2282 userpg->index = perf_event_index(event);
2283 userpg->offset = atomic64_read(&event->count);
2284 if (event->state == PERF_EVENT_STATE_ACTIVE)
2285 userpg->offset -= atomic64_read(&event->hw.prev_count);
2287 userpg->time_enabled = event->total_time_enabled +
2288 atomic64_read(&event->child_total_time_enabled);
2290 userpg->time_running = event->total_time_running +
2291 atomic64_read(&event->child_total_time_running);
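/*
 * Editorial sketch of the reader side (an assumption about the intended
 * protocol, not code from this file): user space treats userpg->lock as a
 * sequence count and retries on change, e.g.
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		... read pc->index / pc->offset ...
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * which is why the writer above must never nest.
 */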
2300 static unsigned long perf_data_size(struct perf_mmap_data *data)
2302 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2305 #ifndef CONFIG_PERF_USE_VMALLOC
2308 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2311 static struct page *
2312 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2314 if (pgoff > data->nr_pages)
2318 return virt_to_page(data->user_page);
2320 return virt_to_page(data->data_pages[pgoff - 1]);
2323 static void *perf_mmap_alloc_page(int cpu)
2328 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2329 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2333 return page_address(page);
2336 static struct perf_mmap_data *
2337 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2339 struct perf_mmap_data *data;
2343 WARN_ON(atomic_read(&event->mmap_count));
2345 size = sizeof(struct perf_mmap_data);
2346 size += nr_pages * sizeof(void *);
2348 data = kzalloc(size, GFP_KERNEL);
2352 data->user_page = perf_mmap_alloc_page(event->cpu);
2353 if (!data->user_page)
2354 goto fail_user_page;
2356 for (i = 0; i < nr_pages; i++) {
2357 data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
2358 if (!data->data_pages[i])
2359 goto fail_data_pages;
2362 data->data_order = 0;
2363 data->nr_pages = nr_pages;
2368 for (i--; i >= 0; i--)
2369 free_page((unsigned long)data->data_pages[i]);
2371 free_page((unsigned long)data->user_page);
2380 static void perf_mmap_free_page(unsigned long addr)
2382 struct page *page = virt_to_page((void *)addr);
2384 page->mapping = NULL;
2388 static void perf_mmap_data_free(struct perf_mmap_data *data)
2392 perf_mmap_free_page((unsigned long)data->user_page);
2393 for (i = 0; i < data->nr_pages; i++)
2394 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2401 * Back perf_mmap() with vmalloc memory.
2403 * Required for architectures that have d-cache aliasing issues.
2406 static struct page *
2407 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2409 if (pgoff > (1UL << data->data_order))
2412 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2415 static void perf_mmap_unmark_page(void *addr)
2417 struct page *page = vmalloc_to_page(addr);
2419 page->mapping = NULL;
2422 static void perf_mmap_data_free_work(struct work_struct *work)
2424 struct perf_mmap_data *data;
2428 data = container_of(work, struct perf_mmap_data, work);
2429 nr = 1 << data->data_order;
2431 base = data->user_page;
2432 for (i = 0; i < nr + 1; i++)
2433 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2439 static void perf_mmap_data_free(struct perf_mmap_data *data)
2441 schedule_work(&data->work);
2444 static struct perf_mmap_data *
2445 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2447 struct perf_mmap_data *data;
2451 WARN_ON(atomic_read(&event->mmap_count));
2453 size = sizeof(struct perf_mmap_data);
2454 size += sizeof(void *);
2456 data = kzalloc(size, GFP_KERNEL);
2460 INIT_WORK(&data->work, perf_mmap_data_free_work);
2462 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2466 data->user_page = all_buf;
2467 data->data_pages[0] = all_buf + PAGE_SIZE;
2468 data->data_order = ilog2(nr_pages);
2482 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2484 struct perf_event *event = vma->vm_file->private_data;
2485 struct perf_mmap_data *data;
2486 int ret = VM_FAULT_SIGBUS;
2488 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2489 if (vmf->pgoff == 0)
2495 data = rcu_dereference(event->data);
2499 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2502 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2506 get_page(vmf->page);
2507 vmf->page->mapping = vma->vm_file->f_mapping;
2508 vmf->page->index = vmf->pgoff;
2518 perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2520 long max_size = perf_data_size(data);
2522 atomic_set(&data->lock, -1);
2524 if (event->attr.watermark) {
2525 data->watermark = min_t(long, max_size,
2526 event->attr.wakeup_watermark);
2529 if (!data->watermark)
2530 data->watermark = max_size / 2;
2533 rcu_assign_pointer(event->data, data);
2536 static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2538 struct perf_mmap_data *data;
2540 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2541 perf_mmap_data_free(data);
2544 static void perf_mmap_data_release(struct perf_event *event)
2546 struct perf_mmap_data *data = event->data;
2548 WARN_ON(atomic_read(&event->mmap_count));
2550 rcu_assign_pointer(event->data, NULL);
2551 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2554 static void perf_mmap_open(struct vm_area_struct *vma)
2556 struct perf_event *event = vma->vm_file->private_data;
2558 atomic_inc(&event->mmap_count);
2561 static void perf_mmap_close(struct vm_area_struct *vma)
2563 struct perf_event *event = vma->vm_file->private_data;
2565 WARN_ON_ONCE(event->ctx->parent_ctx);
2566 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2567 unsigned long size = perf_data_size(event->data);
2568 struct user_struct *user = current_user();
2570 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2571 vma->vm_mm->locked_vm -= event->data->nr_locked;
2572 perf_mmap_data_release(event);
2573 mutex_unlock(&event->mmap_mutex);
2577 static const struct vm_operations_struct perf_mmap_vmops = {
2578 .open = perf_mmap_open,
2579 .close = perf_mmap_close,
2580 .fault = perf_mmap_fault,
2581 .page_mkwrite = perf_mmap_fault,
2584 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2586 struct perf_event *event = file->private_data;
2587 unsigned long user_locked, user_lock_limit;
2588 struct user_struct *user = current_user();
2589 unsigned long locked, lock_limit;
2590 struct perf_mmap_data *data;
2591 unsigned long vma_size;
2592 unsigned long nr_pages;
2593 long user_extra, extra;
2596 if (!(vma->vm_flags & VM_SHARED))
2599 vma_size = vma->vm_end - vma->vm_start;
2600 nr_pages = (vma_size / PAGE_SIZE) - 1;
2603 * If we have data pages ensure they're a power-of-two number, so we
2604 * can do bitmasks instead of modulo.
2606 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2609 if (vma_size != PAGE_SIZE * (1 + nr_pages))
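/*
 * Illustrative user-space sketch (not part of this file): the checks above
 * mean the mapping must cover one metadata page plus a power-of-two number
 * of data pages.  Assumes 'fd' is a valid perf event file descriptor; needs
 * <sys/mman.h> and <unistd.h>.
 */
#if 0
	int nr_data_pages = 8;	/* must be 0 or a power of two */
	size_t page_size = sysconf(_SC_PAGESIZE);
	size_t len = (1 + nr_data_pages) * page_size;
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (base == MAP_FAILED)
		return -1;	/* rejected, e.g. by the size/offset checks above */
#endif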
2612 if (vma->vm_pgoff != 0)
2615 WARN_ON_ONCE(event->ctx->parent_ctx);
2616 mutex_lock(&event->mmap_mutex);
2617 if (event->output) {
2622 if (atomic_inc_not_zero(&event->mmap_count)) {
2623 if (nr_pages != event->data->nr_pages)
2628 user_extra = nr_pages + 1;
2629 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2632 * Increase the limit linearly with more CPUs:
2634 user_lock_limit *= num_online_cpus();
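/*
 * Worked example (illustrative, assuming 4KiB pages and the default sysctl
 * of 512KiB per user): 512 >> (PAGE_SHIFT - 10) = 512 >> 2 = 128 pinnable
 * pages, then scaled by the number of online CPUs.
 */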
2636 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2639 if (user_locked > user_lock_limit)
2640 extra = user_locked - user_lock_limit;
2642 lock_limit = rlimit(RLIMIT_MEMLOCK);
2643 lock_limit >>= PAGE_SHIFT;
2644 locked = vma->vm_mm->locked_vm + extra;
2646 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2647 !capable(CAP_IPC_LOCK)) {
2652 WARN_ON(event->data);
2654 data = perf_mmap_data_alloc(event, nr_pages);
2660 perf_mmap_data_init(event, data);
2662 atomic_set(&event->mmap_count, 1);
2663 atomic_long_add(user_extra, &user->locked_vm);
2664 vma->vm_mm->locked_vm += extra;
2665 event->data->nr_locked = extra;
2666 if (vma->vm_flags & VM_WRITE)
2667 event->data->writable = 1;
2670 mutex_unlock(&event->mmap_mutex);
2672 vma->vm_flags |= VM_RESERVED;
2673 vma->vm_ops = &perf_mmap_vmops;
2678 static int perf_fasync(int fd, struct file *filp, int on)
2680 struct inode *inode = filp->f_path.dentry->d_inode;
2681 struct perf_event *event = filp->private_data;
2684 mutex_lock(&inode->i_mutex);
2685 retval = fasync_helper(fd, filp, on, &event->fasync);
2686 mutex_unlock(&inode->i_mutex);
2694 static const struct file_operations perf_fops = {
2695 .llseek = no_llseek,
2696 .release = perf_release,
2699 .unlocked_ioctl = perf_ioctl,
2700 .compat_ioctl = perf_ioctl,
2702 .fasync = perf_fasync,
2708 * If there's data, ensure we set the poll() state and publish everything
2709 * to user-space before waking everybody up.
2712 void perf_event_wakeup(struct perf_event *event)
2714 wake_up_all(&event->waitq);
2716 if (event->pending_kill) {
2717 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2718 event->pending_kill = 0;
2725 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2727 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2728 singly linked list and use cmpxchg() to add entries locklessly.
2731 static void perf_pending_event(struct perf_pending_entry *entry)
2733 struct perf_event *event = container_of(entry,
2734 struct perf_event, pending);
2736 if (event->pending_disable) {
2737 event->pending_disable = 0;
2738 __perf_event_disable(event);
2741 if (event->pending_wakeup) {
2742 event->pending_wakeup = 0;
2743 perf_event_wakeup(event);
2747 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2749 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2753 static void perf_pending_queue(struct perf_pending_entry *entry,
2754 void (*func)(struct perf_pending_entry *))
2756 struct perf_pending_entry **head;
2758 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2763 head = &get_cpu_var(perf_pending_head);
2766 entry->next = *head;
2767 } while (cmpxchg(head, entry->next, entry) != entry->next);
2769 set_perf_event_pending();
2771 put_cpu_var(perf_pending_head);
2774 static int __perf_pending_run(void)
2776 struct perf_pending_entry *list;
2779 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2780 while (list != PENDING_TAIL) {
2781 void (*func)(struct perf_pending_entry *);
2782 struct perf_pending_entry *entry = list;
2789 * Ensure we observe the unqueue before we issue the wakeup,
2790 * so that we won't be waiting forever.
2791 * -- see perf_not_pending().
2802 static inline int perf_not_pending(struct perf_event *event)
2805 * If we flush on whatever cpu we run, there is a chance we don't need to wait.
2809 __perf_pending_run();
2813 * Ensure we see the proper queue state before going to sleep
2814 * so that we do not miss the wakeup. -- see __perf_pending_run()
2817 return event->pending.next == NULL;
2820 static void perf_pending_sync(struct perf_event *event)
2822 wait_event(event->waitq, perf_not_pending(event));
2825 void perf_event_do_pending(void)
2827 __perf_pending_run();
2831 * Callchain support -- arch specific
2834 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2840 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2846 * We assume there is only KVM supporting the callbacks.
2847 * Later on, we might change it to a list if there is
2848 * another virtualization implementation supporting the callbacks.
2850 struct perf_guest_info_callbacks *perf_guest_cbs;
2852 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
2854 perf_guest_cbs = cbs;
2857 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
2859 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
2861 perf_guest_cbs = NULL;
2864 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
2869 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2870 unsigned long offset, unsigned long head)
2874 if (!data->writable)
2877 mask = perf_data_size(data) - 1;
2879 offset = (offset - tail) & mask;
2880 head = (head - tail) & mask;
2882 if ((int)(head - offset) < 0)
2888 static void perf_output_wakeup(struct perf_output_handle *handle)
2890 atomic_set(&handle->data->poll, POLL_IN);
2893 handle->event->pending_wakeup = 1;
2894 perf_pending_queue(&handle->event->pending,
2895 perf_pending_event);
2897 perf_event_wakeup(handle->event);
2901 * Curious locking construct.
2903 * We need to ensure a later event_id doesn't publish a head when a former
2904 * event_id isn't done writing. However since we need to deal with NMIs we
2905 * cannot fully serialize things.
2907 * What we do is serialize between CPUs so we only have to deal with NMI
2908 * nesting on a single CPU.
2910 * We only publish the head (and generate a wakeup) when the outer-most
2911 * event_id completes.
2913 static void perf_output_lock(struct perf_output_handle *handle)
2915 struct perf_mmap_data *data = handle->data;
2916 int cur, cpu = get_cpu();
2921 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2933 static void perf_output_unlock(struct perf_output_handle *handle)
2935 struct perf_mmap_data *data = handle->data;
2939 data->done_head = data->head;
2941 if (!handle->locked)
2946 * The xchg implies a full barrier that ensures all writes are done
2947 * before we publish the new head, matched by a rmb() in userspace when
2948 * reading this position.
2950 while ((head = atomic_long_xchg(&data->done_head, 0)))
2951 data->user_page->data_head = head;
2954 * NMI can happen here, which means we can miss a done_head update.
2957 cpu = atomic_xchg(&data->lock, -1);
2958 WARN_ON_ONCE(cpu != smp_processor_id());
2961 * Therefore we have to validate we did not indeed do so.
2963 if (unlikely(atomic_long_read(&data->done_head))) {
2965 * Since we had it locked, we can lock it again.
2967 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2973 if (atomic_xchg(&data->wakeup, 0))
2974 perf_output_wakeup(handle);
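/*
 * Illustrative user-space reader sketch (not part of this file): the pairing
 * side of the barrier described above.  'base' is assumed to be the start of
 * the mapping, so 'pc' points at struct perf_event_mmap_page;
 * __sync_synchronize() stands in for an architecture-appropriate rmb().
 */
#if 0
	struct perf_event_mmap_page *pc = base;	/* first mapped page */
	__u64 head = pc->data_head;

	__sync_synchronize();	/* read head before the record contents */
	/* ... consume records in [pc->data_tail, head) from the data pages ... */
	pc->data_tail = head;	/* publish how far we have read */
#endif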
2979 void perf_output_copy(struct perf_output_handle *handle,
2980 const void *buf, unsigned int len)
2982 unsigned int pages_mask;
2983 unsigned long offset;
2987 offset = handle->offset;
2988 pages_mask = handle->data->nr_pages - 1;
2989 pages = handle->data->data_pages;
2992 unsigned long page_offset;
2993 unsigned long page_size;
2996 nr = (offset >> PAGE_SHIFT) & pages_mask;
2997 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2998 page_offset = offset & (page_size - 1);
2999 size = min_t(unsigned int, page_size - page_offset, len);
3001 memcpy(pages[nr] + page_offset, buf, size);
3008 handle->offset = offset;
3011 * Check we didn't copy past our reservation window, taking the
3012 * possible unsigned int wrap into account.
3014 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
3017 int perf_output_begin(struct perf_output_handle *handle,
3018 struct perf_event *event, unsigned int size,
3019 int nmi, int sample)
3021 struct perf_event *output_event;
3022 struct perf_mmap_data *data;
3023 unsigned long tail, offset, head;
3026 struct perf_event_header header;
3033 * For inherited events we send all the output towards the parent.
3036 event = event->parent;
3038 output_event = rcu_dereference(event->output);
3040 event = output_event;
3042 data = rcu_dereference(event->data);
3046 handle->data = data;
3047 handle->event = event;
3049 handle->sample = sample;
3051 if (!data->nr_pages)
3054 have_lost = atomic_read(&data->lost);
3056 size += sizeof(lost_event);
3058 perf_output_lock(handle);
3062 * Userspace could choose to issue an mb() before updating the
3063 * tail pointer, so that all reads will be completed before the write is issued.
3066 tail = ACCESS_ONCE(data->user_page->data_tail);
3068 offset = head = atomic_long_read(&data->head);
3070 if (unlikely(!perf_output_space(data, tail, offset, head)))
3072 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
3074 handle->offset = offset;
3075 handle->head = head;
3077 if (head - tail > data->watermark)
3078 atomic_set(&data->wakeup, 1);
3081 lost_event.header.type = PERF_RECORD_LOST;
3082 lost_event.header.misc = 0;
3083 lost_event.header.size = sizeof(lost_event);
3084 lost_event.id = event->id;
3085 lost_event.lost = atomic_xchg(&data->lost, 0);
3087 perf_output_put(handle, lost_event);
3093 atomic_inc(&data->lost);
3094 perf_output_unlock(handle);
3101 void perf_output_end(struct perf_output_handle *handle)
3103 struct perf_event *event = handle->event;
3104 struct perf_mmap_data *data = handle->data;
3106 int wakeup_events = event->attr.wakeup_events;
3108 if (handle->sample && wakeup_events) {
3109 int events = atomic_inc_return(&data->events);
3110 if (events >= wakeup_events) {
3111 atomic_sub(wakeup_events, &data->events);
3112 atomic_set(&data->wakeup, 1);
3116 perf_output_unlock(handle);
3120 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3123 * only top level events have the pid namespace they were created in
3126 event = event->parent;
3128 return task_tgid_nr_ns(p, event->ns);
3131 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3134 * only top level events have the pid namespace they were created in
3137 event = event->parent;
3139 return task_pid_nr_ns(p, event->ns);
3142 static void perf_output_read_one(struct perf_output_handle *handle,
3143 struct perf_event *event)
3145 u64 read_format = event->attr.read_format;
3149 values[n++] = atomic64_read(&event->count);
3150 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3151 values[n++] = event->total_time_enabled +
3152 atomic64_read(&event->child_total_time_enabled);
3154 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3155 values[n++] = event->total_time_running +
3156 atomic64_read(&event->child_total_time_running);
3158 if (read_format & PERF_FORMAT_ID)
3159 values[n++] = primary_event_id(event);
3161 perf_output_copy(handle, values, n * sizeof(u64));
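/*
 * Illustrative sketch (not part of this file): the buffer layout user-space
 * gets back from read() for a single event when all three optional fields
 * above are requested; the struct name is hypothetical.
 */
#if 0
	struct read_format_one {
		__u64 value;		/* atomic64_read(&event->count) */
		__u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		__u64 id;		/* PERF_FORMAT_ID */
	};
#endif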
3165 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3167 static void perf_output_read_group(struct perf_output_handle *handle,
3168 struct perf_event *event)
3170 struct perf_event *leader = event->group_leader, *sub;
3171 u64 read_format = event->attr.read_format;
3175 values[n++] = 1 + leader->nr_siblings;
3177 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3178 values[n++] = leader->total_time_enabled;
3180 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3181 values[n++] = leader->total_time_running;
3183 if (leader != event)
3184 leader->pmu->read(leader);
3186 values[n++] = atomic64_read(&leader->count);
3187 if (read_format & PERF_FORMAT_ID)
3188 values[n++] = primary_event_id(leader);
3190 perf_output_copy(handle, values, n * sizeof(u64));
3192 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3196 sub->pmu->read(sub);
3198 values[n++] = atomic64_read(&sub->count);
3199 if (read_format & PERF_FORMAT_ID)
3200 values[n++] = primary_event_id(sub);
3202 perf_output_copy(handle, values, n * sizeof(u64));
3206 static void perf_output_read(struct perf_output_handle *handle,
3207 struct perf_event *event)
3209 if (event->attr.read_format & PERF_FORMAT_GROUP)
3210 perf_output_read_group(handle, event);
3212 perf_output_read_one(handle, event);
3215 void perf_output_sample(struct perf_output_handle *handle,
3216 struct perf_event_header *header,
3217 struct perf_sample_data *data,
3218 struct perf_event *event)
3220 u64 sample_type = data->type;
3222 perf_output_put(handle, *header);
3224 if (sample_type & PERF_SAMPLE_IP)
3225 perf_output_put(handle, data->ip);
3227 if (sample_type & PERF_SAMPLE_TID)
3228 perf_output_put(handle, data->tid_entry);
3230 if (sample_type & PERF_SAMPLE_TIME)
3231 perf_output_put(handle, data->time);
3233 if (sample_type & PERF_SAMPLE_ADDR)
3234 perf_output_put(handle, data->addr);
3236 if (sample_type & PERF_SAMPLE_ID)
3237 perf_output_put(handle, data->id);
3239 if (sample_type & PERF_SAMPLE_STREAM_ID)
3240 perf_output_put(handle, data->stream_id);
3242 if (sample_type & PERF_SAMPLE_CPU)
3243 perf_output_put(handle, data->cpu_entry);
3245 if (sample_type & PERF_SAMPLE_PERIOD)
3246 perf_output_put(handle, data->period);
3248 if (sample_type & PERF_SAMPLE_READ)
3249 perf_output_read(handle, event);
3251 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3252 if (data->callchain) {
3255 if (data->callchain)
3256 size += data->callchain->nr;
3258 size *= sizeof(u64);
3260 perf_output_copy(handle, data->callchain, size);
3263 perf_output_put(handle, nr);
3267 if (sample_type & PERF_SAMPLE_RAW) {
3269 perf_output_put(handle, data->raw->size);
3270 perf_output_copy(handle, data->raw->data,
3277 .size = sizeof(u32),
3280 perf_output_put(handle, raw);
3285 void perf_prepare_sample(struct perf_event_header *header,
3286 struct perf_sample_data *data,
3287 struct perf_event *event,
3288 struct pt_regs *regs)
3290 u64 sample_type = event->attr.sample_type;
3292 data->type = sample_type;
3294 header->type = PERF_RECORD_SAMPLE;
3295 header->size = sizeof(*header);
3298 header->misc |= perf_misc_flags(regs);
3300 if (sample_type & PERF_SAMPLE_IP) {
3301 data->ip = perf_instruction_pointer(regs);
3303 header->size += sizeof(data->ip);
3306 if (sample_type & PERF_SAMPLE_TID) {
3307 /* namespace issues */
3308 data->tid_entry.pid = perf_event_pid(event, current);
3309 data->tid_entry.tid = perf_event_tid(event, current);
3311 header->size += sizeof(data->tid_entry);
3314 if (sample_type & PERF_SAMPLE_TIME) {
3315 data->time = perf_clock();
3317 header->size += sizeof(data->time);
3320 if (sample_type & PERF_SAMPLE_ADDR)
3321 header->size += sizeof(data->addr);
3323 if (sample_type & PERF_SAMPLE_ID) {
3324 data->id = primary_event_id(event);
3326 header->size += sizeof(data->id);
3329 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3330 data->stream_id = event->id;
3332 header->size += sizeof(data->stream_id);
3335 if (sample_type & PERF_SAMPLE_CPU) {
3336 data->cpu_entry.cpu = raw_smp_processor_id();
3337 data->cpu_entry.reserved = 0;
3339 header->size += sizeof(data->cpu_entry);
3342 if (sample_type & PERF_SAMPLE_PERIOD)
3343 header->size += sizeof(data->period);
3345 if (sample_type & PERF_SAMPLE_READ)
3346 header->size += perf_event_read_size(event);
3348 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3351 data->callchain = perf_callchain(regs);
3353 if (data->callchain)
3354 size += data->callchain->nr;
3356 header->size += size * sizeof(u64);
3359 if (sample_type & PERF_SAMPLE_RAW) {
3360 int size = sizeof(u32);
3363 size += data->raw->size;
3365 size += sizeof(u32);
3367 WARN_ON_ONCE(size & (sizeof(u64)-1));
3368 header->size += size;
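/*
 * Worked example (illustrative): with sample_type set to
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME the size grows from
 * the 8-byte header by 8 + 8 + 8, so perf_output_begin() below ends up
 * reserving a 32-byte record.
 */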
3372 static void perf_event_output(struct perf_event *event, int nmi,
3373 struct perf_sample_data *data,
3374 struct pt_regs *regs)
3376 struct perf_output_handle handle;
3377 struct perf_event_header header;
3379 perf_prepare_sample(&header, data, event, regs);
3381 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3384 perf_output_sample(&handle, &header, data, event);
3386 perf_output_end(&handle);
3393 struct perf_read_event {
3394 struct perf_event_header header;
3401 perf_event_read_event(struct perf_event *event,
3402 struct task_struct *task)
3404 struct perf_output_handle handle;
3405 struct perf_read_event read_event = {
3407 .type = PERF_RECORD_READ,
3409 .size = sizeof(read_event) + perf_event_read_size(event),
3411 .pid = perf_event_pid(event, task),
3412 .tid = perf_event_tid(event, task),
3416 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3420 perf_output_put(&handle, read_event);
3421 perf_output_read(&handle, event);
3423 perf_output_end(&handle);
3427 * task tracking -- fork/exit
3429 * enabled by: attr.comm | attr.mmap | attr.task
3432 struct perf_task_event {
3433 struct task_struct *task;
3434 struct perf_event_context *task_ctx;
3437 struct perf_event_header header;
3447 static void perf_event_task_output(struct perf_event *event,
3448 struct perf_task_event *task_event)
3450 struct perf_output_handle handle;
3451 struct task_struct *task = task_event->task;
3452 unsigned long flags;
3456 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3457 * in perf_output_lock() from interrupt context, it's game over.
3459 local_irq_save(flags);
3461 size = task_event->event_id.header.size;
3462 ret = perf_output_begin(&handle, event, size, 0, 0);
3465 local_irq_restore(flags);
3469 task_event->event_id.pid = perf_event_pid(event, task);
3470 task_event->event_id.ppid = perf_event_pid(event, current);
3472 task_event->event_id.tid = perf_event_tid(event, task);
3473 task_event->event_id.ptid = perf_event_tid(event, current);
3475 perf_output_put(&handle, task_event->event_id);
3477 perf_output_end(&handle);
3478 local_irq_restore(flags);
3481 static int perf_event_task_match(struct perf_event *event)
3483 if (event->state < PERF_EVENT_STATE_INACTIVE)
3486 if (event->cpu != -1 && event->cpu != smp_processor_id())
3489 if (event->attr.comm || event->attr.mmap || event->attr.task)
3495 static void perf_event_task_ctx(struct perf_event_context *ctx,
3496 struct perf_task_event *task_event)
3498 struct perf_event *event;
3500 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3501 if (perf_event_task_match(event))
3502 perf_event_task_output(event, task_event);
3506 static void perf_event_task_event(struct perf_task_event *task_event)
3508 struct perf_cpu_context *cpuctx;
3509 struct perf_event_context *ctx = task_event->task_ctx;
3512 cpuctx = &get_cpu_var(perf_cpu_context);
3513 perf_event_task_ctx(&cpuctx->ctx, task_event);
3515 ctx = rcu_dereference(current->perf_event_ctxp);
3517 perf_event_task_ctx(ctx, task_event);
3518 put_cpu_var(perf_cpu_context);
3522 static void perf_event_task(struct task_struct *task,
3523 struct perf_event_context *task_ctx,
3526 struct perf_task_event task_event;
3528 if (!atomic_read(&nr_comm_events) &&
3529 !atomic_read(&nr_mmap_events) &&
3530 !atomic_read(&nr_task_events))
3533 task_event = (struct perf_task_event){
3535 .task_ctx = task_ctx,
3538 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3540 .size = sizeof(task_event.event_id),
3546 .time = perf_clock(),
3550 perf_event_task_event(&task_event);
3553 void perf_event_fork(struct task_struct *task)
3555 perf_event_task(task, NULL, 1);
3562 struct perf_comm_event {
3563 struct task_struct *task;
3568 struct perf_event_header header;
3575 static void perf_event_comm_output(struct perf_event *event,
3576 struct perf_comm_event *comm_event)
3578 struct perf_output_handle handle;
3579 int size = comm_event->event_id.header.size;
3580 int ret = perf_output_begin(&handle, event, size, 0, 0);
3585 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3586 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3588 perf_output_put(&handle, comm_event->event_id);
3589 perf_output_copy(&handle, comm_event->comm,
3590 comm_event->comm_size);
3591 perf_output_end(&handle);
3594 static int perf_event_comm_match(struct perf_event *event)
3596 if (event->state < PERF_EVENT_STATE_INACTIVE)
3599 if (event->cpu != -1 && event->cpu != smp_processor_id())
3602 if (event->attr.comm)
3608 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3609 struct perf_comm_event *comm_event)
3611 struct perf_event *event;
3613 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3614 if (perf_event_comm_match(event))
3615 perf_event_comm_output(event, comm_event);
3619 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3621 struct perf_cpu_context *cpuctx;
3622 struct perf_event_context *ctx;
3624 char comm[TASK_COMM_LEN];
3626 memset(comm, 0, sizeof(comm));
3627 strlcpy(comm, comm_event->task->comm, sizeof(comm));
3628 size = ALIGN(strlen(comm)+1, sizeof(u64));
3630 comm_event->comm = comm;
3631 comm_event->comm_size = size;
3633 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3636 cpuctx = &get_cpu_var(perf_cpu_context);
3637 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3638 ctx = rcu_dereference(current->perf_event_ctxp);
3640 perf_event_comm_ctx(ctx, comm_event);
3641 put_cpu_var(perf_cpu_context);
3645 void perf_event_comm(struct task_struct *task)
3647 struct perf_comm_event comm_event;
3649 if (task->perf_event_ctxp)
3650 perf_event_enable_on_exec(task);
3652 if (!atomic_read(&nr_comm_events))
3655 comm_event = (struct perf_comm_event){
3661 .type = PERF_RECORD_COMM,
3670 perf_event_comm_event(&comm_event);
3677 struct perf_mmap_event {
3678 struct vm_area_struct *vma;
3680 const char *file_name;
3684 struct perf_event_header header;
3694 static void perf_event_mmap_output(struct perf_event *event,
3695 struct perf_mmap_event *mmap_event)
3697 struct perf_output_handle handle;
3698 int size = mmap_event->event_id.header.size;
3699 int ret = perf_output_begin(&handle, event, size, 0, 0);
3704 mmap_event->event_id.pid = perf_event_pid(event, current);
3705 mmap_event->event_id.tid = perf_event_tid(event, current);
3707 perf_output_put(&handle, mmap_event->event_id);
3708 perf_output_copy(&handle, mmap_event->file_name,
3709 mmap_event->file_size);
3710 perf_output_end(&handle);
3713 static int perf_event_mmap_match(struct perf_event *event,
3714 struct perf_mmap_event *mmap_event)
3716 if (event->state < PERF_EVENT_STATE_INACTIVE)
3719 if (event->cpu != -1 && event->cpu != smp_processor_id())
3722 if (event->attr.mmap)
3728 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3729 struct perf_mmap_event *mmap_event)
3731 struct perf_event *event;
3733 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3734 if (perf_event_mmap_match(event, mmap_event))
3735 perf_event_mmap_output(event, mmap_event);
3739 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3741 struct perf_cpu_context *cpuctx;
3742 struct perf_event_context *ctx;
3743 struct vm_area_struct *vma = mmap_event->vma;
3744 struct file *file = vma->vm_file;
3750 memset(tmp, 0, sizeof(tmp));
3754 * d_path works from the end of the buffer backwards, so we
3755 * need to add enough zero bytes after the string to handle
3756 * the 64bit alignment we do later.
3758 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3760 name = strncpy(tmp, "//enomem", sizeof(tmp));
3763 name = d_path(&file->f_path, buf, PATH_MAX);
3765 name = strncpy(tmp, "//toolong", sizeof(tmp));
3769 if (arch_vma_name(mmap_event->vma)) {
3770 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3776 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3780 name = strncpy(tmp, "//anon", sizeof(tmp));
3785 size = ALIGN(strlen(name)+1, sizeof(u64));
3787 mmap_event->file_name = name;
3788 mmap_event->file_size = size;
3790 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3793 cpuctx = &get_cpu_var(perf_cpu_context);
3794 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3795 ctx = rcu_dereference(current->perf_event_ctxp);
3797 perf_event_mmap_ctx(ctx, mmap_event);
3798 put_cpu_var(perf_cpu_context);
3804 void __perf_event_mmap(struct vm_area_struct *vma)
3806 struct perf_mmap_event mmap_event;
3808 if (!atomic_read(&nr_mmap_events))
3811 mmap_event = (struct perf_mmap_event){
3817 .type = PERF_RECORD_MMAP,
3818 .misc = PERF_RECORD_MISC_USER,
3823 .start = vma->vm_start,
3824 .len = vma->vm_end - vma->vm_start,
3825 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
3829 perf_event_mmap_event(&mmap_event);
3833 * IRQ throttle logging
3836 static void perf_log_throttle(struct perf_event *event, int enable)
3838 struct perf_output_handle handle;
3842 struct perf_event_header header;
3846 } throttle_event = {
3848 .type = PERF_RECORD_THROTTLE,
3850 .size = sizeof(throttle_event),
3852 .time = perf_clock(),
3853 .id = primary_event_id(event),
3854 .stream_id = event->id,
3858 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3860 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3864 perf_output_put(&handle, throttle_event);
3865 perf_output_end(&handle);
3869 * Generic event overflow handling, sampling.
3872 static int __perf_event_overflow(struct perf_event *event, int nmi,
3873 int throttle, struct perf_sample_data *data,
3874 struct pt_regs *regs)
3876 int events = atomic_read(&event->event_limit);
3877 struct hw_perf_event *hwc = &event->hw;
3880 throttle = (throttle && event->pmu->unthrottle != NULL);
3885 if (hwc->interrupts != MAX_INTERRUPTS) {
3887 if (HZ * hwc->interrupts >
3888 (u64)sysctl_perf_event_sample_rate) {
3889 hwc->interrupts = MAX_INTERRUPTS;
3890 perf_log_throttle(event, 0);
3895 * Keep re-disabling events even though on the previous
3896 * pass we disabled it - just in case we raced with a
3897 * sched-in and the event got enabled again:
3903 if (event->attr.freq) {
3904 u64 now = perf_clock();
3905 s64 delta = now - hwc->freq_time_stamp;
3907 hwc->freq_time_stamp = now;
3909 if (delta > 0 && delta < 2*TICK_NSEC)
3910 perf_adjust_period(event, delta, hwc->last_period);
3914 * XXX event_limit might not quite work as expected on inherited events.
3918 event->pending_kill = POLL_IN;
3919 if (events && atomic_dec_and_test(&event->event_limit)) {
3921 event->pending_kill = POLL_HUP;
3923 event->pending_disable = 1;
3924 perf_pending_queue(&event->pending,
3925 perf_pending_event);
3927 perf_event_disable(event);
3930 if (event->overflow_handler)
3931 event->overflow_handler(event, nmi, data, regs);
3933 perf_event_output(event, nmi, data, regs);
3938 int perf_event_overflow(struct perf_event *event, int nmi,
3939 struct perf_sample_data *data,
3940 struct pt_regs *regs)
3942 return __perf_event_overflow(event, nmi, 1, data, regs);
3946 * Generic software event infrastructure
3950 * We directly increment event->count and keep a second value in
3951 * event->hw.period_left to count intervals. This period event
3952 is kept in the range [-sample_period, 0] so that we can use the sign as a trigger.
3956 static u64 perf_swevent_set_period(struct perf_event *event)
3958 struct hw_perf_event *hwc = &event->hw;
3959 u64 period = hwc->last_period;
3963 hwc->last_period = hwc->sample_period;
3966 old = val = atomic64_read(&hwc->period_left);
3970 nr = div64_u64(period + val, period);
3971 offset = nr * period;
3973 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3979 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
3980 int nmi, struct perf_sample_data *data,
3981 struct pt_regs *regs)
3983 struct hw_perf_event *hwc = &event->hw;
3986 data->period = event->hw.last_period;
3988 overflow = perf_swevent_set_period(event);
3990 if (hwc->interrupts == MAX_INTERRUPTS)
3993 for (; overflow; overflow--) {
3994 if (__perf_event_overflow(event, nmi, throttle,
3997 * We inhibit the overflow from happening when
3998 * hwc->interrupts == MAX_INTERRUPTS.
4006 static void perf_swevent_unthrottle(struct perf_event *event)
4009 * Nothing to do, we already reset hwc->interrupts.
4013 static void perf_swevent_add(struct perf_event *event, u64 nr,
4014 int nmi, struct perf_sample_data *data,
4015 struct pt_regs *regs)
4017 struct hw_perf_event *hwc = &event->hw;
4019 atomic64_add(nr, &event->count);
4024 if (!hwc->sample_period)
4027 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4028 return perf_swevent_overflow(event, 1, nmi, data, regs);
4030 if (atomic64_add_negative(nr, &hwc->period_left))
4033 perf_swevent_overflow(event, 0, nmi, data, regs);
4036 static int perf_tp_event_match(struct perf_event *event,
4037 struct perf_sample_data *data);
4039 static int perf_exclude_event(struct perf_event *event,
4040 struct pt_regs *regs)
4043 if (event->attr.exclude_user && user_mode(regs))
4046 if (event->attr.exclude_kernel && !user_mode(regs))
4053 static int perf_swevent_match(struct perf_event *event,
4054 enum perf_type_id type,
4056 struct perf_sample_data *data,
4057 struct pt_regs *regs)
4059 if (event->attr.type != type)
4062 if (event->attr.config != event_id)
4065 if (perf_exclude_event(event, regs))
4068 if (event->attr.type == PERF_TYPE_TRACEPOINT &&
4069 !perf_tp_event_match(event, data))
4075 static inline u64 swevent_hash(u64 type, u32 event_id)
4077 u64 val = event_id | (type << 32);
4079 return hash_64(val, SWEVENT_HLIST_BITS);
4082 static struct hlist_head *
4083 find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4086 struct swevent_hlist *hlist;
4088 hash = swevent_hash(type, event_id);
4090 hlist = rcu_dereference(ctx->swevent_hlist);
4094 return &hlist->heads[hash];
4097 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4099 struct perf_sample_data *data,
4100 struct pt_regs *regs)
4102 struct perf_cpu_context *cpuctx;
4103 struct perf_event *event;
4104 struct hlist_node *node;
4105 struct hlist_head *head;
4107 cpuctx = &__get_cpu_var(perf_cpu_context);
4111 head = find_swevent_head(cpuctx, type, event_id);
4116 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4117 if (perf_swevent_match(event, type, event_id, data, regs))
4118 perf_swevent_add(event, nr, nmi, data, regs);
4124 int perf_swevent_get_recursion_context(void)
4126 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
4133 else if (in_softirq())
4138 if (cpuctx->recursion[rctx]) {
4139 put_cpu_var(perf_cpu_context);
4143 cpuctx->recursion[rctx]++;
4148 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4150 void perf_swevent_put_recursion_context(int rctx)
4152 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4154 cpuctx->recursion[rctx]--;
4155 put_cpu_var(perf_cpu_context);
4157 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
4160 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4161 struct pt_regs *regs, u64 addr)
4163 struct perf_sample_data data;
4166 rctx = perf_swevent_get_recursion_context();
4170 perf_sample_data_init(&data, addr);
4172 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4174 perf_swevent_put_recursion_context(rctx);
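/*
 * Illustrative caller sketch (not part of this file): the pattern
 * architecture page-fault handlers typically use, via the perf_sw_event()
 * wrapper around __perf_sw_event().
 */
#if 0
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
#endif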
4177 static void perf_swevent_read(struct perf_event *event)
4181 static int perf_swevent_enable(struct perf_event *event)
4183 struct hw_perf_event *hwc = &event->hw;
4184 struct perf_cpu_context *cpuctx;
4185 struct hlist_head *head;
4187 cpuctx = &__get_cpu_var(perf_cpu_context);
4189 if (hwc->sample_period) {
4190 hwc->last_period = hwc->sample_period;
4191 perf_swevent_set_period(event);
4194 head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
4195 if (WARN_ON_ONCE(!head))
4198 hlist_add_head_rcu(&event->hlist_entry, head);
4203 static void perf_swevent_disable(struct perf_event *event)
4205 hlist_del_rcu(&event->hlist_entry);
4208 static const struct pmu perf_ops_generic = {
4209 .enable = perf_swevent_enable,
4210 .disable = perf_swevent_disable,
4211 .read = perf_swevent_read,
4212 .unthrottle = perf_swevent_unthrottle,
4216 * hrtimer based swevent callback
4219 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4221 enum hrtimer_restart ret = HRTIMER_RESTART;
4222 struct perf_sample_data data;
4223 struct pt_regs *regs;
4224 struct perf_event *event;
4227 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4228 event->pmu->read(event);
4230 perf_sample_data_init(&data, 0);
4231 data.period = event->hw.last_period;
4232 regs = get_irq_regs();
4234 if (regs && !perf_exclude_event(event, regs)) {
4235 if (!(event->attr.exclude_idle && current->pid == 0))
4236 if (perf_event_overflow(event, 0, &data, regs))
4237 ret = HRTIMER_NORESTART;
4240 period = max_t(u64, 10000, event->hw.sample_period);
4241 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4246 static void perf_swevent_start_hrtimer(struct perf_event *event)
4248 struct hw_perf_event *hwc = &event->hw;
4250 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4251 hwc->hrtimer.function = perf_swevent_hrtimer;
4252 if (hwc->sample_period) {
4255 if (hwc->remaining) {
4256 if (hwc->remaining < 0)
4259 period = hwc->remaining;
4262 period = max_t(u64, 10000, hwc->sample_period);
4264 __hrtimer_start_range_ns(&hwc->hrtimer,
4265 ns_to_ktime(period), 0,
4266 HRTIMER_MODE_REL, 0);
4270 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4272 struct hw_perf_event *hwc = &event->hw;
4274 if (hwc->sample_period) {
4275 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4276 hwc->remaining = ktime_to_ns(remaining);
4278 hrtimer_cancel(&hwc->hrtimer);
4283 * Software event: cpu wall time clock
4286 static void cpu_clock_perf_event_update(struct perf_event *event)
4288 int cpu = raw_smp_processor_id();
4292 now = cpu_clock(cpu);
4293 prev = atomic64_xchg(&event->hw.prev_count, now);
4294 atomic64_add(now - prev, &event->count);
4297 static int cpu_clock_perf_event_enable(struct perf_event *event)
4299 struct hw_perf_event *hwc = &event->hw;
4300 int cpu = raw_smp_processor_id();
4302 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
4303 perf_swevent_start_hrtimer(event);
4308 static void cpu_clock_perf_event_disable(struct perf_event *event)
4310 perf_swevent_cancel_hrtimer(event);
4311 cpu_clock_perf_event_update(event);
4314 static void cpu_clock_perf_event_read(struct perf_event *event)
4316 cpu_clock_perf_event_update(event);
4319 static const struct pmu perf_ops_cpu_clock = {
4320 .enable = cpu_clock_perf_event_enable,
4321 .disable = cpu_clock_perf_event_disable,
4322 .read = cpu_clock_perf_event_read,
4326 * Software event: task time clock
4329 static void task_clock_perf_event_update(struct perf_event *event, u64 now)
4334 prev = atomic64_xchg(&event->hw.prev_count, now);
4336 atomic64_add(delta, &event->count);
4339 static int task_clock_perf_event_enable(struct perf_event *event)
4341 struct hw_perf_event *hwc = &event->hw;
4344 now = event->ctx->time;
4346 atomic64_set(&hwc->prev_count, now);
4348 perf_swevent_start_hrtimer(event);
4353 static void task_clock_perf_event_disable(struct perf_event *event)
4355 perf_swevent_cancel_hrtimer(event);
4356 task_clock_perf_event_update(event, event->ctx->time);
4360 static void task_clock_perf_event_read(struct perf_event *event)
4365 update_context_time(event->ctx);
4366 time = event->ctx->time;
4368 u64 now = perf_clock();
4369 u64 delta = now - event->ctx->timestamp;
4370 time = event->ctx->time + delta;
4373 task_clock_perf_event_update(event, time);
4376 static const struct pmu perf_ops_task_clock = {
4377 .enable = task_clock_perf_event_enable,
4378 .disable = task_clock_perf_event_disable,
4379 .read = task_clock_perf_event_read,
4382 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4384 struct swevent_hlist *hlist;
4386 hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4390 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4392 struct swevent_hlist *hlist;
4394 if (!cpuctx->swevent_hlist)
4397 hlist = cpuctx->swevent_hlist;
4398 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4399 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4402 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4404 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4406 mutex_lock(&cpuctx->hlist_mutex);
4408 if (!--cpuctx->hlist_refcount)
4409 swevent_hlist_release(cpuctx);
4411 mutex_unlock(&cpuctx->hlist_mutex);
4414 static void swevent_hlist_put(struct perf_event *event)
4418 if (event->cpu != -1) {
4419 swevent_hlist_put_cpu(event, event->cpu);
4423 for_each_possible_cpu(cpu)
4424 swevent_hlist_put_cpu(event, cpu);
4427 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4429 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4432 mutex_lock(&cpuctx->hlist_mutex);
4434 if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
4435 struct swevent_hlist *hlist;
4437 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4442 rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
4444 cpuctx->hlist_refcount++;
4446 mutex_unlock(&cpuctx->hlist_mutex);
4451 static int swevent_hlist_get(struct perf_event *event)
4454 int cpu, failed_cpu;
4456 if (event->cpu != -1)
4457 return swevent_hlist_get_cpu(event, event->cpu);
4460 for_each_possible_cpu(cpu) {
4461 err = swevent_hlist_get_cpu(event, cpu);
4471 for_each_possible_cpu(cpu) {
4472 if (cpu == failed_cpu)
4474 swevent_hlist_put_cpu(event, cpu);
4481 #ifdef CONFIG_EVENT_TRACING
4483 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4484 int entry_size, struct pt_regs *regs, void *event)
4486 const int type = PERF_TYPE_TRACEPOINT;
4487 struct perf_sample_data data;
4488 struct perf_raw_record raw = {
4493 perf_sample_data_init(&data, addr);
4497 do_perf_sw_event(type, event_id, count, 1, &data, regs);
4501 if (perf_swevent_match(event, type, event_id, &data, regs))
4502 perf_swevent_add(event, count, 1, &data, regs);
4504 EXPORT_SYMBOL_GPL(perf_tp_event);
4506 static int perf_tp_event_match(struct perf_event *event,
4507 struct perf_sample_data *data)
4509 void *record = data->raw->data;
4511 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4516 static void tp_perf_event_destroy(struct perf_event *event)
4518 perf_trace_disable(event->attr.config);
4519 swevent_hlist_put(event);
4522 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4527 * Raw tracepoint data is a severe data leak, only allow root to have these.
4530 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4531 perf_paranoid_tracepoint_raw() &&
4532 !capable(CAP_SYS_ADMIN))
4533 return ERR_PTR(-EPERM);
4535 if (perf_trace_enable(event->attr.config, event))
4538 event->destroy = tp_perf_event_destroy;
4539 err = swevent_hlist_get(event);
4541 perf_trace_disable(event->attr.config);
4542 return ERR_PTR(err);
4545 return &perf_ops_generic;
4548 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4553 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4556 filter_str = strndup_user(arg, PAGE_SIZE);
4557 if (IS_ERR(filter_str))
4558 return PTR_ERR(filter_str);
4560 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4566 static void perf_event_free_filter(struct perf_event *event)
4568 ftrace_profile_free_filter(event);
4573 static int perf_tp_event_match(struct perf_event *event,
4574 struct perf_sample_data *data)
4579 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4584 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4589 static void perf_event_free_filter(struct perf_event *event)
4593 #endif /* CONFIG_EVENT_TRACING */
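/*
 * Illustrative user-space sketch (not part of this file): a tracepoint
 * event's filter is installed through the PERF_EVENT_IOC_SET_FILTER ioctl,
 * which ends up in perf_event_set_filter() above.  The predicate string is
 * only an example; needs <sys/ioctl.h> and <linux/perf_event.h>.
 */
#if 0
	if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid == 1234") < 0)
		perror("PERF_EVENT_IOC_SET_FILTER");
#endif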
4595 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4596 static void bp_perf_event_destroy(struct perf_event *event)
4598 release_bp_slot(event);
4601 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4605 err = register_perf_hw_breakpoint(bp);
4607 return ERR_PTR(err);
4609 bp->destroy = bp_perf_event_destroy;
4611 return &perf_ops_bp;
4614 void perf_bp_event(struct perf_event *bp, void *data)
4616 struct perf_sample_data sample;
4617 struct pt_regs *regs = data;
4619 perf_sample_data_init(&sample, bp->attr.bp_addr);
4621 if (!perf_exclude_event(bp, regs))
4622 perf_swevent_add(bp, 1, 1, &sample, regs);
4625 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4630 void perf_bp_event(struct perf_event *bp, void *regs)
4635 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4637 static void sw_perf_event_destroy(struct perf_event *event)
4639 u64 event_id = event->attr.config;
4641 WARN_ON(event->parent);
4643 atomic_dec(&perf_swevent_enabled[event_id]);
4644 swevent_hlist_put(event);
4647 static const struct pmu *sw_perf_event_init(struct perf_event *event)
4649 const struct pmu *pmu = NULL;
4650 u64 event_id = event->attr.config;
4653 * Software events (currently) can't in general distinguish
4654 * between user, kernel and hypervisor events.
4655 * However, context switches and cpu migrations are considered
4656 to be kernel events, and page faults are never hypervisor events.
4660 case PERF_COUNT_SW_CPU_CLOCK:
4661 pmu = &perf_ops_cpu_clock;
4664 case PERF_COUNT_SW_TASK_CLOCK:
4666 * If the user instantiates this as a per-cpu event,
4667 * use the cpu_clock event instead.
4669 if (event->ctx->task)
4670 pmu = &perf_ops_task_clock;
4672 pmu = &perf_ops_cpu_clock;
4675 case PERF_COUNT_SW_PAGE_FAULTS:
4676 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4677 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4678 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4679 case PERF_COUNT_SW_CPU_MIGRATIONS:
4680 case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4681 case PERF_COUNT_SW_EMULATION_FAULTS:
4682 if (!event->parent) {
4685 err = swevent_hlist_get(event);
4687 return ERR_PTR(err);
4689 atomic_inc(&perf_swevent_enabled[event_id]);
4690 event->destroy = sw_perf_event_destroy;
4692 pmu = &perf_ops_generic;
4700 * Allocate and initialize an event structure
4702 static struct perf_event *
4703 perf_event_alloc(struct perf_event_attr *attr,
4705 struct perf_event_context *ctx,
4706 struct perf_event *group_leader,
4707 struct perf_event *parent_event,
4708 perf_overflow_handler_t overflow_handler,
4711 const struct pmu *pmu;
4712 struct perf_event *event;
4713 struct hw_perf_event *hwc;
4716 event = kzalloc(sizeof(*event), gfpflags);
4718 return ERR_PTR(-ENOMEM);
4721 * Single events are their own group leaders, with an
4722 * empty sibling list:
4725 group_leader = event;
4727 mutex_init(&event->child_mutex);
4728 INIT_LIST_HEAD(&event->child_list);
4730 INIT_LIST_HEAD(&event->group_entry);
4731 INIT_LIST_HEAD(&event->event_entry);
4732 INIT_LIST_HEAD(&event->sibling_list);
4733 init_waitqueue_head(&event->waitq);
4735 mutex_init(&event->mmap_mutex);
4738 event->attr = *attr;
4739 event->group_leader = group_leader;
4744 event->parent = parent_event;
4746 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4747 event->id = atomic64_inc_return(&perf_event_id);
4749 event->state = PERF_EVENT_STATE_INACTIVE;
4751 if (!overflow_handler && parent_event)
4752 overflow_handler = parent_event->overflow_handler;
4754 event->overflow_handler = overflow_handler;
4757 event->state = PERF_EVENT_STATE_OFF;
4762 hwc->sample_period = attr->sample_period;
4763 if (attr->freq && attr->sample_freq)
4764 hwc->sample_period = 1;
4765 hwc->last_period = hwc->sample_period;
4767 atomic64_set(&hwc->period_left, hwc->sample_period);
4770 * we currently do not support PERF_FORMAT_GROUP on inherited events
4772 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4775 switch (attr->type) {
4777 case PERF_TYPE_HARDWARE:
4778 case PERF_TYPE_HW_CACHE:
4779 pmu = hw_perf_event_init(event);
4782 case PERF_TYPE_SOFTWARE:
4783 pmu = sw_perf_event_init(event);
4786 case PERF_TYPE_TRACEPOINT:
4787 pmu = tp_perf_event_init(event);
4790 case PERF_TYPE_BREAKPOINT:
4791 pmu = bp_perf_event_init(event);
4802 else if (IS_ERR(pmu))
4807 put_pid_ns(event->ns);
4809 return ERR_PTR(err);
4814 if (!event->parent) {
4815 atomic_inc(&nr_events);
4816 if (event->attr.mmap)
4817 atomic_inc(&nr_mmap_events);
4818 if (event->attr.comm)
4819 atomic_inc(&nr_comm_events);
4820 if (event->attr.task)
4821 atomic_inc(&nr_task_events);
4827 static int perf_copy_attr(struct perf_event_attr __user *uattr,
4828 struct perf_event_attr *attr)
4833 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4837 * zero the full structure, so that a short copy will be nice.
4839 memset(attr, 0, sizeof(*attr));
4841 ret = get_user(size, &uattr->size);
4845 if (size > PAGE_SIZE) /* silly large */
4848 if (!size) /* abi compat */
4849 size = PERF_ATTR_SIZE_VER0;
4851 if (size < PERF_ATTR_SIZE_VER0)
4855 * If we're handed a bigger struct than we know of,
4856 * ensure all the unknown bits are 0 - i.e. new
4857 * user-space does not rely on any kernel feature
4858 * extensions we don't know about yet.
4860 if (size > sizeof(*attr)) {
4861 unsigned char __user *addr;
4862 unsigned char __user *end;
4865 addr = (void __user *)uattr + sizeof(*attr);
4866 end = (void __user *)uattr + size;
4868 for (; addr < end; addr++) {
4869 ret = get_user(val, addr);
4875 size = sizeof(*attr);
4878 ret = copy_from_user(attr, uattr, size);
4883 * If the type exists, the corresponding creation will verify the match.
4886 if (attr->type >= PERF_TYPE_MAX)
4889 if (attr->__reserved_1)
4892 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4895 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4902 put_user(sizeof(*attr), &uattr->size);
4907 static int perf_event_set_output(struct perf_event *event, int output_fd)
4909 struct perf_event *output_event = NULL;
4910 struct file *output_file = NULL;
4911 struct perf_event *old_output;
4912 int fput_needed = 0;
4918 output_file = fget_light(output_fd, &fput_needed);
4922 if (output_file->f_op != &perf_fops)
4925 output_event = output_file->private_data;
4927 /* Don't chain output fds */
4928 if (output_event->output)
4931 /* Don't set an output fd when we already have an output channel */
4935 atomic_long_inc(&output_file->f_count);
4938 mutex_lock(&event->mmap_mutex);
4939 old_output = event->output;
4940 rcu_assign_pointer(event->output, output_event);
4941 mutex_unlock(&event->mmap_mutex);
4945 * we need to make sure no existing perf_output_*()
4946 * is still referencing this event.
4949 fput(old_output->filp);
4954 fput_light(output_file, fput_needed);
4959 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4961 * @attr_uptr: event_id type attributes for monitoring/sampling
4964 * @group_fd: group leader event fd
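/*
 * Illustrative user-space sketch (not part of this file): glibc provides no
 * wrapper, so callers of this era go through syscall() directly.  Error
 * handling trimmed; needs <linux/perf_event.h>, <sys/syscall.h>, <unistd.h>.
 */
#if 0
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_kernel	= 1,
	};
	long long count;
	int fd = syscall(__NR_perf_event_open, &attr,
			 0 /* pid: current task */, -1 /* cpu: any */,
			 -1 /* group_fd: none */, 0 /* flags */);

	/* ... run the workload ... */
	read(fd, &count, sizeof(count));
#endif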
4966 SYSCALL_DEFINE5(perf_event_open,
4967 struct perf_event_attr __user *, attr_uptr,
4968 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4970 struct perf_event *event, *group_leader;
4971 struct perf_event_attr attr;
4972 struct perf_event_context *ctx;
4973 struct file *event_file = NULL;
4974 struct file *group_file = NULL;
4975 int fput_needed = 0;
4976 int fput_needed2 = 0;
4979 /* for future expandability... */
4980 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4983 err = perf_copy_attr(attr_uptr, &attr);
4987 if (!attr.exclude_kernel) {
4988 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4993 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4998 * Get the target context (task or percpu):
5000 ctx = find_get_context(pid, cpu);
5002 return PTR_ERR(ctx);
5005 * Look up the group leader (we will attach this event to it):
5007 group_leader = NULL;
5008 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
5010 group_file = fget_light(group_fd, &fput_needed);
5012 goto err_put_context;
5013 if (group_file->f_op != &perf_fops)
5014 goto err_put_context;
5016 group_leader = group_file->private_data;
5018 * Do not allow a recursive hierarchy (this new sibling
5019 * becoming part of another group-sibling):
5021 if (group_leader->group_leader != group_leader)
5022 goto err_put_context;
5024 * Do not allow attaching to a group in a different
5025 * task or CPU context:
5027 if (group_leader->ctx != ctx)
5028 goto err_put_context;
5030 * Only a group leader can be exclusive or pinned
5032 if (attr.exclusive || attr.pinned)
5033 goto err_put_context;
5036 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
5037 NULL, NULL, GFP_KERNEL);
5038 err = PTR_ERR(event);
5040 goto err_put_context;
5042 err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
5044 goto err_free_put_context;
5046 event_file = fget_light(err, &fput_needed2);
5048 goto err_free_put_context;
5050 if (flags & PERF_FLAG_FD_OUTPUT) {
5051 err = perf_event_set_output(event, group_fd);
5053 goto err_fput_free_put_context;
5056 event->filp = event_file;
5057 WARN_ON_ONCE(ctx->parent_ctx);
5058 mutex_lock(&ctx->mutex);
5059 perf_install_in_context(ctx, event, cpu);
5061 mutex_unlock(&ctx->mutex);
5063 event->owner = current;
5064 get_task_struct(current);
5065 mutex_lock(&current->perf_event_mutex);
5066 list_add_tail(&event->owner_entry, &current->perf_event_list);
5067 mutex_unlock(&current->perf_event_mutex);
5069 err_fput_free_put_context:
5070 fput_light(event_file, fput_needed2);
5072 err_free_put_context:
5080 fput_light(group_file, fput_needed);
5086 * perf_event_create_kernel_counter
5088 * @attr: attributes of the counter to create
5089 * @cpu: cpu on which the counter is bound
5090 * @pid: task to profile
5093 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5095 perf_overflow_handler_t overflow_handler)
5097 struct perf_event *event;
5098 struct perf_event_context *ctx;
5102 * Get the target context (task or percpu):
5105 ctx = find_get_context(pid, cpu);
5111 event = perf_event_alloc(attr, cpu, ctx, NULL,
5112 NULL, overflow_handler, GFP_KERNEL);
5113 if (IS_ERR(event)) {
5114 err = PTR_ERR(event);
5115 goto err_put_context;
5119 WARN_ON_ONCE(ctx->parent_ctx);
5120 mutex_lock(&ctx->mutex);
5121 perf_install_in_context(ctx, event, cpu);
5123 mutex_unlock(&ctx->mutex);
5125 event->owner = current;
5126 get_task_struct(current);
5127 mutex_lock(&current->perf_event_mutex);
5128 list_add_tail(&event->owner_entry, &current->perf_event_list);
5129 mutex_unlock(&current->perf_event_mutex);
5136 return ERR_PTR(err);
5138 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
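/*
 * Illustrative in-kernel sketch (not part of this file): creating a
 * CPU-bound counter with a callback through
 * perf_event_create_kernel_counter() above, in the style of the
 * hw_breakpoint code.  'my_overflow_handler' is hypothetical.
 */
#if 0
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.sample_period	= 1000000,
	};
	struct perf_event *event;

	event = perf_event_create_kernel_counter(&attr, 0 /* cpu */,
						 -1 /* pid: none */,
						 my_overflow_handler);
	if (IS_ERR(event))
		return PTR_ERR(event);
#endif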
5141 * inherit an event from parent task to child task:
5143 static struct perf_event *
5144 inherit_event(struct perf_event *parent_event,
5145 struct task_struct *parent,
5146 struct perf_event_context *parent_ctx,
5147 struct task_struct *child,
5148 struct perf_event *group_leader,
5149 struct perf_event_context *child_ctx)
5151 struct perf_event *child_event;
5154 * Instead of creating recursive hierarchies of events,
5155 * we link inherited events back to the original parent,
5156 * which has a filp for sure, which we use as the reference count:
5159 if (parent_event->parent)
5160 parent_event = parent_event->parent;
5162 child_event = perf_event_alloc(&parent_event->attr,
5163 parent_event->cpu, child_ctx,
5164 group_leader, parent_event,
5166 if (IS_ERR(child_event))
5171 * Make the child state follow the state of the parent event,
5172 * not its attr.disabled bit. We hold the parent's mutex,
5173 * so we won't race with perf_event_{en, dis}able_family.
5175 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5176 child_event->state = PERF_EVENT_STATE_INACTIVE;
5178 child_event->state = PERF_EVENT_STATE_OFF;
5180 if (parent_event->attr.freq) {
5181 u64 sample_period = parent_event->hw.sample_period;
5182 struct hw_perf_event *hwc = &child_event->hw;
5184 hwc->sample_period = sample_period;
5185 hwc->last_period = sample_period;
5187 atomic64_set(&hwc->period_left, sample_period);
5190 child_event->overflow_handler = parent_event->overflow_handler;
5193 * Link it up in the child's context:
5195 add_event_to_ctx(child_event, child_ctx);
5198 * Get a reference to the parent filp - we will fput it
5199 * when the child event exits. This is safe to do because
5200 * we are in the parent and we know that the filp still
5201 * exists and has a nonzero count:
5203 atomic_long_inc(&parent_event->filp->f_count);
5206 * Link this into the parent event's child list
5208 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5209 mutex_lock(&parent_event->child_mutex);
5210 list_add_tail(&child_event->child_list, &parent_event->child_list);
5211 mutex_unlock(&parent_event->child_mutex);
5216 static int inherit_group(struct perf_event *parent_event,
5217 struct task_struct *parent,
5218 struct perf_event_context *parent_ctx,
5219 struct task_struct *child,
5220 struct perf_event_context *child_ctx)
5222 struct perf_event *leader;
5223 struct perf_event *sub;
5224 struct perf_event *child_ctr;
5226 leader = inherit_event(parent_event, parent, parent_ctx,
5227 child, NULL, child_ctx);
5229 return PTR_ERR(leader);
5230 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5231 child_ctr = inherit_event(sub, parent, parent_ctx,
5232 child, leader, child_ctx);
5233 if (IS_ERR(child_ctr))
5234 return PTR_ERR(child_ctr);
5239 static void sync_child_event(struct perf_event *child_event,
5240 struct task_struct *child)
5242 struct perf_event *parent_event = child_event->parent;
5245 if (child_event->attr.inherit_stat)
5246 perf_event_read_event(child_event, child);
5248 child_val = atomic64_read(&child_event->count);
5251 * Add back the child's count to the parent's count:
5253 atomic64_add(child_val, &parent_event->count);
5254 atomic64_add(child_event->total_time_enabled,
5255 &parent_event->child_total_time_enabled);
5256 atomic64_add(child_event->total_time_running,
5257 &parent_event->child_total_time_running);
5260 * Remove this event from the parent's list
5262 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5263 mutex_lock(&parent_event->child_mutex);
5264 list_del_init(&child_event->child_list);
5265 mutex_unlock(&parent_event->child_mutex);
5268 * Release the parent event, if this was the last reference to it.
5271 fput(parent_event->filp);
5275 __perf_event_exit_task(struct perf_event *child_event,
5276 struct perf_event_context *child_ctx,
5277 struct task_struct *child)
5279 struct perf_event *parent_event;
5281 perf_event_remove_from_context(child_event);
5283 parent_event = child_event->parent;
5285 * It can happen that parent exits first, and has events
5286 * that are still around due to the child reference. These
5287 * events need to be zapped - but otherwise linger.
5290 sync_child_event(child_event, child);
5291 free_event(child_event);
5296 * When a child task exits, feed back event values to parent events.
5298 void perf_event_exit_task(struct task_struct *child)
5300 struct perf_event *child_event, *tmp;
5301 struct perf_event_context *child_ctx;
5302 unsigned long flags;
5304 if (likely(!child->perf_event_ctxp)) {
5305 perf_event_task(child, NULL, 0);
5309 local_irq_save(flags);
5311 * We can't reschedule here because interrupts are disabled,
5312 * and either child is current or it is a task that can't be
5313 * scheduled, so we are now safe from rescheduling changing our context.
5316 child_ctx = child->perf_event_ctxp;
5317 __perf_event_task_sched_out(child_ctx);
5320 * Take the context lock here so that if find_get_context is
5321 * reading child->perf_event_ctxp, we wait until it has
5322 * incremented the context's refcount before we do put_ctx below.
5324 raw_spin_lock(&child_ctx->lock);
5325 child->perf_event_ctxp = NULL;
5327 * If this context is a clone, unclone it so it can't get
5328 * swapped to another process while we're removing all
5329 * the events from it.
5331 unclone_ctx(child_ctx);
5332 update_context_time(child_ctx);
5333 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5336 * Report the task dead after unscheduling the events so that we
5337 * won't get any samples after PERF_RECORD_EXIT. We can however still
5338 * get a few PERF_RECORD_READ events.
5340 perf_event_task(child, child_ctx, 0);
5343 * We can recurse on the same lock type through:
5345 * __perf_event_exit_task()
5346 * sync_child_event()
5347 * fput(parent_event->filp)
5349 * mutex_lock(&ctx->mutex)
5351 * But since its the parent context it won't be the same instance.
5353 mutex_lock(&child_ctx->mutex);
5356 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5358 __perf_event_exit_task(child_event, child_ctx, child);
5360 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5362 __perf_event_exit_task(child_event, child_ctx, child);
5365 * If the last event was a group event, it will have appended all
5366 * its siblings to the list, but we obtained 'tmp' before that which
5367 * will still point to the list head terminating the iteration.
5369 if (!list_empty(&child_ctx->pinned_groups) ||
5370 !list_empty(&child_ctx->flexible_groups))
5373 mutex_unlock(&child_ctx->mutex);
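
/*
 * Free a child event that was never exposed to userspace: detach it from
 * its parent, drop the parent's file reference and release the event.
 */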
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);
	fput(parent->filp);

	list_del_event(event, ctx);
	free_event(event);
}

/*
 * Free an unexposed, unused context, as created by inheritance in
 * perf_event_init_task() below; used by fork() in the failure case.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event *event, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		perf_free_event(event, ctx);

	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				 group_entry)
		perf_free_event(event, ctx);

	if (!list_empty(&ctx->pinned_groups) ||
	    !list_empty(&ctx->flexible_groups))
		goto again;
	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
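
/*
 * Inherit one event group from the parent task into the child,
 * allocating the child's context on first use.
 */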
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx = child->perf_event_ctxp;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = kzalloc(sizeof(struct perf_event_context),
				    GFP_KERNEL);
		if (!child_ctx)
			return -ENOMEM;

		__perf_event_init_context(child_ctx, child);
		child->perf_event_ctxp = child_ctx;
		get_task_struct(child);
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);
	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
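
/* Set up the per-CPU contexts for every possible CPU at boot time. */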
static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		mutex_init(&cpuctx->hlist_mutex);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}
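
/*
 * Bring a CPU's perf state up when it comes online: recompute the
 * per-task reservation and, if software events are in use, allocate the
 * CPU's software-event hash list.
 */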
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	mutex_lock(&cpuctx->hlist_mutex);
	if (cpuctx->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		WARN_ON_ONCE(!hlist);
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	mutex_unlock(&cpuctx->hlist_mutex);
}
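
/*
 * CPU hotplug support: pull all remaining events off a CPU's context
 * before that CPU goes offline.
 */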
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}

static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&cpuctx->hlist_mutex);
	swevent_hlist_release(cpuctx);
	mutex_unlock(&cpuctx->hlist_mutex);

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
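
/*
 * CPU hotplug notifier: set up perf state when a CPU is being prepared to
 * come up, and tear it down before a CPU goes down.
 */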
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
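
/*
 * Called early at boot: initialize every per-CPU context, run the notifier
 * by hand for the boot CPU and then register it for hotplug events.
 */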
void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
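
/*
 * sysfs interface for the (sysadmin-configurable) per-CPU event
 * reservation; a write recomputes max_pertask for every online CPU.
 */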
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
					struct sysdev_class_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
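
/*
 * sysfs interface for the overcommit flag: accepts 0 or 1 and updates
 * perf_overcommit under perf_resource_lock.
 */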
static ssize_t perf_show_overcommit(struct sysdev_class *class,
				    struct sysdev_class_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
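
/*
 * Expose both knobs as a "perf_events" attribute group on the CPU sysdev
 * class; they typically appear as
 * /sys/devices/system/cpu/perf_events/{reserve_percpu,overcommit}.
 */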
static SYSDEV_CLASS_ATTR(reserve_percpu, 0644,
			 perf_show_reserve_percpu,
			 perf_set_reserve_percpu);

static SYSDEV_CLASS_ATTR(overcommit, 0644,
			 perf_show_overcommit,
			 perf_set_overcommit);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};

static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);