kernel/perf_event.c (commit fd43ff4ac860077150b0ecfdd0566e1a99c50082)
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/vmalloc.h>
24 #include <linux/hardirq.h>
25 #include <linux/rculist.h>
26 #include <linux/uaccess.h>
27 #include <linux/syscalls.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/kernel_stat.h>
30 #include <linux/perf_event.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/hw_breakpoint.h>
33
34 #include <asm/irq_regs.h>
35
36 /*
37  * Each CPU has a list of per CPU events:
38  */
39 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
40
41 int perf_max_events __read_mostly = 1;
42 static int perf_reserved_percpu __read_mostly;
43 static int perf_overcommit __read_mostly = 1;
44
45 static atomic_t nr_events __read_mostly;
46 static atomic_t nr_mmap_events __read_mostly;
47 static atomic_t nr_comm_events __read_mostly;
48 static atomic_t nr_task_events __read_mostly;
49
50 /*
51  * perf event paranoia level:
52  *  -1 - not paranoid at all
53  *   0 - disallow raw tracepoint access for unpriv
54  *   1 - disallow cpu events for unpriv
55  *   2 - disallow kernel profiling for unpriv
56  */
57 int sysctl_perf_event_paranoid __read_mostly = 1;
58
59 static inline bool perf_paranoid_tracepoint_raw(void)
60 {
61         return sysctl_perf_event_paranoid > -1;
62 }
63
64 static inline bool perf_paranoid_cpu(void)
65 {
66         return sysctl_perf_event_paranoid > 0;
67 }
68
69 static inline bool perf_paranoid_kernel(void)
70 {
71         return sysctl_perf_event_paranoid > 1;
72 }
73
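/*
 * Usage sketch: the helpers above are combined with capable() to gate
 * privileged requests; for instance the per-CPU path in
 * find_get_context() below does:
 *
 *	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 *		return ERR_PTR(-EACCES);
 */
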
74 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
75
76 /*
77  * max perf event sample rate
78  */
79 int sysctl_perf_event_sample_rate __read_mostly = 100000;
80
81 static atomic64_t perf_event_id;
82
83 /*
84  * Lock for (sysadmin-configurable) event reservations:
85  */
86 static DEFINE_SPINLOCK(perf_resource_lock);
87
88 /*
89  * Architecture provided APIs - weak aliases:
90  */
91 extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
92 {
93         return NULL;
94 }
95
96 void __weak hw_perf_disable(void)               { barrier(); }
97 void __weak hw_perf_enable(void)                { barrier(); }
98
99 void __weak hw_perf_event_setup(int cpu)        { barrier(); }
100 void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
101
102 int __weak
103 hw_perf_group_sched_in(struct perf_event *group_leader,
104                struct perf_cpu_context *cpuctx,
105                struct perf_event_context *ctx, int cpu)
106 {
107         return 0;
108 }
109
110 void __weak perf_event_print_debug(void)        { }
111
112 static DEFINE_PER_CPU(int, perf_disable_count);
113
114 void __perf_disable(void)
115 {
116         __get_cpu_var(perf_disable_count)++;
117 }
118
119 bool __perf_enable(void)
120 {
121         return !--__get_cpu_var(perf_disable_count);
122 }
123
124 void perf_disable(void)
125 {
126         __perf_disable();
127         hw_perf_disable();
128 }
129
130 void perf_enable(void)
131 {
132         if (__perf_enable())
133                 hw_perf_enable();
134 }
135
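/*
 * Usage sketch: perf_disable()/perf_enable() pairs nest per CPU through
 * perf_disable_count, so hw_perf_enable() is only invoked when the
 * outermost pair completes:
 *
 *	perf_disable();
 *	...	PMU programming, event list manipulation, etc.
 *	perf_enable();
 */
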
136 static void get_ctx(struct perf_event_context *ctx)
137 {
138         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
139 }
140
141 static void free_ctx(struct rcu_head *head)
142 {
143         struct perf_event_context *ctx;
144
145         ctx = container_of(head, struct perf_event_context, rcu_head);
146         kfree(ctx);
147 }
148
149 static void put_ctx(struct perf_event_context *ctx)
150 {
151         if (atomic_dec_and_test(&ctx->refcount)) {
152                 if (ctx->parent_ctx)
153                         put_ctx(ctx->parent_ctx);
154                 if (ctx->task)
155                         put_task_struct(ctx->task);
156                 call_rcu(&ctx->rcu_head, free_ctx);
157         }
158 }
159
160 static void unclone_ctx(struct perf_event_context *ctx)
161 {
162         if (ctx->parent_ctx) {
163                 put_ctx(ctx->parent_ctx);
164                 ctx->parent_ctx = NULL;
165         }
166 }
167
168 /*
169  * If we inherit events we want to return the parent event id
170  * to userspace.
171  */
172 static u64 primary_event_id(struct perf_event *event)
173 {
174         u64 id = event->id;
175
176         if (event->parent)
177                 id = event->parent->id;
178
179         return id;
180 }
181
182 /*
183  * Get the perf_event_context for a task and lock it.
184  * This has to cope with the fact that until it is locked,
185  * the context could get moved to another task.
186  */
187 static struct perf_event_context *
188 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
189 {
190         struct perf_event_context *ctx;
191
192         rcu_read_lock();
193  retry:
194         ctx = rcu_dereference(task->perf_event_ctxp);
195         if (ctx) {
196                 /*
197                  * If this context is a clone of another, it might
198                  * get swapped for another underneath us by
199                  * perf_event_task_sched_out, though the
200                  * rcu_read_lock() protects us from any context
201                  * getting freed.  Lock the context and check if it
202                  * got swapped before we could get the lock, and retry
203                  * if so.  If we locked the right context, then it
204                  * can't get swapped on us any more.
205                  */
206                 spin_lock_irqsave(&ctx->lock, *flags);
207                 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
208                         spin_unlock_irqrestore(&ctx->lock, *flags);
209                         goto retry;
210                 }
211
212                 if (!atomic_inc_not_zero(&ctx->refcount)) {
213                         spin_unlock_irqrestore(&ctx->lock, *flags);
214                         ctx = NULL;
215                 }
216         }
217         rcu_read_unlock();
218         return ctx;
219 }
220
221 /*
222  * Get the context for a task and increment its pin_count so it
223  * can't get swapped to another task.  This also increments its
224  * reference count so that the context can't get freed.
225  */
226 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
227 {
228         struct perf_event_context *ctx;
229         unsigned long flags;
230
231         ctx = perf_lock_task_context(task, &flags);
232         if (ctx) {
233                 ++ctx->pin_count;
234                 spin_unlock_irqrestore(&ctx->lock, flags);
235         }
236         return ctx;
237 }
238
239 static void perf_unpin_context(struct perf_event_context *ctx)
240 {
241         unsigned long flags;
242
243         spin_lock_irqsave(&ctx->lock, flags);
244         --ctx->pin_count;
245         spin_unlock_irqrestore(&ctx->lock, flags);
246         put_ctx(ctx);
247 }
248
249 static inline u64 perf_clock(void)
250 {
251         return cpu_clock(smp_processor_id());
252 }
253
254 /*
255  * Update the record of the current time in a context.
256  */
257 static void update_context_time(struct perf_event_context *ctx)
258 {
259         u64 now = perf_clock();
260
261         ctx->time += now - ctx->timestamp;
262         ctx->timestamp = now;
263 }
264
265 /*
266  * Update the total_time_enabled and total_time_running fields for an event.
267  */
268 static void update_event_times(struct perf_event *event)
269 {
270         struct perf_event_context *ctx = event->ctx;
271         u64 run_end;
272
273         if (event->state < PERF_EVENT_STATE_INACTIVE ||
274             event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
275                 return;
276
277         if (ctx->is_active)
278                 run_end = ctx->time;
279         else
280                 run_end = event->tstamp_stopped;
281
282         event->total_time_enabled = run_end - event->tstamp_enabled;
283
284         if (event->state == PERF_EVENT_STATE_INACTIVE)
285                 run_end = event->tstamp_stopped;
286         else
287                 run_end = ctx->time;
288
289         event->total_time_running = run_end - event->tstamp_running;
290 }
291
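/*
 * The two times computed above back the PERF_FORMAT_TOTAL_TIME_ENABLED
 * and PERF_FORMAT_TOTAL_TIME_RUNNING read formats; when events are
 * multiplexed, userspace typically estimates the true count as
 *
 *	count * total_time_enabled / total_time_running
 */
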
292 /*
293  * Add an event to the lists for its context.
294  * Must be called with ctx->mutex and ctx->lock held.
295  */
296 static void
297 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
298 {
299         struct perf_event *group_leader = event->group_leader;
300
301         /*
302          * Depending on whether it is a standalone or sibling event,
303          * add it straight to the context's event list, or to the group
304          * leader's sibling list:
305          */
306         if (group_leader == event)
307                 list_add_tail(&event->group_entry, &ctx->group_list);
308         else {
309                 list_add_tail(&event->group_entry, &group_leader->sibling_list);
310                 group_leader->nr_siblings++;
311         }
312
313         list_add_rcu(&event->event_entry, &ctx->event_list);
314         ctx->nr_events++;
315         if (event->attr.inherit_stat)
316                 ctx->nr_stat++;
317 }
318
319 /*
320  * Remove an event from the lists for its context.
321  * Must be called with ctx->mutex and ctx->lock held.
322  */
323 static void
324 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
325 {
326         struct perf_event *sibling, *tmp;
327
328         if (list_empty(&event->group_entry))
329                 return;
330         ctx->nr_events--;
331         if (event->attr.inherit_stat)
332                 ctx->nr_stat--;
333
334         list_del_init(&event->group_entry);
335         list_del_rcu(&event->event_entry);
336
337         if (event->group_leader != event)
338                 event->group_leader->nr_siblings--;
339
340         update_event_times(event);
341
342         /*
343          * If the event was in error state, then keep it
344          * that way, otherwise bogus counts will be
345          * returned on read(). The only way to get out
346          * of error state is by explicitly re-enabling
347          * the event.
348          */
349         if (event->state > PERF_EVENT_STATE_OFF)
350                 event->state = PERF_EVENT_STATE_OFF;
351
352         /*
353          * If this was a group event with sibling events then
354          * upgrade the siblings to singleton events by adding them
355          * to the context list directly:
356          */
357         list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
358
359                 list_move_tail(&sibling->group_entry, &ctx->group_list);
360                 sibling->group_leader = sibling;
361         }
362 }
363
364 static void
365 event_sched_out(struct perf_event *event,
366                   struct perf_cpu_context *cpuctx,
367                   struct perf_event_context *ctx)
368 {
369         if (event->state != PERF_EVENT_STATE_ACTIVE)
370                 return;
371
372         event->state = PERF_EVENT_STATE_INACTIVE;
373         if (event->pending_disable) {
374                 event->pending_disable = 0;
375                 event->state = PERF_EVENT_STATE_OFF;
376         }
377         event->tstamp_stopped = ctx->time;
378         event->pmu->disable(event);
379         event->oncpu = -1;
380
381         if (!is_software_event(event))
382                 cpuctx->active_oncpu--;
383         ctx->nr_active--;
384         if (event->attr.exclusive || !cpuctx->active_oncpu)
385                 cpuctx->exclusive = 0;
386 }
387
388 static void
389 group_sched_out(struct perf_event *group_event,
390                 struct perf_cpu_context *cpuctx,
391                 struct perf_event_context *ctx)
392 {
393         struct perf_event *event;
394
395         if (group_event->state != PERF_EVENT_STATE_ACTIVE)
396                 return;
397
398         event_sched_out(group_event, cpuctx, ctx);
399
400         /*
401          * Schedule out siblings (if any):
402          */
403         list_for_each_entry(event, &group_event->sibling_list, group_entry)
404                 event_sched_out(event, cpuctx, ctx);
405
406         if (group_event->attr.exclusive)
407                 cpuctx->exclusive = 0;
408 }
409
410 /*
411  * Cross CPU call to remove a performance event
412  *
413  * We disable the event on the hardware level first. After that we
414  * remove it from the context list.
415  */
416 static void __perf_event_remove_from_context(void *info)
417 {
418         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
419         struct perf_event *event = info;
420         struct perf_event_context *ctx = event->ctx;
421
422         /*
423          * If this is a task context, we need to check whether it is
424          * the current task context of this cpu. If not it has been
425          * scheduled out before the smp call arrived.
426          */
427         if (ctx->task && cpuctx->task_ctx != ctx)
428                 return;
429
430         spin_lock(&ctx->lock);
431         /*
432          * Protect the list operation against NMI by disabling the
433          * events on a global level.
434          */
435         perf_disable();
436
437         event_sched_out(event, cpuctx, ctx);
438
439         list_del_event(event, ctx);
440
441         if (!ctx->task) {
442                 /*
443                  * Allow more per task events with respect to the
444                  * reservation:
445                  */
446                 cpuctx->max_pertask =
447                         min(perf_max_events - ctx->nr_events,
448                             perf_max_events - perf_reserved_percpu);
449         }
450
451         perf_enable();
452         spin_unlock(&ctx->lock);
453 }
454
455
456 /*
457  * Remove the event from a task's (or a CPU's) list of events.
458  *
459  * Must be called with ctx->mutex held.
460  *
461  * CPU events are removed with an smp call. For task events we only
462  * call when the task is on a CPU.
463  *
464  * If event->ctx is a cloned context, callers must make sure that
465  * every task struct that event->ctx->task could possibly point to
466  * remains valid.  This is OK when called from perf_release since
467  * that only calls us on the top-level context, which can't be a clone.
468  * When called from perf_event_exit_task, it's OK because the
469  * context has been detached from its task.
470  */
471 static void perf_event_remove_from_context(struct perf_event *event)
472 {
473         struct perf_event_context *ctx = event->ctx;
474         struct task_struct *task = ctx->task;
475
476         if (!task) {
477                 /*
478                  * Per cpu events are removed via an smp call and
479                  * the removal is always successful.
480                  */
481                 smp_call_function_single(event->cpu,
482                                          __perf_event_remove_from_context,
483                                          event, 1);
484                 return;
485         }
486
487 retry:
488         task_oncpu_function_call(task, __perf_event_remove_from_context,
489                                  event);
490
491         spin_lock_irq(&ctx->lock);
492         /*
493          * If the context is active we need to retry the smp call.
494          */
495         if (ctx->nr_active && !list_empty(&event->group_entry)) {
496                 spin_unlock_irq(&ctx->lock);
497                 goto retry;
498         }
499
500         /*
501          * The lock prevents this context from being scheduled in, so
502          * we can remove the event safely if the call above did not
503          * succeed.
504          */
505         if (!list_empty(&event->group_entry))
506                 list_del_event(event, ctx);
507         spin_unlock_irq(&ctx->lock);
508 }
509
510 /*
511  * Update total_time_enabled and total_time_running for all events in a group.
512  */
513 static void update_group_times(struct perf_event *leader)
514 {
515         struct perf_event *event;
516
517         update_event_times(leader);
518         list_for_each_entry(event, &leader->sibling_list, group_entry)
519                 update_event_times(event);
520 }
521
522 /*
523  * Cross CPU call to disable a performance event
524  */
525 static void __perf_event_disable(void *info)
526 {
527         struct perf_event *event = info;
528         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
529         struct perf_event_context *ctx = event->ctx;
530
531         /*
532          * If this is a per-task event, we need to check whether this
533          * event's task is the current task on this cpu.
534          */
535         if (ctx->task && cpuctx->task_ctx != ctx)
536                 return;
537
538         spin_lock(&ctx->lock);
539
540         /*
541          * If the event is on, turn it off.
542          * If it is in error state, leave it in error state.
543          */
544         if (event->state >= PERF_EVENT_STATE_INACTIVE) {
545                 update_context_time(ctx);
546                 update_group_times(event);
547                 if (event == event->group_leader)
548                         group_sched_out(event, cpuctx, ctx);
549                 else
550                         event_sched_out(event, cpuctx, ctx);
551                 event->state = PERF_EVENT_STATE_OFF;
552         }
553
554         spin_unlock(&ctx->lock);
555 }
556
557 /*
558  * Disable an event.
559  *
560  * If event->ctx is a cloned context, callers must make sure that
561  * every task struct that event->ctx->task could possibly point to
562  * remains valid.  This condition is satisfied when called through
563  * perf_event_for_each_child or perf_event_for_each because they
564  * hold the top-level event's child_mutex, so any descendant that
565  * goes to exit will block in sync_child_event.
566  * When called from perf_pending_event it's OK because event->ctx
567  * is the current context on this CPU and preemption is disabled,
568  * hence we can't get into perf_event_task_sched_out for this context.
569  */
570 static void perf_event_disable(struct perf_event *event)
571 {
572         struct perf_event_context *ctx = event->ctx;
573         struct task_struct *task = ctx->task;
574
575         if (!task) {
576                 /*
577                  * Disable the event on the cpu that it's on
578                  */
579                 smp_call_function_single(event->cpu, __perf_event_disable,
580                                          event, 1);
581                 return;
582         }
583
584  retry:
585         task_oncpu_function_call(task, __perf_event_disable, event);
586
587         spin_lock_irq(&ctx->lock);
588         /*
589          * If the event is still active, we need to retry the cross-call.
590          */
591         if (event->state == PERF_EVENT_STATE_ACTIVE) {
592                 spin_unlock_irq(&ctx->lock);
593                 goto retry;
594         }
595
596         /*
597          * Since we have the lock this context can't be scheduled
598          * in, so we can change the state safely.
599          */
600         if (event->state == PERF_EVENT_STATE_INACTIVE) {
601                 update_group_times(event);
602                 event->state = PERF_EVENT_STATE_OFF;
603         }
604
605         spin_unlock_irq(&ctx->lock);
606 }
607
608 static int
609 event_sched_in(struct perf_event *event,
610                  struct perf_cpu_context *cpuctx,
611                  struct perf_event_context *ctx,
612                  int cpu)
613 {
614         if (event->state <= PERF_EVENT_STATE_OFF)
615                 return 0;
616
617         event->state = PERF_EVENT_STATE_ACTIVE;
618         event->oncpu = cpu;     /* TODO: put 'cpu' into cpuctx->cpu */
619         /*
620          * The new state must be visible before we turn it on in the hardware:
621          */
622         smp_wmb();
623
624         if (event->pmu->enable(event)) {
625                 event->state = PERF_EVENT_STATE_INACTIVE;
626                 event->oncpu = -1;
627                 return -EAGAIN;
628         }
629
630         event->tstamp_running += ctx->time - event->tstamp_stopped;
631
632         if (!is_software_event(event))
633                 cpuctx->active_oncpu++;
634         ctx->nr_active++;
635
636         if (event->attr.exclusive)
637                 cpuctx->exclusive = 1;
638
639         return 0;
640 }
641
642 static int
643 group_sched_in(struct perf_event *group_event,
644                struct perf_cpu_context *cpuctx,
645                struct perf_event_context *ctx,
646                int cpu)
647 {
648         struct perf_event *event, *partial_group;
649         int ret;
650
651         if (group_event->state == PERF_EVENT_STATE_OFF)
652                 return 0;
653
654         ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
655         if (ret)
656                 return ret < 0 ? ret : 0;
657
658         if (event_sched_in(group_event, cpuctx, ctx, cpu))
659                 return -EAGAIN;
660
661         /*
662          * Schedule in siblings as one group (if any):
663          */
664         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
665                 if (event_sched_in(event, cpuctx, ctx, cpu)) {
666                         partial_group = event;
667                         goto group_error;
668                 }
669         }
670
671         return 0;
672
673 group_error:
674         /*
675          * Groups can be scheduled in as one unit only, so undo any
676          * partial group before returning:
677          */
678         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
679                 if (event == partial_group)
680                         break;
681                 event_sched_out(event, cpuctx, ctx);
682         }
683         event_sched_out(group_event, cpuctx, ctx);
684
685         return -EAGAIN;
686 }
687
688 /*
689  * Return 1 for a group consisting entirely of software events,
690  * 0 if the group contains any hardware events.
691  */
692 static int is_software_only_group(struct perf_event *leader)
693 {
694         struct perf_event *event;
695
696         if (!is_software_event(leader))
697                 return 0;
698
699         list_for_each_entry(event, &leader->sibling_list, group_entry)
700                 if (!is_software_event(event))
701                         return 0;
702
703         return 1;
704 }
705
706 /*
707  * Work out whether we can put this event group on the CPU now.
708  */
709 static int group_can_go_on(struct perf_event *event,
710                            struct perf_cpu_context *cpuctx,
711                            int can_add_hw)
712 {
713         /*
714          * Groups consisting entirely of software events can always go on.
715          */
716         if (is_software_only_group(event))
717                 return 1;
718         /*
719          * If an exclusive group is already on, no other hardware
720          * events can go on.
721          */
722         if (cpuctx->exclusive)
723                 return 0;
724         /*
725          * If this group is exclusive and there are already
726          * events on the CPU, it can't go on.
727          */
728         if (event->attr.exclusive && cpuctx->active_oncpu)
729                 return 0;
730         /*
731          * Otherwise, try to add it if all previous groups were able
732          * to go on.
733          */
734         return can_add_hw;
735 }
736
737 static void add_event_to_ctx(struct perf_event *event,
738                                struct perf_event_context *ctx)
739 {
740         list_add_event(event, ctx);
741         event->tstamp_enabled = ctx->time;
742         event->tstamp_running = ctx->time;
743         event->tstamp_stopped = ctx->time;
744 }
745
746 /*
747  * Cross CPU call to install and enable a performance event
748  *
749  * Must be called with ctx->mutex held
750  */
751 static void __perf_install_in_context(void *info)
752 {
753         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
754         struct perf_event *event = info;
755         struct perf_event_context *ctx = event->ctx;
756         struct perf_event *leader = event->group_leader;
757         int cpu = smp_processor_id();
758         int err;
759
760         /*
761          * If this is a task context, we need to check whether it is
762          * the current task context of this cpu. If not it has been
763          * scheduled out before the smp call arrived.
764          * Or possibly this is the right context but it isn't
765          * on this cpu because it had no events.
766          */
767         if (ctx->task && cpuctx->task_ctx != ctx) {
768                 if (cpuctx->task_ctx || ctx->task != current)
769                         return;
770                 cpuctx->task_ctx = ctx;
771         }
772
773         spin_lock(&ctx->lock);
774         ctx->is_active = 1;
775         update_context_time(ctx);
776
777         /*
778          * Protect the list operation against NMI by disabling the
779          * events on a global level. NOP for non NMI based events.
780          */
781         perf_disable();
782
783         add_event_to_ctx(event, ctx);
784
785         /*
786          * Don't put the event on if it is disabled or if
787          * it is in a group and the group isn't on.
788          */
789         if (event->state != PERF_EVENT_STATE_INACTIVE ||
790             (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
791                 goto unlock;
792
793         /*
794          * An exclusive event can't go on if there are already active
795          * hardware events, and no hardware event can go on if there
796          * is already an exclusive event on.
797          */
798         if (!group_can_go_on(event, cpuctx, 1))
799                 err = -EEXIST;
800         else
801                 err = event_sched_in(event, cpuctx, ctx, cpu);
802
803         if (err) {
804                 /*
805                  * This event couldn't go on.  If it is in a group
806                  * then we have to pull the whole group off.
807                  * If the event group is pinned then put it in error state.
808                  */
809                 if (leader != event)
810                         group_sched_out(leader, cpuctx, ctx);
811                 if (leader->attr.pinned) {
812                         update_group_times(leader);
813                         leader->state = PERF_EVENT_STATE_ERROR;
814                 }
815         }
816
817         if (!err && !ctx->task && cpuctx->max_pertask)
818                 cpuctx->max_pertask--;
819
820  unlock:
821         perf_enable();
822
823         spin_unlock(&ctx->lock);
824 }
825
826 /*
827  * Attach a performance event to a context
828  *
829  * First we add the event to the list with the hardware enable bit
830  * in event->hw_config cleared.
831  *
832  * If the event is attached to a task which is on a CPU we use a smp
833  * call to enable it in the task context. The task might have been
834  * scheduled away, but we check this in the smp call again.
835  *
836  * Must be called with ctx->mutex held.
837  */
838 static void
839 perf_install_in_context(struct perf_event_context *ctx,
840                         struct perf_event *event,
841                         int cpu)
842 {
843         struct task_struct *task = ctx->task;
844
845         if (!task) {
846                 /*
847                  * Per cpu events are installed via an smp call and
848                  * the install is always successful.
849                  */
850                 smp_call_function_single(cpu, __perf_install_in_context,
851                                          event, 1);
852                 return;
853         }
854
855 retry:
856         task_oncpu_function_call(task, __perf_install_in_context,
857                                  event);
858
859         spin_lock_irq(&ctx->lock);
860         /*
861          * we need to retry the smp call.
862          */
863         if (ctx->is_active && list_empty(&event->group_entry)) {
864                 spin_unlock_irq(&ctx->lock);
865                 goto retry;
866         }
867
868         /*
869          * The lock prevents this context from being scheduled in, so
870          * we can add the event safely if the call above did not
871          * succeed.
872          */
873         if (list_empty(&event->group_entry))
874                 add_event_to_ctx(event, ctx);
875         spin_unlock_irq(&ctx->lock);
876 }
877
878 /*
879  * Put an event into inactive state and update time fields.
880  * Enabling the leader of a group effectively enables all
881  * the group members that aren't explicitly disabled, so we
882  * have to update their ->tstamp_enabled also.
883  * Note: this works for group members as well as group leaders
884  * since the non-leader members' sibling_lists will be empty.
885  */
886 static void __perf_event_mark_enabled(struct perf_event *event,
887                                         struct perf_event_context *ctx)
888 {
889         struct perf_event *sub;
890
891         event->state = PERF_EVENT_STATE_INACTIVE;
892         event->tstamp_enabled = ctx->time - event->total_time_enabled;
893         list_for_each_entry(sub, &event->sibling_list, group_entry)
894                 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
895                         sub->tstamp_enabled =
896                                 ctx->time - sub->total_time_enabled;
897 }
898
899 /*
900  * Cross CPU call to enable a performance event
901  */
902 static void __perf_event_enable(void *info)
903 {
904         struct perf_event *event = info;
905         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
906         struct perf_event_context *ctx = event->ctx;
907         struct perf_event *leader = event->group_leader;
908         int err;
909
910         /*
911          * If this is a per-task event, we need to check whether this
912          * event's task is the current task on this cpu.
913          */
914         if (ctx->task && cpuctx->task_ctx != ctx) {
915                 if (cpuctx->task_ctx || ctx->task != current)
916                         return;
917                 cpuctx->task_ctx = ctx;
918         }
919
920         spin_lock(&ctx->lock);
921         ctx->is_active = 1;
922         update_context_time(ctx);
923
924         if (event->state >= PERF_EVENT_STATE_INACTIVE)
925                 goto unlock;
926         __perf_event_mark_enabled(event, ctx);
927
928         /*
929          * If the event is in a group and isn't the group leader,
930          * then don't put it on unless the group is on.
931          */
932         if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
933                 goto unlock;
934
935         if (!group_can_go_on(event, cpuctx, 1)) {
936                 err = -EEXIST;
937         } else {
938                 perf_disable();
939                 if (event == leader)
940                         err = group_sched_in(event, cpuctx, ctx,
941                                              smp_processor_id());
942                 else
943                         err = event_sched_in(event, cpuctx, ctx,
944                                                smp_processor_id());
945                 perf_enable();
946         }
947
948         if (err) {
949                 /*
950                  * If this event can't go on and it's part of a
951                  * group, then the whole group has to come off.
952                  */
953                 if (leader != event)
954                         group_sched_out(leader, cpuctx, ctx);
955                 if (leader->attr.pinned) {
956                         update_group_times(leader);
957                         leader->state = PERF_EVENT_STATE_ERROR;
958                 }
959         }
960
961  unlock:
962         spin_unlock(&ctx->lock);
963 }
964
965 /*
966  * Enable an event.
967  *
968  * If event->ctx is a cloned context, callers must make sure that
969  * every task struct that event->ctx->task could possibly point to
970  * remains valid.  This condition is satisfied when called through
971  * perf_event_for_each_child or perf_event_for_each as described
972  * for perf_event_disable.
973  */
974 static void perf_event_enable(struct perf_event *event)
975 {
976         struct perf_event_context *ctx = event->ctx;
977         struct task_struct *task = ctx->task;
978
979         if (!task) {
980                 /*
981                  * Enable the event on the cpu that it's on
982                  */
983                 smp_call_function_single(event->cpu, __perf_event_enable,
984                                          event, 1);
985                 return;
986         }
987
988         spin_lock_irq(&ctx->lock);
989         if (event->state >= PERF_EVENT_STATE_INACTIVE)
990                 goto out;
991
992         /*
993          * If the event is in error state, clear that first.
994          * That way, if we see the event in error state below, we
995          * know that it has gone back into error state, as distinct
996          * from the task having been scheduled away before the
997          * cross-call arrived.
998          */
999         if (event->state == PERF_EVENT_STATE_ERROR)
1000                 event->state = PERF_EVENT_STATE_OFF;
1001
1002  retry:
1003         spin_unlock_irq(&ctx->lock);
1004         task_oncpu_function_call(task, __perf_event_enable, event);
1005
1006         spin_lock_irq(&ctx->lock);
1007
1008         /*
1009          * If the context is active and the event is still off,
1010          * we need to retry the cross-call.
1011          */
1012         if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1013                 goto retry;
1014
1015         /*
1016          * Since we have the lock this context can't be scheduled
1017          * in, so we can change the state safely.
1018          */
1019         if (event->state == PERF_EVENT_STATE_OFF)
1020                 __perf_event_mark_enabled(event, ctx);
1021
1022  out:
1023         spin_unlock_irq(&ctx->lock);
1024 }
1025
1026 static int perf_event_refresh(struct perf_event *event, int refresh)
1027 {
1028         /*
1029          * not supported on inherited events
1030          */
1031         if (event->attr.inherit)
1032                 return -EINVAL;
1033
1034         atomic_add(refresh, &event->event_limit);
1035         perf_event_enable(event);
1036
1037         return 0;
1038 }
1039
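/*
 * perf_event_refresh() is normally reached via the PERF_EVENT_IOC_REFRESH
 * ioctl: it arms the event for 'refresh' more overflows (event_limit) and
 * re-enables it, e.g.
 *
 *	ioctl(event_fd, PERF_EVENT_IOC_REFRESH, 1);
 */
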
1040 void __perf_event_sched_out(struct perf_event_context *ctx,
1041                               struct perf_cpu_context *cpuctx)
1042 {
1043         struct perf_event *event;
1044
1045         spin_lock(&ctx->lock);
1046         ctx->is_active = 0;
1047         if (likely(!ctx->nr_events))
1048                 goto out;
1049         update_context_time(ctx);
1050
1051         perf_disable();
1052         if (ctx->nr_active) {
1053                 list_for_each_entry(event, &ctx->group_list, group_entry)
1054                         group_sched_out(event, cpuctx, ctx);
1055         }
1056         perf_enable();
1057  out:
1058         spin_unlock(&ctx->lock);
1059 }
1060
1061 /*
1062  * Test whether two contexts are equivalent, i.e. whether they
1063  * have both been cloned from the same version of the same context
1064  * and they both have the same number of enabled events.
1065  * If the number of enabled events is the same, then the set
1066  * of enabled events should be the same, because these are both
1067  * inherited contexts, therefore we can't access individual events
1068  * in them directly with an fd; we can only enable/disable all
1069  * events via prctl, or enable/disable all events in a family
1070  * via ioctl, which will have the same effect on both contexts.
1071  */
1072 static int context_equiv(struct perf_event_context *ctx1,
1073                          struct perf_event_context *ctx2)
1074 {
1075         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1076                 && ctx1->parent_gen == ctx2->parent_gen
1077                 && !ctx1->pin_count && !ctx2->pin_count;
1078 }
1079
1080 static void __perf_event_sync_stat(struct perf_event *event,
1081                                      struct perf_event *next_event)
1082 {
1083         u64 value;
1084
1085         if (!event->attr.inherit_stat)
1086                 return;
1087
1088         /*
1089          * Update the event value, we cannot use perf_event_read()
1090          * because we're in the middle of a context switch and have IRQs
1091          * disabled, which upsets smp_call_function_single(), however
1092          * we know the event must be on the current CPU, therefore we
1093          * don't need to use it.
1094          */
1095         switch (event->state) {
1096         case PERF_EVENT_STATE_ACTIVE:
1097                 event->pmu->read(event);
1098                 /* fall-through */
1099
1100         case PERF_EVENT_STATE_INACTIVE:
1101                 update_event_times(event);
1102                 break;
1103
1104         default:
1105                 break;
1106         }
1107
1108         /*
1109          * In order to keep per-task stats reliable we need to flip the event
1110          * values when we flip the contexts.
1111          */
1112         value = atomic64_read(&next_event->count);
1113         value = atomic64_xchg(&event->count, value);
1114         atomic64_set(&next_event->count, value);
1115
1116         swap(event->total_time_enabled, next_event->total_time_enabled);
1117         swap(event->total_time_running, next_event->total_time_running);
1118
1119         /*
1120          * Since we swizzled the values, update the user visible data too.
1121          */
1122         perf_event_update_userpage(event);
1123         perf_event_update_userpage(next_event);
1124 }
1125
1126 #define list_next_entry(pos, member) \
1127         list_entry(pos->member.next, typeof(*pos), member)
1128
1129 static void perf_event_sync_stat(struct perf_event_context *ctx,
1130                                    struct perf_event_context *next_ctx)
1131 {
1132         struct perf_event *event, *next_event;
1133
1134         if (!ctx->nr_stat)
1135                 return;
1136
1137         update_context_time(ctx);
1138
1139         event = list_first_entry(&ctx->event_list,
1140                                    struct perf_event, event_entry);
1141
1142         next_event = list_first_entry(&next_ctx->event_list,
1143                                         struct perf_event, event_entry);
1144
1145         while (&event->event_entry != &ctx->event_list &&
1146                &next_event->event_entry != &next_ctx->event_list) {
1147
1148                 __perf_event_sync_stat(event, next_event);
1149
1150                 event = list_next_entry(event, event_entry);
1151                 next_event = list_next_entry(next_event, event_entry);
1152         }
1153 }
1154
1155 /*
1156  * Called from scheduler to remove the events of the current task,
1157  * with interrupts disabled.
1158  *
1159  * We stop each event and update the event value in event->count.
1160  *
1161  * This does not protect us against NMI, but disable()
1162  * sets the disabled bit in the control field of event _before_
1163  * accessing the event control register. If an NMI hits, then it will
1164  * not restart the event.
1165  */
1166 void perf_event_task_sched_out(struct task_struct *task,
1167                                  struct task_struct *next, int cpu)
1168 {
1169         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1170         struct perf_event_context *ctx = task->perf_event_ctxp;
1171         struct perf_event_context *next_ctx;
1172         struct perf_event_context *parent;
1173         struct pt_regs *regs;
1174         int do_switch = 1;
1175
1176         regs = task_pt_regs(task);
1177         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1178
1179         if (likely(!ctx || !cpuctx->task_ctx))
1180                 return;
1181
1182         rcu_read_lock();
1183         parent = rcu_dereference(ctx->parent_ctx);
1184         next_ctx = next->perf_event_ctxp;
1185         if (parent && next_ctx &&
1186             rcu_dereference(next_ctx->parent_ctx) == parent) {
1187                 /*
1188                  * Looks like the two contexts are clones, so we might be
1189                  * able to optimize the context switch.  We lock both
1190                  * contexts and check that they are clones under the
1191                  * lock (including re-checking that neither has been
1192                  * uncloned in the meantime).  It doesn't matter which
1193                  * order we take the locks because no other cpu could
1194                  * be trying to lock both of these tasks.
1195                  */
1196                 spin_lock(&ctx->lock);
1197                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1198                 if (context_equiv(ctx, next_ctx)) {
1199                         /*
1200                          * XXX do we need a memory barrier of sorts
1201                          * wrt rcu_dereference() of perf_event_ctxp
1202                          */
1203                         task->perf_event_ctxp = next_ctx;
1204                         next->perf_event_ctxp = ctx;
1205                         ctx->task = next;
1206                         next_ctx->task = task;
1207                         do_switch = 0;
1208
1209                         perf_event_sync_stat(ctx, next_ctx);
1210                 }
1211                 spin_unlock(&next_ctx->lock);
1212                 spin_unlock(&ctx->lock);
1213         }
1214         rcu_read_unlock();
1215
1216         if (do_switch) {
1217                 __perf_event_sched_out(ctx, cpuctx);
1218                 cpuctx->task_ctx = NULL;
1219         }
1220 }
1221
1222 /*
1223  * Called with IRQs disabled
1224  */
1225 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1226 {
1227         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1228
1229         if (!cpuctx->task_ctx)
1230                 return;
1231
1232         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1233                 return;
1234
1235         __perf_event_sched_out(ctx, cpuctx);
1236         cpuctx->task_ctx = NULL;
1237 }
1238
1239 /*
1240  * Called with IRQs disabled
1241  */
1242 static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1243 {
1244         __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1245 }
1246
1247 static void
1248 __perf_event_sched_in(struct perf_event_context *ctx,
1249                         struct perf_cpu_context *cpuctx, int cpu)
1250 {
1251         struct perf_event *event;
1252         int can_add_hw = 1;
1253
1254         spin_lock(&ctx->lock);
1255         ctx->is_active = 1;
1256         if (likely(!ctx->nr_events))
1257                 goto out;
1258
1259         ctx->timestamp = perf_clock();
1260
1261         perf_disable();
1262
1263         /*
1264          * First go through the list and put on any pinned groups
1265          * in order to give them the best chance of going on.
1266          */
1267         list_for_each_entry(event, &ctx->group_list, group_entry) {
1268                 if (event->state <= PERF_EVENT_STATE_OFF ||
1269                     !event->attr.pinned)
1270                         continue;
1271                 if (event->cpu != -1 && event->cpu != cpu)
1272                         continue;
1273
1274                 if (group_can_go_on(event, cpuctx, 1))
1275                         group_sched_in(event, cpuctx, ctx, cpu);
1276
1277                 /*
1278                  * If this pinned group hasn't been scheduled,
1279                  * put it in error state.
1280                  */
1281                 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1282                         update_group_times(event);
1283                         event->state = PERF_EVENT_STATE_ERROR;
1284                 }
1285         }
1286
1287         list_for_each_entry(event, &ctx->group_list, group_entry) {
1288                 /*
1289                  * Ignore events in OFF or ERROR state, and
1290                  * ignore pinned events since we did them already.
1291                  */
1292                 if (event->state <= PERF_EVENT_STATE_OFF ||
1293                     event->attr.pinned)
1294                         continue;
1295
1296                 /*
1297                  * Listen to the 'cpu' scheduling filter constraint
1298                  * of events:
1299                  */
1300                 if (event->cpu != -1 && event->cpu != cpu)
1301                         continue;
1302
1303                 if (group_can_go_on(event, cpuctx, can_add_hw))
1304                         if (group_sched_in(event, cpuctx, ctx, cpu))
1305                                 can_add_hw = 0;
1306         }
1307         perf_enable();
1308  out:
1309         spin_unlock(&ctx->lock);
1310 }
1311
1312 /*
1313  * Called from scheduler to add the events of the current task
1314  * with interrupts disabled.
1315  *
1316  * We restore the event value and then enable it.
1317  *
1318  * This does not protect us against NMI, but enable()
1319  * sets the enabled bit in the control field of event _before_
1320  * accessing the event control register. If an NMI hits, then it will
1321  * keep the event running.
1322  */
1323 void perf_event_task_sched_in(struct task_struct *task, int cpu)
1324 {
1325         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1326         struct perf_event_context *ctx = task->perf_event_ctxp;
1327
1328         if (likely(!ctx))
1329                 return;
1330         if (cpuctx->task_ctx == ctx)
1331                 return;
1332         __perf_event_sched_in(ctx, cpuctx, cpu);
1333         cpuctx->task_ctx = ctx;
1334 }
1335
1336 static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1337 {
1338         struct perf_event_context *ctx = &cpuctx->ctx;
1339
1340         __perf_event_sched_in(ctx, cpuctx, cpu);
1341 }
1342
1343 #define MAX_INTERRUPTS (~0ULL)
1344
1345 static void perf_log_throttle(struct perf_event *event, int enable);
1346
1347 static void perf_adjust_period(struct perf_event *event, u64 events)
1348 {
1349         struct hw_perf_event *hwc = &event->hw;
1350         u64 period, sample_period;
1351         s64 delta;
1352
1353         events *= hwc->sample_period;
1354         period = div64_u64(events, event->attr.sample_freq);
1355
1356         delta = (s64)(period - hwc->sample_period);
1357         delta = (delta + 7) / 8; /* low pass filter */
1358
1359         sample_period = hwc->sample_period + delta;
1360
1361         if (!sample_period)
1362                 sample_period = 1;
1363
1364         hwc->sample_period = sample_period;
1365 }
1366
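/*
 * Worked example for perf_adjust_period() above (illustrative numbers):
 * with sample_period = 100000, sample_freq = 1000 and ~2000 overflow
 * interrupts seen in the last second, the estimated event rate is
 * 2000 * 100000 = 2e8 events/sec, so the ideal period is 2e8 / 1000 = 200000.
 * The low-pass filter applies only (200000 - 100000 + 7) / 8 = 12500 of that
 * delta, moving sample_period to 112500 for the next interval.
 */
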
1367 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1368 {
1369         struct perf_event *event;
1370         struct hw_perf_event *hwc;
1371         u64 interrupts, freq;
1372
1373         spin_lock(&ctx->lock);
1374         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1375                 if (event->state != PERF_EVENT_STATE_ACTIVE)
1376                         continue;
1377
1378                 hwc = &event->hw;
1379
1380                 interrupts = hwc->interrupts;
1381                 hwc->interrupts = 0;
1382
1383                 /*
1384                  * unthrottle events on the tick
1385                  */
1386                 if (interrupts == MAX_INTERRUPTS) {
1387                         perf_log_throttle(event, 1);
1388                         event->pmu->unthrottle(event);
1389                         interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1390                 }
1391
1392                 if (!event->attr.freq || !event->attr.sample_freq)
1393                         continue;
1394
1395                 /*
1396                  * if the specified freq < HZ then we need to skip ticks
1397                  */
1398                 if (event->attr.sample_freq < HZ) {
1399                         freq = event->attr.sample_freq;
1400
1401                         hwc->freq_count += freq;
1402                         hwc->freq_interrupts += interrupts;
1403
1404                         if (hwc->freq_count < HZ)
1405                                 continue;
1406
1407                         interrupts = hwc->freq_interrupts;
1408                         hwc->freq_interrupts = 0;
1409                         hwc->freq_count -= HZ;
1410                 } else
1411                         freq = HZ;
1412
1413                 perf_adjust_period(event, freq * interrupts);
1414
1415                 /*
1416                  * In order to avoid being stalled by an (accidental) huge
1417                  * sample period, force reset the sample period if we didn't
1418                  * get any events in this freq period.
1419                  */
1420                 if (!interrupts) {
1421                         perf_disable();
1422                         event->pmu->disable(event);
1423                         atomic64_set(&hwc->period_left, 0);
1424                         event->pmu->enable(event);
1425                         perf_enable();
1426                 }
1427         }
1428         spin_unlock(&ctx->lock);
1429 }
1430
1431 /*
1432  * Round-robin a context's events:
1433  */
1434 static void rotate_ctx(struct perf_event_context *ctx)
1435 {
1436         struct perf_event *event;
1437
1438         if (!ctx->nr_events)
1439                 return;
1440
1441         spin_lock(&ctx->lock);
1442         /*
1443          * Rotate the first entry last (works just fine for group events too):
1444          */
1445         perf_disable();
1446         list_for_each_entry(event, &ctx->group_list, group_entry) {
1447                 list_move_tail(&event->group_entry, &ctx->group_list);
1448                 break;
1449         }
1450         perf_enable();
1451
1452         spin_unlock(&ctx->lock);
1453 }
1454
1455 void perf_event_task_tick(struct task_struct *curr, int cpu)
1456 {
1457         struct perf_cpu_context *cpuctx;
1458         struct perf_event_context *ctx;
1459
1460         if (!atomic_read(&nr_events))
1461                 return;
1462
1463         cpuctx = &per_cpu(perf_cpu_context, cpu);
1464         ctx = curr->perf_event_ctxp;
1465
1466         perf_ctx_adjust_freq(&cpuctx->ctx);
1467         if (ctx)
1468                 perf_ctx_adjust_freq(ctx);
1469
1470         perf_event_cpu_sched_out(cpuctx);
1471         if (ctx)
1472                 __perf_event_task_sched_out(ctx);
1473
1474         rotate_ctx(&cpuctx->ctx);
1475         if (ctx)
1476                 rotate_ctx(ctx);
1477
1478         perf_event_cpu_sched_in(cpuctx, cpu);
1479         if (ctx)
1480                 perf_event_task_sched_in(curr, cpu);
1481 }
1482
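/*
 * Summary sketch of the tick path above: each timer tick re-estimates
 * sample periods (perf_ctx_adjust_freq), schedules everything out,
 * rotates the group lists and schedules back in, which round-robins
 * groups that could not all fit on the PMU at once.
 */
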
1483 /*
1484  * Enable all of a task's events that have been marked enable-on-exec.
1485  * This expects task == current.
1486  */
1487 static void perf_event_enable_on_exec(struct task_struct *task)
1488 {
1489         struct perf_event_context *ctx;
1490         struct perf_event *event;
1491         unsigned long flags;
1492         int enabled = 0;
1493
1494         local_irq_save(flags);
1495         ctx = task->perf_event_ctxp;
1496         if (!ctx || !ctx->nr_events)
1497                 goto out;
1498
1499         __perf_event_task_sched_out(ctx);
1500
1501         spin_lock(&ctx->lock);
1502
1503         list_for_each_entry(event, &ctx->group_list, group_entry) {
1504                 if (!event->attr.enable_on_exec)
1505                         continue;
1506                 event->attr.enable_on_exec = 0;
1507                 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1508                         continue;
1509                 __perf_event_mark_enabled(event, ctx);
1510                 enabled = 1;
1511         }
1512
1513         /*
1514          * Unclone this context if we enabled any event.
1515          */
1516         if (enabled)
1517                 unclone_ctx(ctx);
1518
1519         spin_unlock(&ctx->lock);
1520
1521         perf_event_task_sched_in(task, smp_processor_id());
1522  out:
1523         local_irq_restore(flags);
1524 }
1525
1526 /*
1527  * Cross CPU call to read the hardware event
1528  */
1529 static void __perf_event_read(void *info)
1530 {
1531         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1532         struct perf_event *event = info;
1533         struct perf_event_context *ctx = event->ctx;
1534
1535         /*
1536          * If this is a task context, we need to check whether it is
1537          * the current task context of this cpu.  If not it has been
1538          * scheduled out before the smp call arrived.  In that case
1539          * event->count would have been updated to a recent sample
1540          * when the event was scheduled out.
1541          */
1542         if (ctx->task && cpuctx->task_ctx != ctx)
1543                 return;
1544
1545         spin_lock(&ctx->lock);
1546         update_context_time(ctx);
1547         update_event_times(event);
1548         spin_unlock(&ctx->lock);
1549
1550         event->pmu->read(event);
1551 }
1552
1553 static u64 perf_event_read(struct perf_event *event)
1554 {
1555         /*
1556          * If event is enabled and currently active on a CPU, update the
1557          * value in the event structure:
1558          */
1559         if (event->state == PERF_EVENT_STATE_ACTIVE) {
1560                 smp_call_function_single(event->oncpu,
1561                                          __perf_event_read, event, 1);
1562         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1563                 struct perf_event_context *ctx = event->ctx;
1564                 unsigned long flags;
1565
1566                 spin_lock_irqsave(&ctx->lock, flags);
1567                 update_context_time(ctx);
1568                 update_event_times(event);
1569                 spin_unlock_irqrestore(&ctx->lock, flags);
1570         }
1571
1572         return atomic64_read(&event->count);
1573 }
1574
1575 /*
1576  * Initialize the perf_event context in a task_struct:
1577  */
1578 static void
1579 __perf_event_init_context(struct perf_event_context *ctx,
1580                             struct task_struct *task)
1581 {
1582         memset(ctx, 0, sizeof(*ctx));
1583         spin_lock_init(&ctx->lock);
1584         mutex_init(&ctx->mutex);
1585         INIT_LIST_HEAD(&ctx->group_list);
1586         INIT_LIST_HEAD(&ctx->event_list);
1587         atomic_set(&ctx->refcount, 1);
1588         ctx->task = task;
1589 }
1590
1591 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1592 {
1593         struct perf_event_context *ctx;
1594         struct perf_cpu_context *cpuctx;
1595         struct task_struct *task;
1596         unsigned long flags;
1597         int err;
1598
1599         /*
1600          * If cpu is not a wildcard then this is a percpu event:
1601          */
1602         if (cpu != -1) {
1603                 /* Must be root to operate on a CPU event: */
1604                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1605                         return ERR_PTR(-EACCES);
1606
1607                 if (cpu < 0 || cpu >= num_possible_cpus())
1608                         return ERR_PTR(-EINVAL);
1609
1610                 /*
1611                  * We could be clever and allow attaching an event to an
1612                  * offline CPU and activate it when the CPU comes up, but
1613                  * that's for later.
1614                  */
1615                 if (!cpu_isset(cpu, cpu_online_map))
1616                         return ERR_PTR(-ENODEV);
1617
1618                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1619                 ctx = &cpuctx->ctx;
1620                 get_ctx(ctx);
1621
1622                 return ctx;
1623         }
1624
1625         rcu_read_lock();
1626         if (!pid)
1627                 task = current;
1628         else
1629                 task = find_task_by_vpid(pid);
1630         if (task)
1631                 get_task_struct(task);
1632         rcu_read_unlock();
1633
1634         if (!task)
1635                 return ERR_PTR(-ESRCH);
1636
1637         /*
1638          * Can't attach events to a dying task.
1639          */
1640         err = -ESRCH;
1641         if (task->flags & PF_EXITING)
1642                 goto errout;
1643
1644         /* Reuse ptrace permission checks for now. */
1645         err = -EACCES;
1646         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1647                 goto errout;
1648
1649  retry:
1650         ctx = perf_lock_task_context(task, &flags);
1651         if (ctx) {
1652                 unclone_ctx(ctx);
1653                 spin_unlock_irqrestore(&ctx->lock, flags);
1654         }
1655
1656         if (!ctx) {
1657                 ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1658                 err = -ENOMEM;
1659                 if (!ctx)
1660                         goto errout;
1661                 __perf_event_init_context(ctx, task);
1662                 get_ctx(ctx);
1663                 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1664                         /*
1665                          * We raced with some other task; use
1666                          * the context they set.
1667                          */
1668                         kfree(ctx);
1669                         goto retry;
1670                 }
1671                 get_task_struct(task);
1672         }
1673
1674         put_task_struct(task);
1675         return ctx;
1676
1677  errout:
1678         put_task_struct(task);
1679         return ERR_PTR(err);
1680 }
1681
1682 static void perf_event_free_filter(struct perf_event *event);
1683
1684 static void free_event_rcu(struct rcu_head *head)
1685 {
1686         struct perf_event *event;
1687
1688         event = container_of(head, struct perf_event, rcu_head);
1689         if (event->ns)
1690                 put_pid_ns(event->ns);
1691         perf_event_free_filter(event);
1692         kfree(event);
1693 }
1694
1695 static void perf_pending_sync(struct perf_event *event);
1696
1697 static void free_event(struct perf_event *event)
1698 {
1699         perf_pending_sync(event);
1700
1701         if (!event->parent) {
1702                 atomic_dec(&nr_events);
1703                 if (event->attr.mmap)
1704                         atomic_dec(&nr_mmap_events);
1705                 if (event->attr.comm)
1706                         atomic_dec(&nr_comm_events);
1707                 if (event->attr.task)
1708                         atomic_dec(&nr_task_events);
1709         }
1710
1711         if (event->output) {
1712                 fput(event->output->filp);
1713                 event->output = NULL;
1714         }
1715
1716         if (event->destroy)
1717                 event->destroy(event);
1718
1719         put_ctx(event->ctx);
1720         call_rcu(&event->rcu_head, free_event_rcu);
1721 }
1722
1723 int perf_event_release_kernel(struct perf_event *event)
1724 {
1725         struct perf_event_context *ctx = event->ctx;
1726
1727         WARN_ON_ONCE(ctx->parent_ctx);
1728         mutex_lock(&ctx->mutex);
1729         perf_event_remove_from_context(event);
1730         mutex_unlock(&ctx->mutex);
1731
1732         mutex_lock(&event->owner->perf_event_mutex);
1733         list_del_init(&event->owner_entry);
1734         mutex_unlock(&event->owner->perf_event_mutex);
1735         put_task_struct(event->owner);
1736
1737         free_event(event);
1738
1739         return 0;
1740 }
1741 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1742
1743 /*
1744  * Called when the last reference to the file is gone.
1745  */
1746 static int perf_release(struct inode *inode, struct file *file)
1747 {
1748         struct perf_event *event = file->private_data;
1749
1750         file->private_data = NULL;
1751
1752         return perf_event_release_kernel(event);
1753 }
1754
1755 static int perf_event_read_size(struct perf_event *event)
1756 {
1757         int entry = sizeof(u64); /* value */
1758         int size = 0;
1759         int nr = 1;
1760
1761         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1762                 size += sizeof(u64);
1763
1764         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1765                 size += sizeof(u64);
1766
1767         if (event->attr.read_format & PERF_FORMAT_ID)
1768                 entry += sizeof(u64);
1769
1770         if (event->attr.read_format & PERF_FORMAT_GROUP) {
1771                 nr += event->group_leader->nr_siblings;
1772                 size += sizeof(u64);
1773         }
1774
1775         size += entry * nr;
1776
1777         return size;
1778 }
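/*
 * Illustrative sketch, not part of this file: the size computed above is
 * the record that read() returns to user space.  Assuming read_format is
 * PERF_FORMAT_GROUP | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID, the buffer is laid
 * out as:
 *
 *	struct read_format_group {
 *		u64	nr;			[ 1 + nr_siblings ]
 *		u64	time_enabled;
 *		u64	time_running;
 *		struct {
 *			u64	value;
 *			u64	id;		[ only with PERF_FORMAT_ID ]
 *		} cnt[nr];			[ leader first, then siblings ]
 *	};
 *
 * i.e. size = 3 * sizeof(u64) + nr * 2 * sizeof(u64), matching the
 * entry/nr arithmetic above.
 */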
1779
1780 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
1781 {
1782         struct perf_event *child;
1783         u64 total = 0;
1784
1785         *enabled = 0;
1786         *running = 0;
1787
1788         mutex_lock(&event->child_mutex);
1789         total += perf_event_read(event);
1790         *enabled += event->total_time_enabled +
1791                         atomic64_read(&event->child_total_time_enabled);
1792         *running += event->total_time_running +
1793                         atomic64_read(&event->child_total_time_running);
1794
1795         list_for_each_entry(child, &event->child_list, child_list) {
1796                 total += perf_event_read(child);
1797                 *enabled += child->total_time_enabled;
1798                 *running += child->total_time_running;
1799         }
1800         mutex_unlock(&event->child_mutex);
1801
1802         return total;
1803 }
1804 EXPORT_SYMBOL_GPL(perf_event_read_value);
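/*
 * Illustrative sketch, not part of this file: the enabled/running pair
 * reported above is what lets a caller compensate for time-multiplexed
 * counters, along the lines of:
 *
 *	u64 enabled, running, count, scaled;
 *
 *	count  = perf_event_read_value(event, &enabled, &running);
 *	scaled = running ? count * enabled / running : count;
 *
 * (a real in-kernel user would need the div64 helpers for the division;
 * this only shows why both times are exported.)
 */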
1805
1806 static int perf_event_read_group(struct perf_event *event,
1807                                    u64 read_format, char __user *buf)
1808 {
1809         struct perf_event *leader = event->group_leader, *sub;
1810         int n = 0, size = 0, ret = -EFAULT;
1811         struct perf_event_context *ctx = leader->ctx;
1812         u64 values[5];
1813         u64 count, enabled, running;
1814
1815         mutex_lock(&ctx->mutex);
1816         count = perf_event_read_value(leader, &enabled, &running);
1817
1818         values[n++] = 1 + leader->nr_siblings;
1819         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1820                 values[n++] = enabled;
1821         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1822                 values[n++] = running;
1823         values[n++] = count;
1824         if (read_format & PERF_FORMAT_ID)
1825                 values[n++] = primary_event_id(leader);
1826
1827         size = n * sizeof(u64);
1828
1829         if (copy_to_user(buf, values, size))
1830                 goto unlock;
1831
1832         ret = size;
1833
1834         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1835                 n = 0;
1836
1837                 values[n++] = perf_event_read_value(sub, &enabled, &running);
1838                 if (read_format & PERF_FORMAT_ID)
1839                         values[n++] = primary_event_id(sub);
1840
1841                 size = n * sizeof(u64);
1842
1843                 if (copy_to_user(buf + ret, values, size)) {
1844                         ret = -EFAULT;
1845                         goto unlock;
1846                 }
1847
1848                 ret += size;
1849         }
1850 unlock:
1851         mutex_unlock(&ctx->mutex);
1852
1853         return ret;
1854 }
1855
1856 static int perf_event_read_one(struct perf_event *event,
1857                                  u64 read_format, char __user *buf)
1858 {
1859         u64 enabled, running;
1860         u64 values[4];
1861         int n = 0;
1862
1863         values[n++] = perf_event_read_value(event, &enabled, &running);
1864         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1865                 values[n++] = enabled;
1866         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1867                 values[n++] = running;
1868         if (read_format & PERF_FORMAT_ID)
1869                 values[n++] = primary_event_id(event);
1870
1871         if (copy_to_user(buf, values, n * sizeof(u64)))
1872                 return -EFAULT;
1873
1874         return n * sizeof(u64);
1875 }
1876
1877 /*
1878  * Read the performance event - simple non-blocking version for now
1879  */
1880 static ssize_t
1881 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1882 {
1883         u64 read_format = event->attr.read_format;
1884         int ret;
1885
1886         /*
1887          * Return end-of-file for a read on an event that is in
1888          * error state (i.e. because it was pinned but it couldn't be
1889          * scheduled on to the CPU at some point).
1890          */
1891         if (event->state == PERF_EVENT_STATE_ERROR)
1892                 return 0;
1893
1894         if (count < perf_event_read_size(event))
1895                 return -ENOSPC;
1896
1897         WARN_ON_ONCE(event->ctx->parent_ctx);
1898         if (read_format & PERF_FORMAT_GROUP)
1899                 ret = perf_event_read_group(event, read_format, buf);
1900         else
1901                 ret = perf_event_read_one(event, read_format, buf);
1902
1903         return ret;
1904 }
1905
1906 static ssize_t
1907 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1908 {
1909         struct perf_event *event = file->private_data;
1910
1911         return perf_read_hw(event, buf, count);
1912 }
1913
1914 static unsigned int perf_poll(struct file *file, poll_table *wait)
1915 {
1916         struct perf_event *event = file->private_data;
1917         struct perf_mmap_data *data;
1918         unsigned int events = POLL_HUP;
1919
1920         rcu_read_lock();
1921         data = rcu_dereference(event->data);
1922         if (data)
1923                 events = atomic_xchg(&data->poll, 0);
1924         rcu_read_unlock();
1925
1926         poll_wait(file, &event->waitq, wait);
1927
1928         return events;
1929 }
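/*
 * Illustrative user-space sketch, not part of this file: the poll mask
 * published through data->poll above lets a consumer sleep until the ring
 * buffer has data:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 * where perf_fd stands for the file descriptor returned by the
 * perf_event_open() syscall.
 */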
1930
1931 static void perf_event_reset(struct perf_event *event)
1932 {
1933         (void)perf_event_read(event);
1934         atomic64_set(&event->count, 0);
1935         perf_event_update_userpage(event);
1936 }
1937
1938 /*
1939  * Holding the top-level event's child_mutex means that any
1940  * descendant process that has inherited this event will block
1941  * in sync_child_event if it goes to exit, thus satisfying the
1942  * task existence requirements of perf_event_enable/disable.
1943  */
1944 static void perf_event_for_each_child(struct perf_event *event,
1945                                         void (*func)(struct perf_event *))
1946 {
1947         struct perf_event *child;
1948
1949         WARN_ON_ONCE(event->ctx->parent_ctx);
1950         mutex_lock(&event->child_mutex);
1951         func(event);
1952         list_for_each_entry(child, &event->child_list, child_list)
1953                 func(child);
1954         mutex_unlock(&event->child_mutex);
1955 }
1956
1957 static void perf_event_for_each(struct perf_event *event,
1958                                   void (*func)(struct perf_event *))
1959 {
1960         struct perf_event_context *ctx = event->ctx;
1961         struct perf_event *sibling;
1962
1963         WARN_ON_ONCE(ctx->parent_ctx);
1964         mutex_lock(&ctx->mutex);
1965         event = event->group_leader;
1966
1967         perf_event_for_each_child(event, func);
1969         list_for_each_entry(sibling, &event->sibling_list, group_entry)
1970                 perf_event_for_each_child(sibling, func);
1971         mutex_unlock(&ctx->mutex);
1972 }
1973
1974 static int perf_event_period(struct perf_event *event, u64 __user *arg)
1975 {
1976         struct perf_event_context *ctx = event->ctx;
1977         unsigned long size;
1978         int ret = 0;
1979         u64 value;
1980
1981         if (!event->attr.sample_period)
1982                 return -EINVAL;
1983
1984         size = copy_from_user(&value, arg, sizeof(value));
1985         if (size)
1986                 return -EFAULT;
1987
1988         if (!value)
1989                 return -EINVAL;
1990
1991         spin_lock_irq(&ctx->lock);
1992         if (event->attr.freq) {
1993                 if (value > sysctl_perf_event_sample_rate) {
1994                         ret = -EINVAL;
1995                         goto unlock;
1996                 }
1997
1998                 event->attr.sample_freq = value;
1999         } else {
2000                 event->attr.sample_period = value;
2001                 event->hw.sample_period = value;
2002         }
2003 unlock:
2004         spin_unlock_irq(&ctx->lock);
2005
2006         return ret;
2007 }
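/*
 * Illustrative user-space sketch, not part of this file: the ioctl handled
 * above takes a pointer to a u64 holding the new period (or, if attr.freq
 * was set when the event was created, the new frequency):
 *
 *	u64 period = 200000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period))
 *		perror("PERF_EVENT_IOC_PERIOD");
 */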
2008
2009 static int perf_event_set_output(struct perf_event *event, int output_fd);
2010 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2011
2012 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2013 {
2014         struct perf_event *event = file->private_data;
2015         void (*func)(struct perf_event *);
2016         u32 flags = arg;
2017
2018         switch (cmd) {
2019         case PERF_EVENT_IOC_ENABLE:
2020                 func = perf_event_enable;
2021                 break;
2022         case PERF_EVENT_IOC_DISABLE:
2023                 func = perf_event_disable;
2024                 break;
2025         case PERF_EVENT_IOC_RESET:
2026                 func = perf_event_reset;
2027                 break;
2028
2029         case PERF_EVENT_IOC_REFRESH:
2030                 return perf_event_refresh(event, arg);
2031
2032         case PERF_EVENT_IOC_PERIOD:
2033                 return perf_event_period(event, (u64 __user *)arg);
2034
2035         case PERF_EVENT_IOC_SET_OUTPUT:
2036                 return perf_event_set_output(event, arg);
2037
2038         case PERF_EVENT_IOC_SET_FILTER:
2039                 return perf_event_set_filter(event, (void __user *)arg);
2040
2041         default:
2042                 return -ENOTTY;
2043         }
2044
2045         if (flags & PERF_IOC_FLAG_GROUP)
2046                 perf_event_for_each(event, func);
2047         else
2048                 perf_event_for_each_child(event, func);
2049
2050         return 0;
2051 }
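/*
 * Illustrative user-space sketch, not part of this file: for the
 * enable/disable/reset commands the ioctl argument is a flags word, so a
 * whole group can be toggled through its leader:
 *
 *	ioctl(leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *
 * Without PERF_IOC_FLAG_GROUP only the event itself (and its inherited
 * children) is affected.
 */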
2052
2053 int perf_event_task_enable(void)
2054 {
2055         struct perf_event *event;
2056
2057         mutex_lock(&current->perf_event_mutex);
2058         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2059                 perf_event_for_each_child(event, perf_event_enable);
2060         mutex_unlock(&current->perf_event_mutex);
2061
2062         return 0;
2063 }
2064
2065 int perf_event_task_disable(void)
2066 {
2067         struct perf_event *event;
2068
2069         mutex_lock(&current->perf_event_mutex);
2070         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2071                 perf_event_for_each_child(event, perf_event_disable);
2072         mutex_unlock(&current->perf_event_mutex);
2073
2074         return 0;
2075 }
2076
2077 #ifndef PERF_EVENT_INDEX_OFFSET
2078 # define PERF_EVENT_INDEX_OFFSET 0
2079 #endif
2080
2081 static int perf_event_index(struct perf_event *event)
2082 {
2083         if (event->state != PERF_EVENT_STATE_ACTIVE)
2084                 return 0;
2085
2086         return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2087 }
2088
2089 /*
2090  * Callers need to ensure there can be no nesting of this function, otherwise
2091  * the seqlock logic goes bad. We can not serialize this because the arch
2092  * code calls this from NMI context.
2093  */
2094 void perf_event_update_userpage(struct perf_event *event)
2095 {
2096         struct perf_event_mmap_page *userpg;
2097         struct perf_mmap_data *data;
2098
2099         rcu_read_lock();
2100         data = rcu_dereference(event->data);
2101         if (!data)
2102                 goto unlock;
2103
2104         userpg = data->user_page;
2105
2106         /*
2107          * Disable preemption so as to not let the corresponding user-space
2108          * spin too long if we get preempted.
2109          */
2110         preempt_disable();
2111         ++userpg->lock;
2112         barrier();
2113         userpg->index = perf_event_index(event);
2114         userpg->offset = atomic64_read(&event->count);
2115         if (event->state == PERF_EVENT_STATE_ACTIVE)
2116                 userpg->offset -= atomic64_read(&event->hw.prev_count);
2117
2118         userpg->time_enabled = event->total_time_enabled +
2119                         atomic64_read(&event->child_total_time_enabled);
2120
2121         userpg->time_running = event->total_time_running +
2122                         atomic64_read(&event->child_total_time_running);
2123
2124         barrier();
2125         ++userpg->lock;
2126         preempt_enable();
2127 unlock:
2128         rcu_read_unlock();
2129 }
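/*
 * Illustrative user-space sketch, not part of this file: the ->lock
 * increments above form a seqcount, so a self-monitoring reader of the
 * mmap()ed control page would do something like:
 *
 *	struct perf_event_mmap_page *pc = base;	[ page 0 of the mapping ]
 *	u32 seq;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count = pc->offset;
 *		if (pc->index)			[ counter is live on a PMC ]
 *			count += read_hw_counter(pc->index - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * read_hw_counter() stands for an arch-specific user-space counter read
 * (e.g. RDPMC on x86) and is only a placeholder here; an odd ->lock value
 * likewise means an update was in progress and the values must be re-read.
 */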
2130
2131 static unsigned long perf_data_size(struct perf_mmap_data *data)
2132 {
2133         return data->nr_pages << (PAGE_SHIFT + data->data_order);
2134 }
2135
2136 #ifndef CONFIG_PERF_USE_VMALLOC
2137
2138 /*
2139  * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
2140  */
2141
2142 static struct page *
2143 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2144 {
2145         if (pgoff > data->nr_pages)
2146                 return NULL;
2147
2148         if (pgoff == 0)
2149                 return virt_to_page(data->user_page);
2150
2151         return virt_to_page(data->data_pages[pgoff - 1]);
2152 }
2153
2154 static struct perf_mmap_data *
2155 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2156 {
2157         struct perf_mmap_data *data;
2158         unsigned long size;
2159         int i;
2160
2161         WARN_ON(atomic_read(&event->mmap_count));
2162
2163         size = sizeof(struct perf_mmap_data);
2164         size += nr_pages * sizeof(void *);
2165
2166         data = kzalloc(size, GFP_KERNEL);
2167         if (!data)
2168                 goto fail;
2169
2170         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2171         if (!data->user_page)
2172                 goto fail_user_page;
2173
2174         for (i = 0; i < nr_pages; i++) {
2175                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2176                 if (!data->data_pages[i])
2177                         goto fail_data_pages;
2178         }
2179
2180         data->data_order = 0;
2181         data->nr_pages = nr_pages;
2182
2183         return data;
2184
2185 fail_data_pages:
2186         for (i--; i >= 0; i--)
2187                 free_page((unsigned long)data->data_pages[i]);
2188
2189         free_page((unsigned long)data->user_page);
2190
2191 fail_user_page:
2192         kfree(data);
2193
2194 fail:
2195         return NULL;
2196 }
2197
2198 static void perf_mmap_free_page(unsigned long addr)
2199 {
2200         struct page *page = virt_to_page((void *)addr);
2201
2202         page->mapping = NULL;
2203         __free_page(page);
2204 }
2205
2206 static void perf_mmap_data_free(struct perf_mmap_data *data)
2207 {
2208         int i;
2209
2210         perf_mmap_free_page((unsigned long)data->user_page);
2211         for (i = 0; i < data->nr_pages; i++)
2212                 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2213         kfree(data);
2214 }
2215
2216 #else
2217
2218 /*
2219  * Back perf_mmap() with vmalloc memory.
2220  *
2221  * Required for architectures that have d-cache aliasing issues.
2222  */
2223
2224 static struct page *
2225 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2226 {
2227         if (pgoff > (1UL << data->data_order))
2228                 return NULL;
2229
2230         return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2231 }
2232
2233 static void perf_mmap_unmark_page(void *addr)
2234 {
2235         struct page *page = vmalloc_to_page(addr);
2236
2237         page->mapping = NULL;
2238 }
2239
2240 static void perf_mmap_data_free_work(struct work_struct *work)
2241 {
2242         struct perf_mmap_data *data;
2243         void *base;
2244         int i, nr;
2245
2246         data = container_of(work, struct perf_mmap_data, work);
2247         nr = 1 << data->data_order;
2248
2249         base = data->user_page;
2250         for (i = 0; i < nr + 1; i++)
2251                 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2252
2253         vfree(base);
2254         kfree(data);
2255 }
2256
2257 static void perf_mmap_data_free(struct perf_mmap_data *data)
2258 {
2259         schedule_work(&data->work);
2260 }
2261
2262 static struct perf_mmap_data *
2263 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2264 {
2265         struct perf_mmap_data *data;
2266         unsigned long size;
2267         void *all_buf;
2268
2269         WARN_ON(atomic_read(&event->mmap_count));
2270
2271         size = sizeof(struct perf_mmap_data);
2272         size += sizeof(void *);
2273
2274         data = kzalloc(size, GFP_KERNEL);
2275         if (!data)
2276                 goto fail;
2277
2278         INIT_WORK(&data->work, perf_mmap_data_free_work);
2279
2280         all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2281         if (!all_buf)
2282                 goto fail_all_buf;
2283
2284         data->user_page = all_buf;
2285         data->data_pages[0] = all_buf + PAGE_SIZE;
2286         data->data_order = ilog2(nr_pages);
2287         data->nr_pages = 1;
2288
2289         return data;
2290
2291 fail_all_buf:
2292         kfree(data);
2293
2294 fail:
2295         return NULL;
2296 }
2297
2298 #endif
2299
2300 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2301 {
2302         struct perf_event *event = vma->vm_file->private_data;
2303         struct perf_mmap_data *data;
2304         int ret = VM_FAULT_SIGBUS;
2305
2306         if (vmf->flags & FAULT_FLAG_MKWRITE) {
2307                 if (vmf->pgoff == 0)
2308                         ret = 0;
2309                 return ret;
2310         }
2311
2312         rcu_read_lock();
2313         data = rcu_dereference(event->data);
2314         if (!data)
2315                 goto unlock;
2316
2317         if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2318                 goto unlock;
2319
2320         vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2321         if (!vmf->page)
2322                 goto unlock;
2323
2324         get_page(vmf->page);
2325         vmf->page->mapping = vma->vm_file->f_mapping;
2326         vmf->page->index   = vmf->pgoff;
2327
2328         ret = 0;
2329 unlock:
2330         rcu_read_unlock();
2331
2332         return ret;
2333 }
2334
2335 static void
2336 perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2337 {
2338         long max_size = perf_data_size(data);
2339
2340         atomic_set(&data->lock, -1);
2341
2342         if (event->attr.watermark) {
2343                 data->watermark = min_t(long, max_size,
2344                                         event->attr.wakeup_watermark);
2345         }
2346
2347         if (!data->watermark)
2348                 data->watermark = max_size / 2;
2349
2350
2351         rcu_assign_pointer(event->data, data);
2352 }
2353
2354 static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2355 {
2356         struct perf_mmap_data *data;
2357
2358         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2359         perf_mmap_data_free(data);
2360 }
2361
2362 static void perf_mmap_data_release(struct perf_event *event)
2363 {
2364         struct perf_mmap_data *data = event->data;
2365
2366         WARN_ON(atomic_read(&event->mmap_count));
2367
2368         rcu_assign_pointer(event->data, NULL);
2369         call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2370 }
2371
2372 static void perf_mmap_open(struct vm_area_struct *vma)
2373 {
2374         struct perf_event *event = vma->vm_file->private_data;
2375
2376         atomic_inc(&event->mmap_count);
2377 }
2378
2379 static void perf_mmap_close(struct vm_area_struct *vma)
2380 {
2381         struct perf_event *event = vma->vm_file->private_data;
2382
2383         WARN_ON_ONCE(event->ctx->parent_ctx);
2384         if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2385                 unsigned long size = perf_data_size(event->data);
2386                 struct user_struct *user = current_user();
2387
2388                 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2389                 vma->vm_mm->locked_vm -= event->data->nr_locked;
2390                 perf_mmap_data_release(event);
2391                 mutex_unlock(&event->mmap_mutex);
2392         }
2393 }
2394
2395 static const struct vm_operations_struct perf_mmap_vmops = {
2396         .open           = perf_mmap_open,
2397         .close          = perf_mmap_close,
2398         .fault          = perf_mmap_fault,
2399         .page_mkwrite   = perf_mmap_fault,
2400 };
2401
2402 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2403 {
2404         struct perf_event *event = file->private_data;
2405         unsigned long user_locked, user_lock_limit;
2406         struct user_struct *user = current_user();
2407         unsigned long locked, lock_limit;
2408         struct perf_mmap_data *data;
2409         unsigned long vma_size;
2410         unsigned long nr_pages;
2411         long user_extra, extra;
2412         int ret = 0;
2413
2414         if (!(vma->vm_flags & VM_SHARED))
2415                 return -EINVAL;
2416
2417         vma_size = vma->vm_end - vma->vm_start;
2418         nr_pages = (vma_size / PAGE_SIZE) - 1;
2419
2420         /*
2421          * If we have data pages ensure they're a power-of-two number, so we
2422          * can do bitmasks instead of modulo.
2423          */
2424         if (nr_pages != 0 && !is_power_of_2(nr_pages))
2425                 return -EINVAL;
2426
2427         if (vma_size != PAGE_SIZE * (1 + nr_pages))
2428                 return -EINVAL;
2429
2430         if (vma->vm_pgoff != 0)
2431                 return -EINVAL;
2432
2433         WARN_ON_ONCE(event->ctx->parent_ctx);
2434         mutex_lock(&event->mmap_mutex);
2435         if (event->output) {
2436                 ret = -EINVAL;
2437                 goto unlock;
2438         }
2439
2440         if (atomic_inc_not_zero(&event->mmap_count)) {
2441                 if (nr_pages != event->data->nr_pages)
2442                         ret = -EINVAL;
2443                 goto unlock;
2444         }
2445
2446         user_extra = nr_pages + 1;
2447         user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2448
2449         /*
2450          * Increase the limit linearly with more CPUs:
2451          */
2452         user_lock_limit *= num_online_cpus();
2453
2454         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2455
2456         extra = 0;
2457         if (user_locked > user_lock_limit)
2458                 extra = user_locked - user_lock_limit;
2459
2460         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2461         lock_limit >>= PAGE_SHIFT;
2462         locked = vma->vm_mm->locked_vm + extra;
2463
2464         if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2465                 !capable(CAP_IPC_LOCK)) {
2466                 ret = -EPERM;
2467                 goto unlock;
2468         }
2469
2470         WARN_ON(event->data);
2471
2472         data = perf_mmap_data_alloc(event, nr_pages);
2473         ret = -ENOMEM;
2474         if (!data)
2475                 goto unlock;
2476
2477         ret = 0;
2478         perf_mmap_data_init(event, data);
2479
2480         atomic_set(&event->mmap_count, 1);
2481         atomic_long_add(user_extra, &user->locked_vm);
2482         vma->vm_mm->locked_vm += extra;
2483         event->data->nr_locked = extra;
2484         if (vma->vm_flags & VM_WRITE)
2485                 event->data->writable = 1;
2486
2487 unlock:
2488         mutex_unlock(&event->mmap_mutex);
2489
2490         vma->vm_flags |= VM_RESERVED;
2491         vma->vm_ops = &perf_mmap_vmops;
2492
2493         return ret;
2494 }
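/*
 * Illustrative user-space sketch, not part of this file: the sizing rules
 * enforced above mean callers map one control page plus a power-of-two
 * number of data pages, starting at offset 0:
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	size_t len  = (1 + 8) * page;		[ 8 data pages ]
 *	void  *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, perf_fd, 0);
 *
 * Mapping without PROT_WRITE leaves data->writable clear, in which case
 * perf_output_space() never stops at data_tail (overwrite behaviour).
 */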
2495
2496 static int perf_fasync(int fd, struct file *filp, int on)
2497 {
2498         struct inode *inode = filp->f_path.dentry->d_inode;
2499         struct perf_event *event = filp->private_data;
2500         int retval;
2501
2502         mutex_lock(&inode->i_mutex);
2503         retval = fasync_helper(fd, filp, on, &event->fasync);
2504         mutex_unlock(&inode->i_mutex);
2505
2506         if (retval < 0)
2507                 return retval;
2508
2509         return 0;
2510 }
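/*
 * Illustrative user-space sketch, not part of this file: the fasync hook
 * above is what makes SIGIO delivery work, the usual setup being:
 *
 *	fcntl(perf_fd, F_SETOWN, getpid());
 *	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
 *
 * after which perf_event_wakeup() ends up signalling the owner through
 * kill_fasync().
 */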
2511
2512 static const struct file_operations perf_fops = {
2513         .release                = perf_release,
2514         .read                   = perf_read,
2515         .poll                   = perf_poll,
2516         .unlocked_ioctl         = perf_ioctl,
2517         .compat_ioctl           = perf_ioctl,
2518         .mmap                   = perf_mmap,
2519         .fasync                 = perf_fasync,
2520 };
2521
2522 /*
2523  * Perf event wakeup
2524  *
2525  * If there's data, ensure we set the poll() state and publish everything
2526  * to user-space before waking everybody up.
2527  */
2528
2529 void perf_event_wakeup(struct perf_event *event)
2530 {
2531         wake_up_all(&event->waitq);
2532
2533         if (event->pending_kill) {
2534                 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2535                 event->pending_kill = 0;
2536         }
2537 }
2538
2539 /*
2540  * Pending wakeups
2541  *
2542  * Handle the case where we need to wake up from NMI (or rq->lock) context.
2543  *
2544  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2545  * single linked list and use cmpxchg() to add entries lockless.
2546  */
2547
2548 static void perf_pending_event(struct perf_pending_entry *entry)
2549 {
2550         struct perf_event *event = container_of(entry,
2551                         struct perf_event, pending);
2552
2553         if (event->pending_disable) {
2554                 event->pending_disable = 0;
2555                 __perf_event_disable(event);
2556         }
2557
2558         if (event->pending_wakeup) {
2559                 event->pending_wakeup = 0;
2560                 perf_event_wakeup(event);
2561         }
2562 }
2563
2564 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2565
2566 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2567         PENDING_TAIL,
2568 };
2569
2570 static void perf_pending_queue(struct perf_pending_entry *entry,
2571                                void (*func)(struct perf_pending_entry *))
2572 {
2573         struct perf_pending_entry **head;
2574
2575         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2576                 return;
2577
2578         entry->func = func;
2579
2580         head = &get_cpu_var(perf_pending_head);
2581
2582         do {
2583                 entry->next = *head;
2584         } while (cmpxchg(head, entry->next, entry) != entry->next);
2585
2586         set_perf_event_pending();
2587
2588         put_cpu_var(perf_pending_head);
2589 }
2590
2591 static int __perf_pending_run(void)
2592 {
2593         struct perf_pending_entry *list;
2594         int nr = 0;
2595
2596         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2597         while (list != PENDING_TAIL) {
2598                 void (*func)(struct perf_pending_entry *);
2599                 struct perf_pending_entry *entry = list;
2600
2601                 list = list->next;
2602
2603                 func = entry->func;
2604                 entry->next = NULL;
2605                 /*
2606                  * Ensure we observe the unqueue before we issue the wakeup,
2607                  * so that we won't be waiting forever.
2608                  * -- see perf_not_pending().
2609                  */
2610                 smp_wmb();
2611
2612                 func(entry);
2613                 nr++;
2614         }
2615
2616         return nr;
2617 }
2618
2619 static inline int perf_not_pending(struct perf_event *event)
2620 {
2621         /*
2622          * If we flush on whatever CPU we happen to be running on, there is
2623          * a chance we don't need to wait at all.
2624          */
2625         get_cpu();
2626         __perf_pending_run();
2627         put_cpu();
2628
2629         /*
2630          * Ensure we see the proper queue state before going to sleep
2631          * so that we do not miss the wakeup. -- see __perf_pending_run()
2632          */
2633         smp_rmb();
2634         return event->pending.next == NULL;
2635 }
2636
2637 static void perf_pending_sync(struct perf_event *event)
2638 {
2639         wait_event(event->waitq, perf_not_pending(event));
2640 }
2641
2642 void perf_event_do_pending(void)
2643 {
2644         __perf_pending_run();
2645 }
2646
2647 /*
2648  * Callchain support -- arch specific
2649  */
2650
2651 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2652 {
2653         return NULL;
2654 }
2655
2656 /*
2657  * Output
2658  */
2659 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2660                               unsigned long offset, unsigned long head)
2661 {
2662         unsigned long mask;
2663
2664         if (!data->writable)
2665                 return true;
2666
2667         mask = perf_data_size(data) - 1;
2668
2669         offset = (offset - tail) & mask;
2670         head   = (head   - tail) & mask;
2671
2672         if ((int)(head - offset) < 0)
2673                 return false;
2674
2675         return true;
2676 }
2677
2678 static void perf_output_wakeup(struct perf_output_handle *handle)
2679 {
2680         atomic_set(&handle->data->poll, POLL_IN);
2681
2682         if (handle->nmi) {
2683                 handle->event->pending_wakeup = 1;
2684                 perf_pending_queue(&handle->event->pending,
2685                                    perf_pending_event);
2686         } else
2687                 perf_event_wakeup(handle->event);
2688 }
2689
2690 /*
2691  * Curious locking construct.
2692  *
2693  * We need to ensure a later event doesn't publish a head when a former
2694  * event isn't done writing. However, since we need to deal with NMIs we
2695  * cannot fully serialize things.
2696  *
2697  * What we do is serialize between CPUs so we only have to deal with NMI
2698  * nesting on a single CPU.
2699  *
2700  * We only publish the head (and generate a wakeup) when the outer-most
2701  * event completes.
2702  */
2703 static void perf_output_lock(struct perf_output_handle *handle)
2704 {
2705         struct perf_mmap_data *data = handle->data;
2706         int cur, cpu = get_cpu();
2707
2708         handle->locked = 0;
2709
2710         for (;;) {
2711                 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2712                 if (cur == -1) {
2713                         handle->locked = 1;
2714                         break;
2715                 }
2716                 if (cur == cpu)
2717                         break;
2718
2719                 cpu_relax();
2720         }
2721 }
2722
2723 static void perf_output_unlock(struct perf_output_handle *handle)
2724 {
2725         struct perf_mmap_data *data = handle->data;
2726         unsigned long head;
2727         int cpu;
2728
2729         data->done_head = data->head;
2730
2731         if (!handle->locked)
2732                 goto out;
2733
2734 again:
2735         /*
2736          * The xchg implies a full barrier that ensures all writes are done
2737          * before we publish the new head, matched by a rmb() in userspace when
2738          * reading this position.
2739          */
2740         while ((head = atomic_long_xchg(&data->done_head, 0)))
2741                 data->user_page->data_head = head;
2742
2743         /*
2744          * NMI can happen here, which means we can miss a done_head update.
2745          */
2746
2747         cpu = atomic_xchg(&data->lock, -1);
2748         WARN_ON_ONCE(cpu != smp_processor_id());
2749
2750         /*
2751          * Therefore we have to check that we did not in fact miss one.
2752          */
2753         if (unlikely(atomic_long_read(&data->done_head))) {
2754                 /*
2755                  * Since we had it locked, we can lock it again.
2756                  */
2757                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2758                         cpu_relax();
2759
2760                 goto again;
2761         }
2762
2763         if (atomic_xchg(&data->wakeup, 0))
2764                 perf_output_wakeup(handle);
2765 out:
2766         put_cpu();
2767 }
2768
2769 void perf_output_copy(struct perf_output_handle *handle,
2770                       const void *buf, unsigned int len)
2771 {
2772         unsigned int pages_mask;
2773         unsigned long offset;
2774         unsigned int size;
2775         void **pages;
2776
2777         offset          = handle->offset;
2778         pages_mask      = handle->data->nr_pages - 1;
2779         pages           = handle->data->data_pages;
2780
2781         do {
2782                 unsigned long page_offset;
2783                 unsigned long page_size;
2784                 int nr;
2785
2786                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2787                 page_size   = 1UL << (handle->data->data_order + PAGE_SHIFT);
2788                 page_offset = offset & (page_size - 1);
2789                 size        = min_t(unsigned int, page_size - page_offset, len);
2790
2791                 memcpy(pages[nr] + page_offset, buf, size);
2792
2793                 len         -= size;
2794                 buf         += size;
2795                 offset      += size;
2796         } while (len);
2797
2798         handle->offset = offset;
2799
2800         /*
2801          * Check we didn't copy past our reservation window, taking the
2802          * possible unsigned int wrap into account.
2803          */
2804         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2805 }
2806
2807 int perf_output_begin(struct perf_output_handle *handle,
2808                       struct perf_event *event, unsigned int size,
2809                       int nmi, int sample)
2810 {
2811         struct perf_event *output_event;
2812         struct perf_mmap_data *data;
2813         unsigned long tail, offset, head;
2814         int have_lost;
2815         struct {
2816                 struct perf_event_header header;
2817                 u64                      id;
2818                 u64                      lost;
2819         } lost_event;
2820
2821         rcu_read_lock();
2822         /*
2823          * For inherited events we send all the output towards the parent.
2824          */
2825         if (event->parent)
2826                 event = event->parent;
2827
2828         output_event = rcu_dereference(event->output);
2829         if (output_event)
2830                 event = output_event;
2831
2832         data = rcu_dereference(event->data);
2833         if (!data)
2834                 goto out;
2835
2836         handle->data    = data;
2837         handle->event   = event;
2838         handle->nmi     = nmi;
2839         handle->sample  = sample;
2840
2841         if (!data->nr_pages)
2842                 goto fail;
2843
2844         have_lost = atomic_read(&data->lost);
2845         if (have_lost)
2846                 size += sizeof(lost_event);
2847
2848         perf_output_lock(handle);
2849
2850         do {
2851                 /*
2852                  * Userspace could choose to issue an mb() before updating the
2853                  * tail pointer, so that all reads are completed before the
2854                  * write is issued.
2855                  */
2856                 tail = ACCESS_ONCE(data->user_page->data_tail);
2857                 smp_rmb();
2858                 offset = head = atomic_long_read(&data->head);
2859                 head += size;
2860                 if (unlikely(!perf_output_space(data, tail, offset, head)))
2861                         goto fail;
2862         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2863
2864         handle->offset  = offset;
2865         handle->head    = head;
2866
2867         if (head - tail > data->watermark)
2868                 atomic_set(&data->wakeup, 1);
2869
2870         if (have_lost) {
2871                 lost_event.header.type = PERF_RECORD_LOST;
2872                 lost_event.header.misc = 0;
2873                 lost_event.header.size = sizeof(lost_event);
2874                 lost_event.id          = event->id;
2875                 lost_event.lost        = atomic_xchg(&data->lost, 0);
2876
2877                 perf_output_put(handle, lost_event);
2878         }
2879
2880         return 0;
2881
2882 fail:
2883         atomic_inc(&data->lost);
2884         perf_output_unlock(handle);
2885 out:
2886         rcu_read_unlock();
2887
2888         return -ENOSPC;
2889 }
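/*
 * Illustrative user-space sketch, not part of this file: the head/tail
 * handling above pairs with a consumer loop along the lines of:
 *
 *	u64 head = pc->data_head;
 *	rmb();
 *	while (tail != head) {
 *		[ consume the record at data + (tail & mask) ]
 *		tail += ((struct perf_event_header *)(data + (tail & mask)))->size;
 *	}
 *	mb();				[ see the mb() comment above ]
 *	pc->data_tail = tail;
 *
 * Only advancing data_tail after the records have been read is what keeps
 * perf_output_space() from letting the kernel overwrite unconsumed data.
 */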
2890
2891 void perf_output_end(struct perf_output_handle *handle)
2892 {
2893         struct perf_event *event = handle->event;
2894         struct perf_mmap_data *data = handle->data;
2895
2896         int wakeup_events = event->attr.wakeup_events;
2897
2898         if (handle->sample && wakeup_events) {
2899                 int events = atomic_inc_return(&data->events);
2900                 if (events >= wakeup_events) {
2901                         atomic_sub(wakeup_events, &data->events);
2902                         atomic_set(&data->wakeup, 1);
2903                 }
2904         }
2905
2906         perf_output_unlock(handle);
2907         rcu_read_unlock();
2908 }
2909
2910 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2911 {
2912         /*
2913          * only top level events have the pid namespace they were created in
2914          */
2915         if (event->parent)
2916                 event = event->parent;
2917
2918         return task_tgid_nr_ns(p, event->ns);
2919 }
2920
2921 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2922 {
2923         /*
2924          * only top level events have the pid namespace they were created in
2925          */
2926         if (event->parent)
2927                 event = event->parent;
2928
2929         return task_pid_nr_ns(p, event->ns);
2930 }
2931
2932 static void perf_output_read_one(struct perf_output_handle *handle,
2933                                  struct perf_event *event)
2934 {
2935         u64 read_format = event->attr.read_format;
2936         u64 values[4];
2937         int n = 0;
2938
2939         values[n++] = atomic64_read(&event->count);
2940         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2941                 values[n++] = event->total_time_enabled +
2942                         atomic64_read(&event->child_total_time_enabled);
2943         }
2944         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2945                 values[n++] = event->total_time_running +
2946                         atomic64_read(&event->child_total_time_running);
2947         }
2948         if (read_format & PERF_FORMAT_ID)
2949                 values[n++] = primary_event_id(event);
2950
2951         perf_output_copy(handle, values, n * sizeof(u64));
2952 }
2953
2954 /*
2955  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2956  */
2957 static void perf_output_read_group(struct perf_output_handle *handle,
2958                             struct perf_event *event)
2959 {
2960         struct perf_event *leader = event->group_leader, *sub;
2961         u64 read_format = event->attr.read_format;
2962         u64 values[5];
2963         int n = 0;
2964
2965         values[n++] = 1 + leader->nr_siblings;
2966
2967         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2968                 values[n++] = leader->total_time_enabled;
2969
2970         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2971                 values[n++] = leader->total_time_running;
2972
2973         if (leader != event)
2974                 leader->pmu->read(leader);
2975
2976         values[n++] = atomic64_read(&leader->count);
2977         if (read_format & PERF_FORMAT_ID)
2978                 values[n++] = primary_event_id(leader);
2979
2980         perf_output_copy(handle, values, n * sizeof(u64));
2981
2982         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2983                 n = 0;
2984
2985                 if (sub != event)
2986                         sub->pmu->read(sub);
2987
2988                 values[n++] = atomic64_read(&sub->count);
2989                 if (read_format & PERF_FORMAT_ID)
2990                         values[n++] = primary_event_id(sub);
2991
2992                 perf_output_copy(handle, values, n * sizeof(u64));
2993         }
2994 }
2995
2996 static void perf_output_read(struct perf_output_handle *handle,
2997                              struct perf_event *event)
2998 {
2999         if (event->attr.read_format & PERF_FORMAT_GROUP)
3000                 perf_output_read_group(handle, event);
3001         else
3002                 perf_output_read_one(handle, event);
3003 }
3004
3005 void perf_output_sample(struct perf_output_handle *handle,
3006                         struct perf_event_header *header,
3007                         struct perf_sample_data *data,
3008                         struct perf_event *event)
3009 {
3010         u64 sample_type = data->type;
3011
3012         perf_output_put(handle, *header);
3013
3014         if (sample_type & PERF_SAMPLE_IP)
3015                 perf_output_put(handle, data->ip);
3016
3017         if (sample_type & PERF_SAMPLE_TID)
3018                 perf_output_put(handle, data->tid_entry);
3019
3020         if (sample_type & PERF_SAMPLE_TIME)
3021                 perf_output_put(handle, data->time);
3022
3023         if (sample_type & PERF_SAMPLE_ADDR)
3024                 perf_output_put(handle, data->addr);
3025
3026         if (sample_type & PERF_SAMPLE_ID)
3027                 perf_output_put(handle, data->id);
3028
3029         if (sample_type & PERF_SAMPLE_STREAM_ID)
3030                 perf_output_put(handle, data->stream_id);
3031
3032         if (sample_type & PERF_SAMPLE_CPU)
3033                 perf_output_put(handle, data->cpu_entry);
3034
3035         if (sample_type & PERF_SAMPLE_PERIOD)
3036                 perf_output_put(handle, data->period);
3037
3038         if (sample_type & PERF_SAMPLE_READ)
3039                 perf_output_read(handle, event);
3040
3041         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3042                 if (data->callchain) {
3043                         int size = 1;
3044
3045                         if (data->callchain)
3046                                 size += data->callchain->nr;
3047
3048                         size *= sizeof(u64);
3049
3050                         perf_output_copy(handle, data->callchain, size);
3051                 } else {
3052                         u64 nr = 0;
3053                         perf_output_put(handle, nr);
3054                 }
3055         }
3056
3057         if (sample_type & PERF_SAMPLE_RAW) {
3058                 if (data->raw) {
3059                         perf_output_put(handle, data->raw->size);
3060                         perf_output_copy(handle, data->raw->data,
3061                                          data->raw->size);
3062                 } else {
3063                         struct {
3064                                 u32     size;
3065                                 u32     data;
3066                         } raw = {
3067                                 .size = sizeof(u32),
3068                                 .data = 0,
3069                         };
3070                         perf_output_put(handle, raw);
3071                 }
3072         }
3073 }
3074
3075 void perf_prepare_sample(struct perf_event_header *header,
3076                          struct perf_sample_data *data,
3077                          struct perf_event *event,
3078                          struct pt_regs *regs)
3079 {
3080         u64 sample_type = event->attr.sample_type;
3081
3082         data->type = sample_type;
3083
3084         header->type = PERF_RECORD_SAMPLE;
3085         header->size = sizeof(*header);
3086
3087         header->misc = 0;
3088         header->misc |= perf_misc_flags(regs);
3089
3090         if (sample_type & PERF_SAMPLE_IP) {
3091                 data->ip = perf_instruction_pointer(regs);
3092
3093                 header->size += sizeof(data->ip);
3094         }
3095
3096         if (sample_type & PERF_SAMPLE_TID) {
3097                 /* namespace issues */
3098                 data->tid_entry.pid = perf_event_pid(event, current);
3099                 data->tid_entry.tid = perf_event_tid(event, current);
3100
3101                 header->size += sizeof(data->tid_entry);
3102         }
3103
3104         if (sample_type & PERF_SAMPLE_TIME) {
3105                 data->time = perf_clock();
3106
3107                 header->size += sizeof(data->time);
3108         }
3109
3110         if (sample_type & PERF_SAMPLE_ADDR)
3111                 header->size += sizeof(data->addr);
3112
3113         if (sample_type & PERF_SAMPLE_ID) {
3114                 data->id = primary_event_id(event);
3115
3116                 header->size += sizeof(data->id);
3117         }
3118
3119         if (sample_type & PERF_SAMPLE_STREAM_ID) {
3120                 data->stream_id = event->id;
3121
3122                 header->size += sizeof(data->stream_id);
3123         }
3124
3125         if (sample_type & PERF_SAMPLE_CPU) {
3126                 data->cpu_entry.cpu             = raw_smp_processor_id();
3127                 data->cpu_entry.reserved        = 0;
3128
3129                 header->size += sizeof(data->cpu_entry);
3130         }
3131
3132         if (sample_type & PERF_SAMPLE_PERIOD)
3133                 header->size += sizeof(data->period);
3134
3135         if (sample_type & PERF_SAMPLE_READ)
3136                 header->size += perf_event_read_size(event);
3137
3138         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3139                 int size = 1;
3140
3141                 data->callchain = perf_callchain(regs);
3142
3143                 if (data->callchain)
3144                         size += data->callchain->nr;
3145
3146                 header->size += size * sizeof(u64);
3147         }
3148
3149         if (sample_type & PERF_SAMPLE_RAW) {
3150                 int size = sizeof(u32);
3151
3152                 if (data->raw)
3153                         size += data->raw->size;
3154                 else
3155                         size += sizeof(u32);
3156
3157                 WARN_ON_ONCE(size & (sizeof(u64)-1));
3158                 header->size += size;
3159         }
3160 }
3161
3162 static void perf_event_output(struct perf_event *event, int nmi,
3163                                 struct perf_sample_data *data,
3164                                 struct pt_regs *regs)
3165 {
3166         struct perf_output_handle handle;
3167         struct perf_event_header header;
3168
3169         perf_prepare_sample(&header, data, event, regs);
3170
3171         if (perf_output_begin(&handle, event, header.size, nmi, 1))
3172                 return;
3173
3174         perf_output_sample(&handle, &header, data, event);
3175
3176         perf_output_end(&handle);
3177 }
3178
3179 /*
3180  * read event
3181  */
3182
3183 struct perf_read_event {
3184         struct perf_event_header        header;
3185
3186         u32                             pid;
3187         u32                             tid;
3188 };
3189
3190 static void
3191 perf_event_read_event(struct perf_event *event,
3192                         struct task_struct *task)
3193 {
3194         struct perf_output_handle handle;
3195         struct perf_read_event read_event = {
3196                 .header = {
3197                         .type = PERF_RECORD_READ,
3198                         .misc = 0,
3199                         .size = sizeof(read_event) + perf_event_read_size(event),
3200                 },
3201                 .pid = perf_event_pid(event, task),
3202                 .tid = perf_event_tid(event, task),
3203         };
3204         int ret;
3205
3206         ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3207         if (ret)
3208                 return;
3209
3210         perf_output_put(&handle, read_event);
3211         perf_output_read(&handle, event);
3212
3213         perf_output_end(&handle);
3214 }
3215
3216 /*
3217  * task tracking -- fork/exit
3218  *
3219  * enabled by: attr.comm | attr.mmap | attr.task
3220  */
3221
3222 struct perf_task_event {
3223         struct task_struct              *task;
3224         struct perf_event_context       *task_ctx;
3225
3226         struct {
3227                 struct perf_event_header        header;
3228
3229                 u32                             pid;
3230                 u32                             ppid;
3231                 u32                             tid;
3232                 u32                             ptid;
3233                 u64                             time;
3234         } event_id;
3235 };
3236
3237 static void perf_event_task_output(struct perf_event *event,
3238                                      struct perf_task_event *task_event)
3239 {
3240         struct perf_output_handle handle;
3241         int size;
3242         struct task_struct *task = task_event->task;
3243         int ret;
3244
3245         size  = task_event->event_id.header.size;
3246         ret = perf_output_begin(&handle, event, size, 0, 0);
3247
3248         if (ret)
3249                 return;
3250
3251         task_event->event_id.pid = perf_event_pid(event, task);
3252         task_event->event_id.ppid = perf_event_pid(event, current);
3253
3254         task_event->event_id.tid = perf_event_tid(event, task);
3255         task_event->event_id.ptid = perf_event_tid(event, current);
3256
3257         task_event->event_id.time = perf_clock();
3258
3259         perf_output_put(&handle, task_event->event_id);
3260
3261         perf_output_end(&handle);
3262 }
3263
3264 static int perf_event_task_match(struct perf_event *event)
3265 {
3266         if (event->attr.comm || event->attr.mmap || event->attr.task)
3267                 return 1;
3268
3269         return 0;
3270 }
3271
3272 static void perf_event_task_ctx(struct perf_event_context *ctx,
3273                                   struct perf_task_event *task_event)
3274 {
3275         struct perf_event *event;
3276
3277         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3278                 if (perf_event_task_match(event))
3279                         perf_event_task_output(event, task_event);
3280         }
3281 }
3282
3283 static void perf_event_task_event(struct perf_task_event *task_event)
3284 {
3285         struct perf_cpu_context *cpuctx;
3286         struct perf_event_context *ctx = task_event->task_ctx;
3287
3288         rcu_read_lock();
3289         cpuctx = &get_cpu_var(perf_cpu_context);
3290         perf_event_task_ctx(&cpuctx->ctx, task_event);
3291         put_cpu_var(perf_cpu_context);
3292
3293         if (!ctx)
3294                 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3295         if (ctx)
3296                 perf_event_task_ctx(ctx, task_event);
3297         rcu_read_unlock();
3298 }
3299
3300 static void perf_event_task(struct task_struct *task,
3301                               struct perf_event_context *task_ctx,
3302                               int new)
3303 {
3304         struct perf_task_event task_event;
3305
3306         if (!atomic_read(&nr_comm_events) &&
3307             !atomic_read(&nr_mmap_events) &&
3308             !atomic_read(&nr_task_events))
3309                 return;
3310
3311         task_event = (struct perf_task_event){
3312                 .task     = task,
3313                 .task_ctx = task_ctx,
3314                 .event_id    = {
3315                         .header = {
3316                                 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3317                                 .misc = 0,
3318                                 .size = sizeof(task_event.event_id),
3319                         },
3320                         /* .pid  */
3321                         /* .ppid */
3322                         /* .tid  */
3323                         /* .ptid */
3324                 },
3325         };
3326
3327         perf_event_task_event(&task_event);
3328 }
3329
3330 void perf_event_fork(struct task_struct *task)
3331 {
3332         perf_event_task(task, NULL, 1);
3333 }
3334
3335 /*
3336  * comm tracking
3337  */
3338
3339 struct perf_comm_event {
3340         struct task_struct      *task;
3341         char                    *comm;
3342         int                     comm_size;
3343
3344         struct {
3345                 struct perf_event_header        header;
3346
3347                 u32                             pid;
3348                 u32                             tid;
3349         } event_id;
3350 };
3351
3352 static void perf_event_comm_output(struct perf_event *event,
3353                                      struct perf_comm_event *comm_event)
3354 {
3355         struct perf_output_handle handle;
3356         int size = comm_event->event_id.header.size;
3357         int ret = perf_output_begin(&handle, event, size, 0, 0);
3358
3359         if (ret)
3360                 return;
3361
3362         comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3363         comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3364
3365         perf_output_put(&handle, comm_event->event_id);
3366         perf_output_copy(&handle, comm_event->comm,
3367                                    comm_event->comm_size);
3368         perf_output_end(&handle);
3369 }
3370
3371 static int perf_event_comm_match(struct perf_event *event)
3372 {
3373         if (event->attr.comm)
3374                 return 1;
3375
3376         return 0;
3377 }
3378
3379 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3380                                   struct perf_comm_event *comm_event)
3381 {
3382         struct perf_event *event;
3383
3384         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3385                 if (perf_event_comm_match(event))
3386                         perf_event_comm_output(event, comm_event);
3387         }
3388 }
3389
3390 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3391 {
3392         struct perf_cpu_context *cpuctx;
3393         struct perf_event_context *ctx;
3394         unsigned int size;
3395         char comm[TASK_COMM_LEN];
3396
3397         memset(comm, 0, sizeof(comm));
3398         strlcpy(comm, comm_event->task->comm, sizeof(comm));
3399         size = ALIGN(strlen(comm)+1, sizeof(u64));
3400
3401         comm_event->comm = comm;
3402         comm_event->comm_size = size;
3403
3404         comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3405
3406         rcu_read_lock();
3407         cpuctx = &get_cpu_var(perf_cpu_context);
3408         perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3409         put_cpu_var(perf_cpu_context);
3410
3411         /*
3412          * doesn't really matter which of the child contexts the
3413          * event ends up in.
3414          */
3415         ctx = rcu_dereference(current->perf_event_ctxp);
3416         if (ctx)
3417                 perf_event_comm_ctx(ctx, comm_event);
3418         rcu_read_unlock();
3419 }
3420
3421 void perf_event_comm(struct task_struct *task)
3422 {
3423         struct perf_comm_event comm_event;
3424
3425         if (task->perf_event_ctxp)
3426                 perf_event_enable_on_exec(task);
3427
3428         if (!atomic_read(&nr_comm_events))
3429                 return;
3430
3431         comm_event = (struct perf_comm_event){
3432                 .task   = task,
3433                 /* .comm      */
3434                 /* .comm_size */
3435                 .event_id  = {
3436                         .header = {
3437                                 .type = PERF_RECORD_COMM,
3438                                 .misc = 0,
3439                                 /* .size */
3440                         },
3441                         /* .pid */
3442                         /* .tid */
3443                 },
3444         };
3445
3446         perf_event_comm_event(&comm_event);
3447 }
3448
3449 /*
3450  * mmap tracking
3451  */
3452
3453 struct perf_mmap_event {
3454         struct vm_area_struct   *vma;
3455
3456         const char              *file_name;
3457         int                     file_size;
3458
3459         struct {
3460                 struct perf_event_header        header;
3461
3462                 u32                             pid;
3463                 u32                             tid;
3464                 u64                             start;
3465                 u64                             len;
3466                 u64                             pgoff;
3467         } event_id;
3468 };
3469
3470 static void perf_event_mmap_output(struct perf_event *event,
3471                                      struct perf_mmap_event *mmap_event)
3472 {
3473         struct perf_output_handle handle;
3474         int size = mmap_event->event_id.header.size;
3475         int ret = perf_output_begin(&handle, event, size, 0, 0);
3476
3477         if (ret)
3478                 return;
3479
3480         mmap_event->event_id.pid = perf_event_pid(event, current);
3481         mmap_event->event_id.tid = perf_event_tid(event, current);
3482
3483         perf_output_put(&handle, mmap_event->event_id);
3484         perf_output_copy(&handle, mmap_event->file_name,
3485                                    mmap_event->file_size);
3486         perf_output_end(&handle);
3487 }
3488
3489 static int perf_event_mmap_match(struct perf_event *event,
3490                                    struct perf_mmap_event *mmap_event)
3491 {
3492         if (event->attr.mmap)
3493                 return 1;
3494
3495         return 0;
3496 }
3497
3498 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3499                                   struct perf_mmap_event *mmap_event)
3500 {
3501         struct perf_event *event;
3502
3503         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3504                 if (perf_event_mmap_match(event, mmap_event))
3505                         perf_event_mmap_output(event, mmap_event);
3506         }
3507 }
3508
3509 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3510 {
3511         struct perf_cpu_context *cpuctx;
3512         struct perf_event_context *ctx;
3513         struct vm_area_struct *vma = mmap_event->vma;
3514         struct file *file = vma->vm_file;
3515         unsigned int size;
3516         char tmp[16];
3517         char *buf = NULL;
3518         const char *name;
3519
3520         memset(tmp, 0, sizeof(tmp));
3521
3522         if (file) {
3523                 /*
3524                  * d_path works from the end of the buffer backwards, so we
3525                  * need to add enough zero bytes after the string to handle
3526                  * the 64bit alignment we do later.
3527                  */
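                /*
                 * For example (illustrative numbers): a 4 character path
                 * takes strlen() + 1 = 5 bytes including the NUL, which the
                 * ALIGN() below rounds up to 8, so 3 bytes past the NUL get
                 * copied into the ring buffer (up to 7 in the worst case);
                 * the extra sizeof(u64) of zeroed allocation keeps that read
                 * in bounds and the padding all zeroes.
                 */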
3528                 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3529                 if (!buf) {
3530                         name = strncpy(tmp, "//enomem", sizeof(tmp));
3531                         goto got_name;
3532                 }
3533                 name = d_path(&file->f_path, buf, PATH_MAX);
3534                 if (IS_ERR(name)) {
3535                         name = strncpy(tmp, "//toolong", sizeof(tmp));
3536                         goto got_name;
3537                 }
3538         } else {
3539                 if (arch_vma_name(mmap_event->vma)) {
3540                         name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3541                                        sizeof(tmp));
3542                         goto got_name;
3543                 }
3544
3545                 if (!vma->vm_mm) {
3546                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
3547                         goto got_name;
3548                 }
3549
3550                 name = strncpy(tmp, "//anon", sizeof(tmp));
3551                 goto got_name;
3552         }
3553
3554 got_name:
3555         size = ALIGN(strlen(name)+1, sizeof(u64));
3556
3557         mmap_event->file_name = name;
3558         mmap_event->file_size = size;
3559
3560         mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3561
3562         rcu_read_lock();
3563         cpuctx = &get_cpu_var(perf_cpu_context);
3564         perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3565         put_cpu_var(perf_cpu_context);
3566
3567         /*
3568          * doesn't really matter which of the child contexts the
3569          * event ends up in.
3570          */
3571         ctx = rcu_dereference(current->perf_event_ctxp);
3572         if (ctx)
3573                 perf_event_mmap_ctx(ctx, mmap_event);
3574         rcu_read_unlock();
3575
3576         kfree(buf);
3577 }
3578
3579 void __perf_event_mmap(struct vm_area_struct *vma)
3580 {
3581         struct perf_mmap_event mmap_event;
3582
3583         if (!atomic_read(&nr_mmap_events))
3584                 return;
3585
3586         mmap_event = (struct perf_mmap_event){
3587                 .vma    = vma,
3588                 /* .file_name */
3589                 /* .file_size */
3590                 .event_id  = {
3591                         .header = {
3592                                 .type = PERF_RECORD_MMAP,
3593                                 .misc = 0,
3594                                 /* .size */
3595                         },
3596                         /* .pid */
3597                         /* .tid */
3598                         .start  = vma->vm_start,
3599                         .len    = vma->vm_end - vma->vm_start,
3600                         .pgoff  = vma->vm_pgoff,
3601                 },
3602         };
3603
3604         perf_event_mmap_event(&mmap_event);
3605 }
3606
3607 /*
3608  * IRQ throttle logging
3609  */
3610
3611 static void perf_log_throttle(struct perf_event *event, int enable)
3612 {
3613         struct perf_output_handle handle;
3614         int ret;
3615
3616         struct {
3617                 struct perf_event_header        header;
3618                 u64                             time;
3619                 u64                             id;
3620                 u64                             stream_id;
3621         } throttle_event = {
3622                 .header = {
3623                         .type = PERF_RECORD_THROTTLE,
3624                         .misc = 0,
3625                         .size = sizeof(throttle_event),
3626                 },
3627                 .time           = perf_clock(),
3628                 .id             = primary_event_id(event),
3629                 .stream_id      = event->id,
3630         };
3631
3632         if (enable)
3633                 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3634
3635         ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3636         if (ret)
3637                 return;
3638
3639         perf_output_put(&handle, throttle_event);
3640         perf_output_end(&handle);
3641 }
3642
3643 /*
3644  * Generic event overflow handling, sampling.
3645  */
3646
3647 static int __perf_event_overflow(struct perf_event *event, int nmi,
3648                                    int throttle, struct perf_sample_data *data,
3649                                    struct pt_regs *regs)
3650 {
3651         int events = atomic_read(&event->event_limit);
3652         struct hw_perf_event *hwc = &event->hw;
3653         int ret = 0;
3654
3655         throttle = (throttle && event->pmu->unthrottle != NULL);
3656
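        /*
         * A rough worked example of the throttling check below (numbers are
         * illustrative): hwc->interrupts is (roughly) the number of overflow
         * interrupts seen since it was last reset from the timer tick path,
         * so with HZ = 1000 and sysctl_perf_event_sample_rate = 100000 an
         * event gets throttled once it takes more than 100000 / 1000 = 100
         * interrupts within a single tick (see perf_log_throttle() above).
         */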
3657         if (!throttle) {
3658                 hwc->interrupts++;
3659         } else {
3660                 if (hwc->interrupts != MAX_INTERRUPTS) {
3661                         hwc->interrupts++;
3662                         if (HZ * hwc->interrupts >
3663                                         (u64)sysctl_perf_event_sample_rate) {
3664                                 hwc->interrupts = MAX_INTERRUPTS;
3665                                 perf_log_throttle(event, 0);
3666                                 ret = 1;
3667                         }
3668                 } else {
3669                         /*
3670                          * Keep re-disabling the event even though we disabled
3671                          * it on the previous pass - just in case we raced with
3672                          * a sched-in and it got enabled again:
3673                          */
3674                         ret = 1;
3675                 }
3676         }
3677
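        /*
         * Illustrative numbers for the freq path below: if delta works out
         * to 250000 ns (i.e. less than one tick), NSEC_PER_SEC / delta = 4000
         * is passed to perf_adjust_period(), which adjusts the sampling
         * period toward the requested attr.sample_freq.
         */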
3678         if (event->attr.freq) {
3679                 u64 now = perf_clock();
3680                 s64 delta = now - hwc->freq_stamp;
3681
3682                 hwc->freq_stamp = now;
3683
3684                 if (delta > 0 && delta < TICK_NSEC)
3685                         perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3686         }
3687
3688         /*
3689          * XXX event_limit might not quite work as expected on inherited
3690          * events
3691          */
3692
3693         event->pending_kill = POLL_IN;
3694         if (events && atomic_dec_and_test(&event->event_limit)) {
3695                 ret = 1;
3696                 event->pending_kill = POLL_HUP;
3697                 if (nmi) {
3698                         event->pending_disable = 1;
3699                         perf_pending_queue(&event->pending,
3700                                            perf_pending_event);
3701                 } else
3702                         perf_event_disable(event);
3703         }
3704
3705         if (event->overflow_handler)
3706                 event->overflow_handler(event, nmi, data, regs);
3707         else
3708                 perf_event_output(event, nmi, data, regs);
3709
3710         return ret;
3711 }
3712
3713 int perf_event_overflow(struct perf_event *event, int nmi,
3714                           struct perf_sample_data *data,
3715                           struct pt_regs *regs)
3716 {
3717         return __perf_event_overflow(event, nmi, 1, data, regs);
3718 }
3719
3720 /*
3721  * Generic software event infrastructure
3722  */
3723
3724 /*
3725  * We directly increment event->count and keep a second value in
3726  * event->hw.period_left to count intervals. This period event
3727  * is kept in the range [-sample_period, 0] so that we can use the
3728  * sign as trigger.
3729  */
3730
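/*
 * Worked example of the scheme above (illustrative numbers): with
 * sample_period = 100, period_left normally sits in [-100, 0].  Once enough
 * events have been added that it reaches +30, the function below computes
 * nr = (100 + 30) / 100 = 1 overflow to report and rewinds period_left to
 * 30 - 100 = -70, i.e. 70 more events until the next overflow.
 */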
3731 static u64 perf_swevent_set_period(struct perf_event *event)
3732 {
3733         struct hw_perf_event *hwc = &event->hw;
3734         u64 period = hwc->last_period;
3735         u64 nr, offset;
3736         s64 old, val;
3737
3738         hwc->last_period = hwc->sample_period;
3739
3740 again:
3741         old = val = atomic64_read(&hwc->period_left);
3742         if (val < 0)
3743                 return 0;
3744
3745         nr = div64_u64(period + val, period);
3746         offset = nr * period;
3747         val -= offset;
3748         if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3749                 goto again;
3750
3751         return nr;
3752 }
3753
3754 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
3755                                     int nmi, struct perf_sample_data *data,
3756                                     struct pt_regs *regs)
3757 {
3758         struct hw_perf_event *hwc = &event->hw;
3759         int throttle = 0;
3760
3761         data->period = event->hw.last_period;
3762         if (!overflow)
3763                 overflow = perf_swevent_set_period(event);
3764
3765         if (hwc->interrupts == MAX_INTERRUPTS)
3766                 return;
3767
3768         for (; overflow; overflow--) {
3769                 if (__perf_event_overflow(event, nmi, throttle,
3770                                             data, regs)) {
3771                         /*
3772                          * We inhibit the overflow from happening when
3773                          * hwc->interrupts == MAX_INTERRUPTS.
3774                          */
3775                         break;
3776                 }
3777                 throttle = 1;
3778         }
3779 }
3780
3781 static void perf_swevent_unthrottle(struct perf_event *event)
3782 {
3783         /*
3784          * Nothing to do, we already reset hwc->interrupts.
3785          */
3786 }
3787
3788 static void perf_swevent_add(struct perf_event *event, u64 nr,
3789                                int nmi, struct perf_sample_data *data,
3790                                struct pt_regs *regs)
3791 {
3792         struct hw_perf_event *hwc = &event->hw;
3793
3794         atomic64_add(nr, &event->count);
3795
3796         if (!regs)
3797                 return;
3798
3799         if (!hwc->sample_period)
3800                 return;
3801
3802         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
3803                 return perf_swevent_overflow(event, 1, nmi, data, regs);
3804
3805         if (atomic64_add_negative(nr, &hwc->period_left))
3806                 return;
3807
3808         perf_swevent_overflow(event, 0, nmi, data, regs);
3809 }
3810
3811 static int perf_swevent_is_counting(struct perf_event *event)
3812 {
3813         /*
3814          * The event is active, we're good!
3815          */
3816         if (event->state == PERF_EVENT_STATE_ACTIVE)
3817                 return 1;
3818
3819         /*
3820          * The event is off/error, not counting.
3821          */
3822         if (event->state != PERF_EVENT_STATE_INACTIVE)
3823                 return 0;
3824
3825         /*
3826          * The event is inactive: if the context is active, we're
3827          * part of a group that didn't make it onto the 'pmu', so
3828          * we're not counting.
3829          */
3830         if (event->ctx->is_active)
3831                 return 0;
3832
3833         /*
3834          * We're inactive and the context is too; this means the
3835          * task is scheduled out, and we're counting events that
3836          * happen to us, like migration events.
3837          */
3838         return 1;
3839 }
3840
3841 static int perf_tp_event_match(struct perf_event *event,
3842                                 struct perf_sample_data *data);
3843
3844 static int perf_exclude_event(struct perf_event *event,
3845                               struct pt_regs *regs)
3846 {
3847         if (regs) {
3848                 if (event->attr.exclude_user && user_mode(regs))
3849                         return 1;
3850
3851                 if (event->attr.exclude_kernel && !user_mode(regs))
3852                         return 1;
3853         }
3854
3855         return 0;
3856 }
3857
3858 static int perf_swevent_match(struct perf_event *event,
3859                                 enum perf_type_id type,
3860                                 u32 event_id,
3861                                 struct perf_sample_data *data,
3862                                 struct pt_regs *regs)
3863 {
3864         if (!perf_swevent_is_counting(event))
3865                 return 0;
3866
3867         if (event->attr.type != type)
3868                 return 0;
3869
3870         if (event->attr.config != event_id)
3871                 return 0;
3872
3873         if (perf_exclude_event(event, regs))
3874                 return 0;
3875
3876         if (event->attr.type == PERF_TYPE_TRACEPOINT &&
3877             !perf_tp_event_match(event, data))
3878                 return 0;
3879
3880         return 1;
3881 }
3882
3883 static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3884                                      enum perf_type_id type,
3885                                      u32 event_id, u64 nr, int nmi,
3886                                      struct perf_sample_data *data,
3887                                      struct pt_regs *regs)
3888 {
3889         struct perf_event *event;
3890
3891         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3892                 if (perf_swevent_match(event, type, event_id, data, regs))
3893                         perf_swevent_add(event, nr, nmi, data, regs);
3894         }
3895 }
3896
3897 int perf_swevent_get_recursion_context(void)
3898 {
3899         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3900         int rctx;
3901
3902         if (in_nmi())
3903                 rctx = 3;
3904         else if (in_irq())
3905                 rctx = 2;
3906         else if (in_softirq())
3907                 rctx = 1;
3908         else
3909                 rctx = 0;
3910
3911         if (cpuctx->recursion[rctx]) {
3912                 put_cpu_var(perf_cpu_context);
3913                 return -1;
3914         }
3915
3916         cpuctx->recursion[rctx]++;
3917         barrier();
3918
3919         return rctx;
3920 }
3921 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
3922
3923 void perf_swevent_put_recursion_context(int rctx)
3924 {
3925         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3926         barrier();
3927         cpuctx->recursion[rctx]--;
3928         put_cpu_var(perf_cpu_context);
3929 }
3930 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
3931
3932 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3933                                     u64 nr, int nmi,
3934                                     struct perf_sample_data *data,
3935                                     struct pt_regs *regs)
3936 {
3937         struct perf_cpu_context *cpuctx;
3938         struct perf_event_context *ctx;
3939
3940         cpuctx = &__get_cpu_var(perf_cpu_context);
3941         rcu_read_lock();
3942         perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3943                                  nr, nmi, data, regs);
3944         /*
3945          * doesn't really matter which of the child contexts the
3946          * event ends up in.
3947          */
3948         ctx = rcu_dereference(current->perf_event_ctxp);
3949         if (ctx)
3950                 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3951         rcu_read_unlock();
3952 }
3953
3954 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3955                             struct pt_regs *regs, u64 addr)
3956 {
3957         struct perf_sample_data data;
3958         int rctx;
3959
3960         rctx = perf_swevent_get_recursion_context();
3961         if (rctx < 0)
3962                 return;
3963
3964         data.addr = addr;
3965         data.raw  = NULL;
3966
3967         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
3968
3969         perf_swevent_put_recursion_context(rctx);
3970 }
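/*
 * Callers normally use the perf_sw_event() wrapper from
 * <linux/perf_event.h>, which checks perf_swevent_enabled[] before falling
 * through to __perf_sw_event() above.  A typical call site looks roughly
 * like this (illustrative, page fault accounting):
 *
 *        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */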
3971
3972 static void perf_swevent_read(struct perf_event *event)
3973 {
3974 }
3975
3976 static int perf_swevent_enable(struct perf_event *event)
3977 {
3978         struct hw_perf_event *hwc = &event->hw;
3979
3980         if (hwc->sample_period) {
3981                 hwc->last_period = hwc->sample_period;
3982                 perf_swevent_set_period(event);
3983         }
3984         return 0;
3985 }
3986
3987 static void perf_swevent_disable(struct perf_event *event)
3988 {
3989 }
3990
3991 static const struct pmu perf_ops_generic = {
3992         .enable         = perf_swevent_enable,
3993         .disable        = perf_swevent_disable,
3994         .read           = perf_swevent_read,
3995         .unthrottle     = perf_swevent_unthrottle,
3996 };
3997
3998 /*
3999  * hrtimer based swevent callback
4000  */
4001
4002 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4003 {
4004         enum hrtimer_restart ret = HRTIMER_RESTART;
4005         struct perf_sample_data data;
4006         struct pt_regs *regs;
4007         struct perf_event *event;
4008         u64 period;
4009
4010         event   = container_of(hrtimer, struct perf_event, hw.hrtimer);
4011         event->pmu->read(event);
4012
4013         data.addr = 0;
4014         data.period = event->hw.last_period;
4015         regs = get_irq_regs();
4016         /*
4017          * In case we exclude kernel IPs or are somehow not in interrupt
4018          * context, provide the next best thing, the user IP.
4019          */
4020         if ((event->attr.exclude_kernel || !regs) &&
4021                         !event->attr.exclude_user)
4022                 regs = task_pt_regs(current);
4023
4024         if (regs) {
4025                 if (!(event->attr.exclude_idle && current->pid == 0))
4026                         if (perf_event_overflow(event, 0, &data, regs))
4027                                 ret = HRTIMER_NORESTART;
4028         }
4029
4030         period = max_t(u64, 10000, event->hw.sample_period);
4031         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4032
4033         return ret;
4034 }
4035
4036 static void perf_swevent_start_hrtimer(struct perf_event *event)
4037 {
4038         struct hw_perf_event *hwc = &event->hw;
4039
4040         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4041         hwc->hrtimer.function = perf_swevent_hrtimer;
4042         if (hwc->sample_period) {
4043                 u64 period;
4044
4045                 if (hwc->remaining) {
4046                         if (hwc->remaining < 0)
4047                                 period = 10000;
4048                         else
4049                                 period = hwc->remaining;
4050                         hwc->remaining = 0;
4051                 } else {
4052                         period = max_t(u64, 10000, hwc->sample_period);
4053                 }
4054                 __hrtimer_start_range_ns(&hwc->hrtimer,
4055                                 ns_to_ktime(period), 0,
4056                                 HRTIMER_MODE_REL, 0);
4057         }
4058 }
4059
4060 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4061 {
4062         struct hw_perf_event *hwc = &event->hw;
4063
4064         if (hwc->sample_period) {
4065                 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4066                 hwc->remaining = ktime_to_ns(remaining);
4067
4068                 hrtimer_cancel(&hwc->hrtimer);
4069         }
4070 }
4071
4072 /*
4073  * Software event: cpu wall time clock
4074  */
4075
4076 static void cpu_clock_perf_event_update(struct perf_event *event)
4077 {
4078         int cpu = raw_smp_processor_id();
4079         s64 prev;
4080         u64 now;
4081
4082         now = cpu_clock(cpu);
4083         prev = atomic64_read(&event->hw.prev_count);
4084         atomic64_set(&event->hw.prev_count, now);
4085         atomic64_add(now - prev, &event->count);
4086 }
4087
4088 static int cpu_clock_perf_event_enable(struct perf_event *event)
4089 {
4090         struct hw_perf_event *hwc = &event->hw;
4091         int cpu = raw_smp_processor_id();
4092
4093         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
4094         perf_swevent_start_hrtimer(event);
4095
4096         return 0;
4097 }
4098
4099 static void cpu_clock_perf_event_disable(struct perf_event *event)
4100 {
4101         perf_swevent_cancel_hrtimer(event);
4102         cpu_clock_perf_event_update(event);
4103 }
4104
4105 static void cpu_clock_perf_event_read(struct perf_event *event)
4106 {
4107         cpu_clock_perf_event_update(event);
4108 }
4109
4110 static const struct pmu perf_ops_cpu_clock = {
4111         .enable         = cpu_clock_perf_event_enable,
4112         .disable        = cpu_clock_perf_event_disable,
4113         .read           = cpu_clock_perf_event_read,
4114 };
4115
4116 /*
4117  * Software event: task time clock
4118  */
4119
4120 static void task_clock_perf_event_update(struct perf_event *event, u64 now)
4121 {
4122         u64 prev;
4123         s64 delta;
4124
4125         prev = atomic64_xchg(&event->hw.prev_count, now);
4126         delta = now - prev;
4127         atomic64_add(delta, &event->count);
4128 }
4129
4130 static int task_clock_perf_event_enable(struct perf_event *event)
4131 {
4132         struct hw_perf_event *hwc = &event->hw;
4133         u64 now;
4134
4135         now = event->ctx->time;
4136
4137         atomic64_set(&hwc->prev_count, now);
4138
4139         perf_swevent_start_hrtimer(event);
4140
4141         return 0;
4142 }
4143
4144 static void task_clock_perf_event_disable(struct perf_event *event)
4145 {
4146         perf_swevent_cancel_hrtimer(event);
4147         task_clock_perf_event_update(event, event->ctx->time);
4148
4149 }
4150
4151 static void task_clock_perf_event_read(struct perf_event *event)
4152 {
4153         u64 time;
4154
4155         if (!in_nmi()) {
4156                 update_context_time(event->ctx);
4157                 time = event->ctx->time;
4158         } else {
4159                 u64 now = perf_clock();
4160                 u64 delta = now - event->ctx->timestamp;
4161                 time = event->ctx->time + delta;
4162         }
4163
4164         task_clock_perf_event_update(event, time);
4165 }
4166
4167 static const struct pmu perf_ops_task_clock = {
4168         .enable         = task_clock_perf_event_enable,
4169         .disable        = task_clock_perf_event_disable,
4170         .read           = task_clock_perf_event_read,
4171 };
4172
4173 #ifdef CONFIG_EVENT_PROFILE
4174
4175 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4176                           int entry_size)
4177 {
4178         struct perf_raw_record raw = {
4179                 .size = entry_size,
4180                 .data = record,
4181         };
4182
4183         struct perf_sample_data data = {
4184                 .addr = addr,
4185                 .raw = &raw,
4186         };
4187
4188         struct pt_regs *regs = get_irq_regs();
4189
4190         if (!regs)
4191                 regs = task_pt_regs(current);
4192
4193         /* Trace events are already protected against recursion */
4194         do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
4195                                 &data, regs);
4196 }
4197 EXPORT_SYMBOL_GPL(perf_tp_event);
4198
4199 static int perf_tp_event_match(struct perf_event *event,
4200                                 struct perf_sample_data *data)
4201 {
4202         void *record = data->raw->data;
4203
4204         if (likely(!event->filter) || filter_match_preds(event->filter, record))
4205                 return 1;
4206         return 0;
4207 }
4208
4209 static void tp_perf_event_destroy(struct perf_event *event)
4210 {
4211         ftrace_profile_disable(event->attr.config);
4212 }
4213
4214 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4215 {
4216         /*
4217          * Raw tracepoint data is a severe data leak, so only allow root
4218          * to have it.
4219          */
4220         if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4221                         perf_paranoid_tracepoint_raw() &&
4222                         !capable(CAP_SYS_ADMIN))
4223                 return ERR_PTR(-EPERM);
4224
4225         if (ftrace_profile_enable(event->attr.config))
4226                 return NULL;
4227
4228         event->destroy = tp_perf_event_destroy;
4229
4230         return &perf_ops_generic;
4231 }
4232
4233 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4234 {
4235         char *filter_str;
4236         int ret;
4237
4238         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4239                 return -EINVAL;
4240
4241         filter_str = strndup_user(arg, PAGE_SIZE);
4242         if (IS_ERR(filter_str))
4243                 return PTR_ERR(filter_str);
4244
4245         ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4246
4247         kfree(filter_str);
4248         return ret;
4249 }
4250
4251 static void perf_event_free_filter(struct perf_event *event)
4252 {
4253         ftrace_profile_free_filter(event);
4254 }
4255
4256 #else
4257
4258 static int perf_tp_event_match(struct perf_event *event,
4259                                 struct perf_sample_data *data)
4260 {
4261         return 1;
4262 }
4263
4264 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4265 {
4266         return NULL;
4267 }
4268
4269 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4270 {
4271         return -ENOENT;
4272 }
4273
4274 static void perf_event_free_filter(struct perf_event *event)
4275 {
4276 }
4277
4278 #endif /* CONFIG_EVENT_PROFILE */
4279
4280 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4281 static void bp_perf_event_destroy(struct perf_event *event)
4282 {
4283         release_bp_slot(event);
4284 }
4285
4286 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4287 {
4288         int err;
4289
4290         err = register_perf_hw_breakpoint(bp);
4291         if (err)
4292                 return ERR_PTR(err);
4293
4294         bp->destroy = bp_perf_event_destroy;
4295
4296         return &perf_ops_bp;
4297 }
4298
4299 void perf_bp_event(struct perf_event *bp, void *data)
4300 {
4301         struct perf_sample_data sample;
4302         struct pt_regs *regs = data;
4303
4304         sample.addr = bp->attr.bp_addr;
4305
4306         if (!perf_exclude_event(bp, regs))
4307                 perf_swevent_add(bp, 1, 1, &sample, regs);
4308 }
4309 #else
4310 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4311 {
4312         return NULL;
4313 }
4314
4315 void perf_bp_event(struct perf_event *bp, void *regs)
4316 {
4317 }
4318 #endif
4319
4320 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4321
4322 static void sw_perf_event_destroy(struct perf_event *event)
4323 {
4324         u64 event_id = event->attr.config;
4325
4326         WARN_ON(event->parent);
4327
4328         atomic_dec(&perf_swevent_enabled[event_id]);
4329 }
4330
4331 static const struct pmu *sw_perf_event_init(struct perf_event *event)
4332 {
4333         const struct pmu *pmu = NULL;
4334         u64 event_id = event->attr.config;
4335
4336         /*
4337          * Software events (currently) can't in general distinguish
4338          * between user, kernel and hypervisor events.
4339          * However, context switches and cpu migrations are considered
4340          * to be kernel events, and page faults are never hypervisor
4341          * events.
4342          */
4343         switch (event_id) {
4344         case PERF_COUNT_SW_CPU_CLOCK:
4345                 pmu = &perf_ops_cpu_clock;
4346
4347                 break;
4348         case PERF_COUNT_SW_TASK_CLOCK:
4349                 /*
4350                  * If the user instantiates this as a per-cpu event,
4351                  * use the cpu_clock event instead.
4352                  */
4353                 if (event->ctx->task)
4354                         pmu = &perf_ops_task_clock;
4355                 else
4356                         pmu = &perf_ops_cpu_clock;
4357
4358                 break;
4359         case PERF_COUNT_SW_PAGE_FAULTS:
4360         case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4361         case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4362         case PERF_COUNT_SW_CONTEXT_SWITCHES:
4363         case PERF_COUNT_SW_CPU_MIGRATIONS:
4364         case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4365         case PERF_COUNT_SW_EMULATION_FAULTS:
4366                 if (!event->parent) {
4367                         atomic_inc(&perf_swevent_enabled[event_id]);
4368                         event->destroy = sw_perf_event_destroy;
4369                 }
4370                 pmu = &perf_ops_generic;
4371                 break;
4372         }
4373
4374         return pmu;
4375 }
4376
4377 /*
4378  * Allocate and initialize an event structure
4379  */
4380 static struct perf_event *
4381 perf_event_alloc(struct perf_event_attr *attr,
4382                    int cpu,
4383                    struct perf_event_context *ctx,
4384                    struct perf_event *group_leader,
4385                    struct perf_event *parent_event,
4386                    perf_overflow_handler_t overflow_handler,
4387                    gfp_t gfpflags)
4388 {
4389         const struct pmu *pmu;
4390         struct perf_event *event;
4391         struct hw_perf_event *hwc;
4392         long err;
4393
4394         event = kzalloc(sizeof(*event), gfpflags);
4395         if (!event)
4396                 return ERR_PTR(-ENOMEM);
4397
4398         /*
4399          * Single events are their own group leaders, with an
4400          * empty sibling list:
4401          */
4402         if (!group_leader)
4403                 group_leader = event;
4404
4405         mutex_init(&event->child_mutex);
4406         INIT_LIST_HEAD(&event->child_list);
4407
4408         INIT_LIST_HEAD(&event->group_entry);
4409         INIT_LIST_HEAD(&event->event_entry);
4410         INIT_LIST_HEAD(&event->sibling_list);
4411         init_waitqueue_head(&event->waitq);
4412
4413         mutex_init(&event->mmap_mutex);
4414
4415         event->cpu              = cpu;
4416         event->attr             = *attr;
4417         event->group_leader     = group_leader;
4418         event->pmu              = NULL;
4419         event->ctx              = ctx;
4420         event->oncpu            = -1;
4421
4422         event->parent           = parent_event;
4423
4424         event->ns               = get_pid_ns(current->nsproxy->pid_ns);
4425         event->id               = atomic64_inc_return(&perf_event_id);
4426
4427         event->state            = PERF_EVENT_STATE_INACTIVE;
4428
4429         if (!overflow_handler && parent_event)
4430                 overflow_handler = parent_event->overflow_handler;
4431
4432         event->overflow_handler = overflow_handler;
4433
4434         if (attr->disabled)
4435                 event->state = PERF_EVENT_STATE_OFF;
4436
4437         pmu = NULL;
4438
4439         hwc = &event->hw;
4440         hwc->sample_period = attr->sample_period;
4441         if (attr->freq && attr->sample_freq)
4442                 hwc->sample_period = 1;
4443         hwc->last_period = hwc->sample_period;
4444
4445         atomic64_set(&hwc->period_left, hwc->sample_period);
4446
4447         /*
4448          * we currently do not support PERF_FORMAT_GROUP on inherited events
4449          */
4450         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4451                 goto done;
4452
4453         switch (attr->type) {
4454         case PERF_TYPE_RAW:
4455         case PERF_TYPE_HARDWARE:
4456         case PERF_TYPE_HW_CACHE:
4457                 pmu = hw_perf_event_init(event);
4458                 break;
4459
4460         case PERF_TYPE_SOFTWARE:
4461                 pmu = sw_perf_event_init(event);
4462                 break;
4463
4464         case PERF_TYPE_TRACEPOINT:
4465                 pmu = tp_perf_event_init(event);
4466                 break;
4467
4468         case PERF_TYPE_BREAKPOINT:
4469                 pmu = bp_perf_event_init(event);
4470                 break;
4471
4473         default:
4474                 break;
4475         }
4476 done:
4477         err = 0;
4478         if (!pmu)
4479                 err = -EINVAL;
4480         else if (IS_ERR(pmu))
4481                 err = PTR_ERR(pmu);
4482
4483         if (err) {
4484                 if (event->ns)
4485                         put_pid_ns(event->ns);
4486                 kfree(event);
4487                 return ERR_PTR(err);
4488         }
4489
4490         event->pmu = pmu;
4491
4492         if (!event->parent) {
4493                 atomic_inc(&nr_events);
4494                 if (event->attr.mmap)
4495                         atomic_inc(&nr_mmap_events);
4496                 if (event->attr.comm)
4497                         atomic_inc(&nr_comm_events);
4498                 if (event->attr.task)
4499                         atomic_inc(&nr_task_events);
4500         }
4501
4502         return event;
4503 }
4504
4505 static int perf_copy_attr(struct perf_event_attr __user *uattr,
4506                           struct perf_event_attr *attr)
4507 {
4508         u32 size;
4509         int ret;
4510
4511         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4512                 return -EFAULT;
4513
4514         /*
4515          * zero the full structure, so that a short copy leaves the remaining fields zeroed.
4516          */
4517         memset(attr, 0, sizeof(*attr));
4518
4519         ret = get_user(size, &uattr->size);
4520         if (ret)
4521                 return ret;
4522
4523         if (size > PAGE_SIZE)   /* silly large */
4524                 goto err_size;
4525
4526         if (!size)              /* abi compat */
4527                 size = PERF_ATTR_SIZE_VER0;
4528
4529         if (size < PERF_ATTR_SIZE_VER0)
4530                 goto err_size;
4531
4532         /*
4533          * If we're handed a bigger struct than we know of,
4534          * ensure all the unknown bits are 0 - i.e. new
4535          * user-space does not rely on any kernel feature
4536          * extensions we don't know about yet.
4537          */
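        /*
         * Concretely (illustrative sizes): if sizeof(*attr) is 64 and user
         * space passes uattr->size == 96, the 32 trailing bytes must all be
         * zero or we fail with -E2BIG.  If an older binary passes a smaller
         * size (or 0, mapped to PERF_ATTR_SIZE_VER0 above), the short
         * copy_from_user() below simply leaves the newer fields zeroed
         * thanks to the earlier memset().
         */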
4538         if (size > sizeof(*attr)) {
4539                 unsigned char __user *addr;
4540                 unsigned char __user *end;
4541                 unsigned char val;
4542
4543                 addr = (void __user *)uattr + sizeof(*attr);
4544                 end  = (void __user *)uattr + size;
4545
4546                 for (; addr < end; addr++) {
4547                         ret = get_user(val, addr);
4548                         if (ret)
4549                                 return ret;
4550                         if (val)
4551                                 goto err_size;
4552                 }
4553                 size = sizeof(*attr);
4554         }
4555
4556         ret = copy_from_user(attr, uattr, size);
4557         if (ret)
4558                 return -EFAULT;
4559
4560         /*
4561          * If the type exists, the corresponding creation will verify
4562          * the attr->config.
4563          */
4564         if (attr->type >= PERF_TYPE_MAX)
4565                 return -EINVAL;
4566
4567         if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
4568                 return -EINVAL;
4569
4570         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4571                 return -EINVAL;
4572
4573         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4574                 return -EINVAL;
4575
4576 out:
4577         return ret;
4578
4579 err_size:
4580         put_user(sizeof(*attr), &uattr->size);
4581         ret = -E2BIG;
4582         goto out;
4583 }
4584
4585 static int perf_event_set_output(struct perf_event *event, int output_fd)
4586 {
4587         struct perf_event *output_event = NULL;
4588         struct file *output_file = NULL;
4589         struct perf_event *old_output;
4590         int fput_needed = 0;
4591         int ret = -EINVAL;
4592
4593         if (!output_fd)
4594                 goto set;
4595
4596         output_file = fget_light(output_fd, &fput_needed);
4597         if (!output_file)
4598                 return -EBADF;
4599
4600         if (output_file->f_op != &perf_fops)
4601                 goto out;
4602
4603         output_event = output_file->private_data;
4604
4605         /* Don't chain output fds */
4606         if (output_event->output)
4607                 goto out;
4608
4609         /* Don't set an output fd when we already have an output channel */
4610         if (event->data)
4611                 goto out;
4612
4613         atomic_long_inc(&output_file->f_count);
4614
4615 set:
4616         mutex_lock(&event->mmap_mutex);
4617         old_output = event->output;
4618         rcu_assign_pointer(event->output, output_event);
4619         mutex_unlock(&event->mmap_mutex);
4620
4621         if (old_output) {
4622                 /*
4623                  * we need to make sure no existing perf_output_*()
4624                  * is still referencing this event.
4625                  */
4626                 synchronize_rcu();
4627                 fput(old_output->filp);
4628         }
4629
4630         ret = 0;
4631 out:
4632         fput_light(output_file, fput_needed);
4633         return ret;
4634 }
4635
4636 /**
4637  * sys_perf_event_open - open a performance event, associate it with a task/cpu
4638  *
4639  * @attr_uptr:  event_id type attributes for monitoring/sampling
4640  * @pid:                target pid
4641  * @cpu:                target cpu
4642  * @group_fd:           group leader event fd
 * @flags:              perf event flags (PERF_FLAG_*)
4643  */
4644 SYSCALL_DEFINE5(perf_event_open,
4645                 struct perf_event_attr __user *, attr_uptr,
4646                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4647 {
4648         struct perf_event *event, *group_leader;
4649         struct perf_event_attr attr;
4650         struct perf_event_context *ctx;
4651         struct file *event_file = NULL;
4652         struct file *group_file = NULL;
4653         int fput_needed = 0;
4654         int fput_needed2 = 0;
4655         int err;
4656
4657         /* for future expandability... */
4658         if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4659                 return -EINVAL;
4660
4661         err = perf_copy_attr(attr_uptr, &attr);
4662         if (err)
4663                 return err;
4664
4665         if (!attr.exclude_kernel) {
4666                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4667                         return -EACCES;
4668         }
4669
4670         if (attr.freq) {
4671                 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4672                         return -EINVAL;
4673         }
4674
4675         /*
4676          * Get the target context (task or percpu):
4677          */
4678         ctx = find_get_context(pid, cpu);
4679         if (IS_ERR(ctx))
4680                 return PTR_ERR(ctx);
4681
4682         /*
4683          * Look up the group leader (we will attach this event to it):
4684          */
4685         group_leader = NULL;
4686         if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
4687                 err = -EINVAL;
4688                 group_file = fget_light(group_fd, &fput_needed);
4689                 if (!group_file)
4690                         goto err_put_context;
4691                 if (group_file->f_op != &perf_fops)
4692                         goto err_put_context;
4693
4694                 group_leader = group_file->private_data;
4695                 /*
4696                  * Do not allow a recursive hierarchy (this new sibling
4697                  * becoming part of another group-sibling):
4698                  */
4699                 if (group_leader->group_leader != group_leader)
4700                         goto err_put_context;
4701                 /*
4702                  * Do not allow attaching to a group in a different
4703                  * task or CPU context:
4704                  */
4705                 if (group_leader->ctx != ctx)
4706                         goto err_put_context;
4707                 /*
4708                  * Only a group leader can be exclusive or pinned
4709                  */
4710                 if (attr.exclusive || attr.pinned)
4711                         goto err_put_context;
4712         }
4713
4714         event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4715                                      NULL, NULL, GFP_KERNEL);
4716         err = PTR_ERR(event);
4717         if (IS_ERR(event))
4718                 goto err_put_context;
4719
4720         err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
4721         if (err < 0)
4722                 goto err_free_put_context;
4723
4724         event_file = fget_light(err, &fput_needed2);
4725         if (!event_file)
4726                 goto err_free_put_context;
4727
4728         if (flags & PERF_FLAG_FD_OUTPUT) {
4729                 err = perf_event_set_output(event, group_fd);
4730                 if (err)
4731                         goto err_fput_free_put_context;
4732         }
4733
4734         event->filp = event_file;
4735         WARN_ON_ONCE(ctx->parent_ctx);
4736         mutex_lock(&ctx->mutex);
4737         perf_install_in_context(ctx, event, cpu);
4738         ++ctx->generation;
4739         mutex_unlock(&ctx->mutex);
4740
4741         event->owner = current;
4742         get_task_struct(current);
4743         mutex_lock(&current->perf_event_mutex);
4744         list_add_tail(&event->owner_entry, &current->perf_event_list);
4745         mutex_unlock(&current->perf_event_mutex);
4746
4747 err_fput_free_put_context:
4748         fput_light(event_file, fput_needed2);
4749
4750 err_free_put_context:
4751         if (err < 0)
4752                 kfree(event);
4753
4754 err_put_context:
4755         if (err < 0)
4756                 put_ctx(ctx);
4757
4758         fput_light(group_file, fput_needed);
4759
4760         return err;
4761 }
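/*
 * A minimal user space sketch of the syscall above (illustrative only, not
 * kernel code): count CPU cycles of the calling task on any CPU, with no
 * group leader and no flags, i.e. pid = 0, cpu = -1, group_fd = -1,
 * flags = 0:
 *
 *        struct perf_event_attr attr = {
 *                .type   = PERF_TYPE_HARDWARE,
 *                .size   = sizeof(attr),
 *                .config = PERF_COUNT_HW_CPU_CYCLES,
 *        };
 *        u64 count;
 *        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *        ... run the workload ...
 *        read(fd, &count, sizeof(count));
 */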
4762
4763 /**
4764  * perf_event_create_kernel_counter
4765  *
4766  * @attr: attributes of the counter to create
4767  * @cpu: cpu to which the counter is bound
4768  * @pid: task to profile
 * @overflow_handler: callback to run when the counter overflows
4769  */
4770 struct perf_event *
4771 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
4772                                  pid_t pid,
4773                                  perf_overflow_handler_t overflow_handler)
4774 {
4775         struct perf_event *event;
4776         struct perf_event_context *ctx;
4777         int err;
4778
4779         /*
4780          * Get the target context (task or percpu):
4781          */
4782
4783         ctx = find_get_context(pid, cpu);
4784         if (IS_ERR(ctx)) {
4785                 err = PTR_ERR(ctx);
4786                 goto err_exit;
4787         }
4788
4789         event = perf_event_alloc(attr, cpu, ctx, NULL,
4790                                  NULL, overflow_handler, GFP_KERNEL);
4791         if (IS_ERR(event)) {
4792                 err = PTR_ERR(event);
4793                 goto err_put_context;
4794         }
4795
4796         event->filp = NULL;
4797         WARN_ON_ONCE(ctx->parent_ctx);
4798         mutex_lock(&ctx->mutex);
4799         perf_install_in_context(ctx, event, cpu);
4800         ++ctx->generation;
4801         mutex_unlock(&ctx->mutex);
4802
4803         event->owner = current;
4804         get_task_struct(current);
4805         mutex_lock(&current->perf_event_mutex);
4806         list_add_tail(&event->owner_entry, &current->perf_event_list);
4807         mutex_unlock(&current->perf_event_mutex);
4808
4809         return event;
4810
4811  err_put_context:
4812         put_ctx(ctx);
4813  err_exit:
4814         return ERR_PTR(err);
4815 }
4816 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
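/*
 * A minimal in-kernel usage sketch of perf_event_create_kernel_counter()
 * above (illustrative; "my_handler" is a hypothetical
 * perf_overflow_handler_t callback, not defined in this file): bind a
 * counter to CPU 0 (cpu = 0, pid = -1 for a per-cpu counter) and get a
 * callback every 100000 retired instructions:
 *
 *        struct perf_event_attr attr = {
 *                .type          = PERF_TYPE_HARDWARE,
 *                .size          = sizeof(attr),
 *                .config        = PERF_COUNT_HW_INSTRUCTIONS,
 *                .sample_period = 100000,
 *        };
 *        struct perf_event *ev =
 *                perf_event_create_kernel_counter(&attr, 0, -1, my_handler);
 */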
4817
4818 /*
4819  * inherit an event from parent task to child task:
4820  */
4821 static struct perf_event *
4822 inherit_event(struct perf_event *parent_event,
4823               struct task_struct *parent,
4824               struct perf_event_context *parent_ctx,
4825               struct task_struct *child,
4826               struct perf_event *group_leader,
4827               struct perf_event_context *child_ctx)
4828 {
4829         struct perf_event *child_event;
4830
4831         /*
4832          * Instead of creating recursive hierarchies of events,
4833          * we link inherited events back to the original parent,
4834          * which is guaranteed to have a filp, which we use as the
4835          * reference count:
4836          */
4837         if (parent_event->parent)
4838                 parent_event = parent_event->parent;
4839
4840         child_event = perf_event_alloc(&parent_event->attr,
4841                                            parent_event->cpu, child_ctx,
4842                                            group_leader, parent_event,
4843                                            NULL, GFP_KERNEL);
4844         if (IS_ERR(child_event))
4845                 return child_event;
4846         get_ctx(child_ctx);
4847
4848         /*
4849          * Make the child state follow the state of the parent event,
4850          * not its attr.disabled bit.  We hold the parent's mutex,
4851          * so we won't race with perf_event_{en, dis}able_family.
4852          */
4853         if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4854                 child_event->state = PERF_EVENT_STATE_INACTIVE;
4855         else
4856                 child_event->state = PERF_EVENT_STATE_OFF;
4857
4858         if (parent_event->attr.freq)
4859                 child_event->hw.sample_period = parent_event->hw.sample_period;
4860
4861         child_event->overflow_handler = parent_event->overflow_handler;
4862
4863         /*
4864          * Link it up in the child's context:
4865          */
4866         add_event_to_ctx(child_event, child_ctx);
4867
4868         /*
4869          * Get a reference to the parent filp - we will fput it
4870          * when the child event exits. This is safe to do because
4871          * we are in the parent and we know that the filp still
4872          * exists and has a nonzero count:
4873          */
4874         atomic_long_inc(&parent_event->filp->f_count);
4875
4876         /*
4877          * Link this into the parent event's child list
4878          */
4879         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4880         mutex_lock(&parent_event->child_mutex);
4881         list_add_tail(&child_event->child_list, &parent_event->child_list);
4882         mutex_unlock(&parent_event->child_mutex);
4883
4884         return child_event;
4885 }
4886
4887 static int inherit_group(struct perf_event *parent_event,
4888               struct task_struct *parent,
4889               struct perf_event_context *parent_ctx,
4890               struct task_struct *child,
4891               struct perf_event_context *child_ctx)
4892 {
4893         struct perf_event *leader;
4894         struct perf_event *sub;
4895         struct perf_event *child_ctr;
4896
4897         leader = inherit_event(parent_event, parent, parent_ctx,
4898                                  child, NULL, child_ctx);
4899         if (IS_ERR(leader))
4900                 return PTR_ERR(leader);
4901         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4902                 child_ctr = inherit_event(sub, parent, parent_ctx,
4903                                             child, leader, child_ctx);
4904                 if (IS_ERR(child_ctr))
4905                         return PTR_ERR(child_ctr);
4906         }
4907         return 0;
4908 }
4909
4910 static void sync_child_event(struct perf_event *child_event,
4911                                struct task_struct *child)
4912 {
4913         struct perf_event *parent_event = child_event->parent;
4914         u64 child_val;
4915
4916         if (child_event->attr.inherit_stat)
4917                 perf_event_read_event(child_event, child);
4918
4919         child_val = atomic64_read(&child_event->count);
4920
4921         /*
4922          * Add back the child's count to the parent's count:
4923          */
4924         atomic64_add(child_val, &parent_event->count);
4925         atomic64_add(child_event->total_time_enabled,
4926                      &parent_event->child_total_time_enabled);
4927         atomic64_add(child_event->total_time_running,
4928                      &parent_event->child_total_time_running);
4929
4930         /*
4931          * Remove this event from the parent's list
4932          */
4933         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4934         mutex_lock(&parent_event->child_mutex);
4935         list_del_init(&child_event->child_list);
4936         mutex_unlock(&parent_event->child_mutex);
4937
4938         /*
4939          * Release the parent event, if this was the last
4940          * reference to it.
4941          */
4942         fput(parent_event->filp);
4943 }
4944
4945 static void
4946 __perf_event_exit_task(struct perf_event *child_event,
4947                          struct perf_event_context *child_ctx,
4948                          struct task_struct *child)
4949 {
4950         struct perf_event *parent_event;
4951
4952         perf_event_remove_from_context(child_event);
4953
4954         parent_event = child_event->parent;
4955         /*
4956          * It can happen that the parent exits first, and has events
4957          * that are still around due to the child reference. These
4958          * events need to be zapped - but otherwise linger.
4959          */
4960         if (parent_event) {
4961                 sync_child_event(child_event, child);
4962                 free_event(child_event);
4963         }
4964 }
4965
4966 /*
4967  * When a child task exits, feed back event values to parent events.
4968  */
4969 void perf_event_exit_task(struct task_struct *child)
4970 {
4971         struct perf_event *child_event, *tmp;
4972         struct perf_event_context *child_ctx;
4973         unsigned long flags;
4974
4975         if (likely(!child->perf_event_ctxp)) {
4976                 perf_event_task(child, NULL, 0);
4977                 return;
4978         }
4979
4980         local_irq_save(flags);
4981         /*
4982          * We can't reschedule here because interrupts are disabled,
4983          * and the child is either current or a task that can't be
4984          * scheduled, so rescheduling can no longer change our
4985          * context out from under us.
4986          */
4987         child_ctx = child->perf_event_ctxp;
4988         __perf_event_task_sched_out(child_ctx);
4989
4990         /*
4991          * Take the context lock here so that if find_get_context is
4992          * reading child->perf_event_ctxp, we wait until it has
4993          * incremented the context's refcount before we do put_ctx below.
4994          */
4995         spin_lock(&child_ctx->lock);
4996         child->perf_event_ctxp = NULL;
4997         /*
4998          * If this context is a clone, unclone it so it can't get
4999          * swapped to another process while we're removing all
5000          * the events from it.
5001          */
5002         unclone_ctx(child_ctx);
5003         update_context_time(child_ctx);
5004         spin_unlock_irqrestore(&child_ctx->lock, flags);
5005
5006         /*
5007          * Report the task dead after unscheduling the events so that we
5008          * won't get any samples after PERF_RECORD_EXIT. We can however still
5009          * get a few PERF_RECORD_READ events.
5010          */
5011         perf_event_task(child, child_ctx, 0);
5012
5013         /*
5014          * We can recurse on the same lock type through:
5015          *
5016          *   __perf_event_exit_task()
5017          *     sync_child_event()
5018          *       fput(parent_event->filp)
5019          *         perf_release()
5020          *           mutex_lock(&ctx->mutex)
5021          *
5022          * But since it's the parent context it won't be the same instance.
5023          */
5024         mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
5025
5026 again:
5027         list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list,
5028                                  group_entry)
5029                 __perf_event_exit_task(child_event, child_ctx, child);
5030
5031         /*
5032          * If the last event was a group event, it will have appended all
5033          * its siblings to the list, but 'tmp' was fetched before that and
5034          * still points to the list head that terminates the iteration.
5035          */
5036         if (!list_empty(&child_ctx->group_list))
5037                 goto again;
5038
5039         mutex_unlock(&child_ctx->mutex);
5040
5041         put_ctx(child_ctx);
5042 }
5043
5044 /*
5045  * free an unexposed, unused context, as created by inheritance in
5046  * perf_event_init_task() below; used by fork() in case of failure.
5047  */
5048 void perf_event_free_task(struct task_struct *task)
5049 {
5050         struct perf_event_context *ctx = task->perf_event_ctxp;
5051         struct perf_event *event, *tmp;
5052
5053         if (!ctx)
5054                 return;
5055
5056         mutex_lock(&ctx->mutex);
5057 again:
5058         list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) {
5059                 struct perf_event *parent = event->parent;
5060
5061                 if (WARN_ON_ONCE(!parent))
5062                         continue;
5063
5064                 mutex_lock(&parent->child_mutex);
5065                 list_del_init(&event->child_list);
5066                 mutex_unlock(&parent->child_mutex);
5067
5068                 fput(parent->filp);
5069
5070                 list_del_event(event, ctx);
5071                 free_event(event);
5072         }
5073
5074         if (!list_empty(&ctx->group_list))
5075                 goto again;
5076
5077         mutex_unlock(&ctx->mutex);
5078
5079         put_ctx(ctx);
5080 }
5081
5082 /*
5083  * Initialize the perf_event context in task_struct
5084  */
5085 int perf_event_init_task(struct task_struct *child)
5086 {
5087         struct perf_event_context *child_ctx, *parent_ctx;
5088         struct perf_event_context *cloned_ctx;
5089         struct perf_event *event;
5090         struct task_struct *parent = current;
5091         int inherited_all = 1;
5092         int ret = 0;
5093
5094         child->perf_event_ctxp = NULL;
5095
5096         mutex_init(&child->perf_event_mutex);
5097         INIT_LIST_HEAD(&child->perf_event_list);
5098
5099         if (likely(!parent->perf_event_ctxp))
5100                 return 0;
5101
5102         /*
5103          * This is executed from the parent task context, so inherit
5104          * events that have been marked for cloning.
5105          * First allocate and initialize a context for the child.
5106          */
5107
5108         child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
5109         if (!child_ctx)
5110                 return -ENOMEM;
5111
5112         __perf_event_init_context(child_ctx, child);
5113         child->perf_event_ctxp = child_ctx;
5114         get_task_struct(child);
5115
5116         /*
5117          * If the parent's context is a clone, pin it so it won't get
5118          * swapped under us.
5119          */
5120         parent_ctx = perf_pin_task_context(parent);
5121
5122         /*
5123          * No need to check if parent_ctx != NULL here; since we saw
5124          * it non-NULL earlier, the only reason for it to become NULL
5125          * is if we exit, and since we're currently in the middle of
5126          * a fork we can't be exiting at the same time.
5127          */
5128
5129         /*
5130          * Lock the parent list. No need to lock the child - it is not
5131          * in the PID hash yet and not running, so nobody can access it.
5132          */
5133         mutex_lock(&parent_ctx->mutex);
5134
5135         /*
5136          * We don't have to disable NMIs - we are only looking at
5137          * the list, not manipulating it:
5138          */
5139         list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
5140
5141                 if (!event->attr.inherit) {
5142                         inherited_all = 0;
5143                         continue;
5144                 }
5145
5146                 ret = inherit_group(event, parent, parent_ctx,
5147                                              child, child_ctx);
5148                 if (ret) {
5149                         inherited_all = 0;
5150                         break;
5151                 }
5152         }
5153
5154         if (inherited_all) {
5155                 /*
5156                  * Mark the child context as a clone of the parent
5157                  * context, or of whatever the parent is a clone of.
5158                  * Note that if the parent is a clone, it could get
5159                  * uncloned at any point, but that doesn't matter
5160                  * because the list of events and the generation
5161                  * count can't have changed since we took the mutex.
5162                  */
5163                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5164                 if (cloned_ctx) {
5165                         child_ctx->parent_ctx = cloned_ctx;
5166                         child_ctx->parent_gen = parent_ctx->parent_gen;
5167                 } else {
5168                         child_ctx->parent_ctx = parent_ctx;
5169                         child_ctx->parent_gen = parent_ctx->generation;
5170                 }
5171                 get_ctx(child_ctx->parent_ctx);
5172         }
5173
5174         mutex_unlock(&parent_ctx->mutex);
5175
5176         perf_unpin_context(parent_ctx);
5177
5178         return ret;
5179 }
5180
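/*
 * Set up the per-CPU context when a CPU is brought up: initialize its
 * context, compute how many events a single task may still use on it,
 * and let the architecture code do its own per-CPU setup.
 */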
5181 static void __cpuinit perf_event_init_cpu(int cpu)
5182 {
5183         struct perf_cpu_context *cpuctx;
5184
5185         cpuctx = &per_cpu(perf_cpu_context, cpu);
5186         __perf_event_init_context(&cpuctx->ctx, NULL);
5187
5188         spin_lock(&perf_resource_lock);
5189         cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5190         spin_unlock(&perf_resource_lock);
5191
5192         hw_perf_event_setup(cpu);
5193 }
5194
5195 #ifdef CONFIG_HOTPLUG_CPU
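/*
 * Runs on the CPU that is going down (via smp_call_function_single)
 * and removes every event from that CPU's context.
 */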
5196 static void __perf_event_exit_cpu(void *info)
5197 {
5198         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5199         struct perf_event_context *ctx = &cpuctx->ctx;
5200         struct perf_event *event, *tmp;
5201
5202         list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry)
5203                 __perf_event_remove_from_context(event);
5204 }
5205 static void perf_event_exit_cpu(int cpu)
5206 {
5207         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5208         struct perf_event_context *ctx = &cpuctx->ctx;
5209
5210         mutex_lock(&ctx->mutex);
5211         smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5212         mutex_unlock(&ctx->mutex);
5213 }
5214 #else
5215 static inline void perf_event_exit_cpu(int cpu) { }
5216 #endif
5217
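/*
 * CPU hotplug notifier: set up the per-CPU context before a CPU comes
 * online, give the architecture code a callback once it is online, and
 * tear the context down before the CPU is taken away.
 */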
5218 static int __cpuinit
5219 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5220 {
5221         unsigned int cpu = (long)hcpu;
5222
5223         switch (action) {
5224
5225         case CPU_UP_PREPARE:
5226         case CPU_UP_PREPARE_FROZEN:
5227                 perf_event_init_cpu(cpu);
5228                 break;
5229
5230         case CPU_ONLINE:
5231         case CPU_ONLINE_FROZEN:
5232                 hw_perf_event_setup_online(cpu);
5233                 break;
5234
5235         case CPU_DOWN_PREPARE:
5236         case CPU_DOWN_PREPARE_FROZEN:
5237                 perf_event_exit_cpu(cpu);
5238                 break;
5239
5240         default:
5241                 break;
5242         }
5243
5244         return NOTIFY_OK;
5245 }
5246
5247 /*
5248  * This has to have a higher priority than migration_notifier in sched.c.
5249  */
5250 static struct notifier_block __cpuinitdata perf_cpu_nb = {
5251         .notifier_call          = perf_cpu_notify,
5252         .priority               = 20,
5253 };
5254
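/*
 * Boot-time initialization: invoke the hotplug callback by hand for
 * the boot CPU (which never sees CPU_UP_PREPARE/CPU_ONLINE through the
 * notifier chain), then register the notifier for later CPUs.
 */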
5255 void __init perf_event_init(void)
5256 {
5257         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
5258                         (void *)(long)smp_processor_id());
5259         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
5260                         (void *)(long)smp_processor_id());
5261         register_cpu_notifier(&perf_cpu_nb);
5262 }
5263
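/*
 * Sysfs handlers for "reserve_percpu" (typically exposed as
 * /sys/devices/system/cpu/perf_events/reserve_percpu): the number of
 * counters set aside for per-CPU events.  Writing a new value
 * recomputes max_pertask for every online CPU.
 */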
5264 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
5265 {
5266         return sprintf(buf, "%d\n", perf_reserved_percpu);
5267 }
5268
5269 static ssize_t
5270 perf_set_reserve_percpu(struct sysdev_class *class,
5271                         const char *buf,
5272                         size_t count)
5273 {
5274         struct perf_cpu_context *cpuctx;
5275         unsigned long val;
5276         int err, cpu, mpt;
5277
5278         err = strict_strtoul(buf, 10, &val);
5279         if (err)
5280                 return err;
5281         if (val > perf_max_events)
5282                 return -EINVAL;
5283
5284         spin_lock(&perf_resource_lock);
5285         perf_reserved_percpu = val;
5286         for_each_online_cpu(cpu) {
5287                 cpuctx = &per_cpu(perf_cpu_context, cpu);
5288                 spin_lock_irq(&cpuctx->ctx.lock);
5289                 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
5290                           perf_max_events - perf_reserved_percpu);
5291                 cpuctx->max_pertask = mpt;
5292                 spin_unlock_irq(&cpuctx->ctx.lock);
5293         }
5294         spin_unlock(&perf_resource_lock);
5295
5296         return count;
5297 }
5298
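/*
 * Sysfs handlers for "overcommit": a 0/1 flag; the store rejects any
 * other value and updates perf_overcommit under perf_resource_lock.
 */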
5299 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
5300 {
5301         return sprintf(buf, "%d\n", perf_overcommit);
5302 }
5303
5304 static ssize_t
5305 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
5306 {
5307         unsigned long val;
5308         int err;
5309
5310         err = strict_strtoul(buf, 10, &val);
5311         if (err)
5312                 return err;
5313         if (val > 1)
5314                 return -EINVAL;
5315
5316         spin_lock(&perf_resource_lock);
5317         perf_overcommit = val;
5318         spin_unlock(&perf_resource_lock);
5319
5320         return count;
5321 }
5322
5323 static SYSDEV_CLASS_ATTR(
5324                                 reserve_percpu,
5325                                 0644,
5326                                 perf_show_reserve_percpu,
5327                                 perf_set_reserve_percpu
5328                         );
5329
5330 static SYSDEV_CLASS_ATTR(
5331                                 overcommit,
5332                                 0644,
5333                                 perf_show_overcommit,
5334                                 perf_set_overcommit
5335                         );
5336
5337 static struct attribute *perfclass_attrs[] = {
5338         &attr_reserve_percpu.attr,
5339         &attr_overcommit.attr,
5340         NULL
5341 };
5342
5343 static struct attribute_group perfclass_attr_group = {
5344         .attrs                  = perfclass_attrs,
5345         .name                   = "perf_events",
5346 };
5347
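/*
 * Register the "perf_events" attribute group on the CPU sysdev class
 * at boot (typically visible under /sys/devices/system/cpu/perf_events/).
 */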
5348 static int __init perf_event_sysfs_init(void)
5349 {
5350         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
5351                                   &perfclass_attr_group);
5352 }
5353 device_initcall(perf_event_sysfs_init);