perf: Round robin flexible groups of events using list_rotate_left()
[safe/jmp/linux-2.6] / kernel / perf_event.c
1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/vmalloc.h>
24 #include <linux/hardirq.h>
25 #include <linux/rculist.h>
26 #include <linux/uaccess.h>
27 #include <linux/syscalls.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/kernel_stat.h>
30 #include <linux/perf_event.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/hw_breakpoint.h>
33
34 #include <asm/irq_regs.h>
35
36 /*
37  * Each CPU has a list of per CPU events:
38  */
39 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
40
41 int perf_max_events __read_mostly = 1;
42 static int perf_reserved_percpu __read_mostly;
43 static int perf_overcommit __read_mostly = 1;
44
45 static atomic_t nr_events __read_mostly;
46 static atomic_t nr_mmap_events __read_mostly;
47 static atomic_t nr_comm_events __read_mostly;
48 static atomic_t nr_task_events __read_mostly;
49
50 /*
51  * perf event paranoia level:
52  *  -1 - not paranoid at all
53  *   0 - disallow raw tracepoint access for unpriv
54  *   1 - disallow cpu events for unpriv
55  *   2 - disallow kernel profiling for unpriv
56  */
57 int sysctl_perf_event_paranoid __read_mostly = 1;
58
59 static inline bool perf_paranoid_tracepoint_raw(void)
60 {
61         return sysctl_perf_event_paranoid > -1;
62 }
63
64 static inline bool perf_paranoid_cpu(void)
65 {
66         return sysctl_perf_event_paranoid > 0;
67 }
68
69 static inline bool perf_paranoid_kernel(void)
70 {
71         return sysctl_perf_event_paranoid > 1;
72 }
73
74 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
75
76 /*
77  * max perf event sample rate
78  */
79 int sysctl_perf_event_sample_rate __read_mostly = 100000;
80
81 static atomic64_t perf_event_id;
82
83 /*
84  * Lock for (sysadmin-configurable) event reservations:
85  */
86 static DEFINE_SPINLOCK(perf_resource_lock);
87
88 /*
89  * Architecture provided APIs - weak aliases:
90  */
91 extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
92 {
93         return NULL;
94 }
95
96 void __weak hw_perf_disable(void)               { barrier(); }
97 void __weak hw_perf_enable(void)                { barrier(); }
98
99 void __weak hw_perf_event_setup(int cpu)        { barrier(); }
100 void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
101
102 int __weak
103 hw_perf_group_sched_in(struct perf_event *group_leader,
104                struct perf_cpu_context *cpuctx,
105                struct perf_event_context *ctx, int cpu)
106 {
107         return 0;
108 }
109
110 void __weak perf_event_print_debug(void)        { }
111
112 static DEFINE_PER_CPU(int, perf_disable_count);
113
114 void __perf_disable(void)
115 {
116         __get_cpu_var(perf_disable_count)++;
117 }
118
119 bool __perf_enable(void)
120 {
121         return !--__get_cpu_var(perf_disable_count);
122 }
123
124 void perf_disable(void)
125 {
126         __perf_disable();
127         hw_perf_disable();
128 }
129
130 void perf_enable(void)
131 {
132         if (__perf_enable())
133                 hw_perf_enable();
134 }
135
136 static void get_ctx(struct perf_event_context *ctx)
137 {
138         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
139 }
140
141 static void free_ctx(struct rcu_head *head)
142 {
143         struct perf_event_context *ctx;
144
145         ctx = container_of(head, struct perf_event_context, rcu_head);
146         kfree(ctx);
147 }
148
149 static void put_ctx(struct perf_event_context *ctx)
150 {
151         if (atomic_dec_and_test(&ctx->refcount)) {
152                 if (ctx->parent_ctx)
153                         put_ctx(ctx->parent_ctx);
154                 if (ctx->task)
155                         put_task_struct(ctx->task);
156                 call_rcu(&ctx->rcu_head, free_ctx);
157         }
158 }
159
160 static void unclone_ctx(struct perf_event_context *ctx)
161 {
162         if (ctx->parent_ctx) {
163                 put_ctx(ctx->parent_ctx);
164                 ctx->parent_ctx = NULL;
165         }
166 }
167
168 /*
169  * If we inherit events we want to return the parent event id
170  * to userspace.
171  */
172 static u64 primary_event_id(struct perf_event *event)
173 {
174         u64 id = event->id;
175
176         if (event->parent)
177                 id = event->parent->id;
178
179         return id;
180 }
181
182 /*
183  * Get the perf_event_context for a task and lock it.
184  * This has to cope with with the fact that until it is locked,
185  * the context could get moved to another task.
186  */
187 static struct perf_event_context *
188 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
189 {
190         struct perf_event_context *ctx;
191
192         rcu_read_lock();
193  retry:
194         ctx = rcu_dereference(task->perf_event_ctxp);
195         if (ctx) {
196                 /*
197                  * If this context is a clone of another, it might
198                  * get swapped for another underneath us by
199                  * perf_event_task_sched_out, though the
200                  * rcu_read_lock() protects us from any context
201                  * getting freed.  Lock the context and check if it
202                  * got swapped before we could get the lock, and retry
203                  * if so.  If we locked the right context, then it
204                  * can't get swapped on us any more.
205                  */
206                 raw_spin_lock_irqsave(&ctx->lock, *flags);
207                 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
208                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
209                         goto retry;
210                 }
211
212                 if (!atomic_inc_not_zero(&ctx->refcount)) {
213                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
214                         ctx = NULL;
215                 }
216         }
217         rcu_read_unlock();
218         return ctx;
219 }
220
221 /*
222  * Get the context for a task and increment its pin_count so it
223  * can't get swapped to another task.  This also increments its
224  * reference count so that the context can't get freed.
225  */
226 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
227 {
228         struct perf_event_context *ctx;
229         unsigned long flags;
230
231         ctx = perf_lock_task_context(task, &flags);
232         if (ctx) {
233                 ++ctx->pin_count;
234                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
235         }
236         return ctx;
237 }
238
239 static void perf_unpin_context(struct perf_event_context *ctx)
240 {
241         unsigned long flags;
242
243         raw_spin_lock_irqsave(&ctx->lock, flags);
244         --ctx->pin_count;
245         raw_spin_unlock_irqrestore(&ctx->lock, flags);
246         put_ctx(ctx);
247 }
248
249 static inline u64 perf_clock(void)
250 {
251         return cpu_clock(smp_processor_id());
252 }
253
254 /*
255  * Update the record of the current time in a context.
256  */
257 static void update_context_time(struct perf_event_context *ctx)
258 {
259         u64 now = perf_clock();
260
261         ctx->time += now - ctx->timestamp;
262         ctx->timestamp = now;
263 }
264
265 /*
266  * Update the total_time_enabled and total_time_running fields for a event.
267  */
268 static void update_event_times(struct perf_event *event)
269 {
270         struct perf_event_context *ctx = event->ctx;
271         u64 run_end;
272
273         if (event->state < PERF_EVENT_STATE_INACTIVE ||
274             event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
275                 return;
276
277         if (ctx->is_active)
278                 run_end = ctx->time;
279         else
280                 run_end = event->tstamp_stopped;
281
282         event->total_time_enabled = run_end - event->tstamp_enabled;
283
284         if (event->state == PERF_EVENT_STATE_INACTIVE)
285                 run_end = event->tstamp_stopped;
286         else
287                 run_end = ctx->time;
288
289         event->total_time_running = run_end - event->tstamp_running;
290 }
291
292 static struct list_head *
293 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
294 {
295         if (event->attr.pinned)
296                 return &ctx->pinned_groups;
297         else
298                 return &ctx->flexible_groups;
299 }
300
301 /*
302  * Add a event from the lists for its context.
303  * Must be called with ctx->mutex and ctx->lock held.
304  */
305 static void
306 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
307 {
308         struct perf_event *group_leader = event->group_leader;
309
310         /*
311          * Depending on whether it is a standalone or sibling event,
312          * add it straight to the context's event list, or to the group
313          * leader's sibling list:
314          */
315         if (group_leader == event) {
316                 struct list_head *list;
317
318                 list = ctx_group_list(event, ctx);
319                 list_add_tail(&event->group_entry, list);
320         } else {
321                 list_add_tail(&event->group_entry, &group_leader->sibling_list);
322                 group_leader->nr_siblings++;
323         }
324
325         list_add_rcu(&event->event_entry, &ctx->event_list);
326         ctx->nr_events++;
327         if (event->attr.inherit_stat)
328                 ctx->nr_stat++;
329 }
330
331 /*
332  * Remove a event from the lists for its context.
333  * Must be called with ctx->mutex and ctx->lock held.
334  */
335 static void
336 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
337 {
338         struct perf_event *sibling, *tmp;
339
340         if (list_empty(&event->group_entry))
341                 return;
342         ctx->nr_events--;
343         if (event->attr.inherit_stat)
344                 ctx->nr_stat--;
345
346         list_del_init(&event->group_entry);
347         list_del_rcu(&event->event_entry);
348
349         if (event->group_leader != event)
350                 event->group_leader->nr_siblings--;
351
352         update_event_times(event);
353
354         /*
355          * If event was in error state, then keep it
356          * that way, otherwise bogus counts will be
357          * returned on read(). The only way to get out
358          * of error state is by explicit re-enabling
359          * of the event
360          */
361         if (event->state > PERF_EVENT_STATE_OFF)
362                 event->state = PERF_EVENT_STATE_OFF;
363
364         /*
365          * If this was a group event with sibling events then
366          * upgrade the siblings to singleton events by adding them
367          * to the context list directly:
368          */
369         list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
370                 struct list_head *list;
371
372                 list = ctx_group_list(event, ctx);
373                 list_move_tail(&sibling->group_entry, list);
374                 sibling->group_leader = sibling;
375         }
376 }
377
378 static void
379 event_sched_out(struct perf_event *event,
380                   struct perf_cpu_context *cpuctx,
381                   struct perf_event_context *ctx)
382 {
383         if (event->state != PERF_EVENT_STATE_ACTIVE)
384                 return;
385
386         event->state = PERF_EVENT_STATE_INACTIVE;
387         if (event->pending_disable) {
388                 event->pending_disable = 0;
389                 event->state = PERF_EVENT_STATE_OFF;
390         }
391         event->tstamp_stopped = ctx->time;
392         event->pmu->disable(event);
393         event->oncpu = -1;
394
395         if (!is_software_event(event))
396                 cpuctx->active_oncpu--;
397         ctx->nr_active--;
398         if (event->attr.exclusive || !cpuctx->active_oncpu)
399                 cpuctx->exclusive = 0;
400 }
401
402 static void
403 group_sched_out(struct perf_event *group_event,
404                 struct perf_cpu_context *cpuctx,
405                 struct perf_event_context *ctx)
406 {
407         struct perf_event *event;
408
409         if (group_event->state != PERF_EVENT_STATE_ACTIVE)
410                 return;
411
412         event_sched_out(group_event, cpuctx, ctx);
413
414         /*
415          * Schedule out siblings (if any):
416          */
417         list_for_each_entry(event, &group_event->sibling_list, group_entry)
418                 event_sched_out(event, cpuctx, ctx);
419
420         if (group_event->attr.exclusive)
421                 cpuctx->exclusive = 0;
422 }
423
424 /*
425  * Cross CPU call to remove a performance event
426  *
427  * We disable the event on the hardware level first. After that we
428  * remove it from the context list.
429  */
430 static void __perf_event_remove_from_context(void *info)
431 {
432         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
433         struct perf_event *event = info;
434         struct perf_event_context *ctx = event->ctx;
435
436         /*
437          * If this is a task context, we need to check whether it is
438          * the current task context of this cpu. If not it has been
439          * scheduled out before the smp call arrived.
440          */
441         if (ctx->task && cpuctx->task_ctx != ctx)
442                 return;
443
444         raw_spin_lock(&ctx->lock);
445         /*
446          * Protect the list operation against NMI by disabling the
447          * events on a global level.
448          */
449         perf_disable();
450
451         event_sched_out(event, cpuctx, ctx);
452
453         list_del_event(event, ctx);
454
455         if (!ctx->task) {
456                 /*
457                  * Allow more per task events with respect to the
458                  * reservation:
459                  */
460                 cpuctx->max_pertask =
461                         min(perf_max_events - ctx->nr_events,
462                             perf_max_events - perf_reserved_percpu);
463         }
464
465         perf_enable();
466         raw_spin_unlock(&ctx->lock);
467 }
468
469
470 /*
471  * Remove the event from a task's (or a CPU's) list of events.
472  *
473  * Must be called with ctx->mutex held.
474  *
475  * CPU events are removed with a smp call. For task events we only
476  * call when the task is on a CPU.
477  *
478  * If event->ctx is a cloned context, callers must make sure that
479  * every task struct that event->ctx->task could possibly point to
480  * remains valid.  This is OK when called from perf_release since
481  * that only calls us on the top-level context, which can't be a clone.
482  * When called from perf_event_exit_task, it's OK because the
483  * context has been detached from its task.
484  */
485 static void perf_event_remove_from_context(struct perf_event *event)
486 {
487         struct perf_event_context *ctx = event->ctx;
488         struct task_struct *task = ctx->task;
489
490         if (!task) {
491                 /*
492                  * Per cpu events are removed via an smp call and
493                  * the removal is always successful.
494                  */
495                 smp_call_function_single(event->cpu,
496                                          __perf_event_remove_from_context,
497                                          event, 1);
498                 return;
499         }
500
501 retry:
502         task_oncpu_function_call(task, __perf_event_remove_from_context,
503                                  event);
504
505         raw_spin_lock_irq(&ctx->lock);
506         /*
507          * If the context is active we need to retry the smp call.
508          */
509         if (ctx->nr_active && !list_empty(&event->group_entry)) {
510                 raw_spin_unlock_irq(&ctx->lock);
511                 goto retry;
512         }
513
514         /*
515          * The lock prevents that this context is scheduled in so we
516          * can remove the event safely, if the call above did not
517          * succeed.
518          */
519         if (!list_empty(&event->group_entry))
520                 list_del_event(event, ctx);
521         raw_spin_unlock_irq(&ctx->lock);
522 }
523
524 /*
525  * Update total_time_enabled and total_time_running for all events in a group.
526  */
527 static void update_group_times(struct perf_event *leader)
528 {
529         struct perf_event *event;
530
531         update_event_times(leader);
532         list_for_each_entry(event, &leader->sibling_list, group_entry)
533                 update_event_times(event);
534 }
535
536 /*
537  * Cross CPU call to disable a performance event
538  */
539 static void __perf_event_disable(void *info)
540 {
541         struct perf_event *event = info;
542         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
543         struct perf_event_context *ctx = event->ctx;
544
545         /*
546          * If this is a per-task event, need to check whether this
547          * event's task is the current task on this cpu.
548          */
549         if (ctx->task && cpuctx->task_ctx != ctx)
550                 return;
551
552         raw_spin_lock(&ctx->lock);
553
554         /*
555          * If the event is on, turn it off.
556          * If it is in error state, leave it in error state.
557          */
558         if (event->state >= PERF_EVENT_STATE_INACTIVE) {
559                 update_context_time(ctx);
560                 update_group_times(event);
561                 if (event == event->group_leader)
562                         group_sched_out(event, cpuctx, ctx);
563                 else
564                         event_sched_out(event, cpuctx, ctx);
565                 event->state = PERF_EVENT_STATE_OFF;
566         }
567
568         raw_spin_unlock(&ctx->lock);
569 }
570
571 /*
572  * Disable a event.
573  *
574  * If event->ctx is a cloned context, callers must make sure that
575  * every task struct that event->ctx->task could possibly point to
576  * remains valid.  This condition is satisifed when called through
577  * perf_event_for_each_child or perf_event_for_each because they
578  * hold the top-level event's child_mutex, so any descendant that
579  * goes to exit will block in sync_child_event.
580  * When called from perf_pending_event it's OK because event->ctx
581  * is the current context on this CPU and preemption is disabled,
582  * hence we can't get into perf_event_task_sched_out for this context.
583  */
584 void perf_event_disable(struct perf_event *event)
585 {
586         struct perf_event_context *ctx = event->ctx;
587         struct task_struct *task = ctx->task;
588
589         if (!task) {
590                 /*
591                  * Disable the event on the cpu that it's on
592                  */
593                 smp_call_function_single(event->cpu, __perf_event_disable,
594                                          event, 1);
595                 return;
596         }
597
598  retry:
599         task_oncpu_function_call(task, __perf_event_disable, event);
600
601         raw_spin_lock_irq(&ctx->lock);
602         /*
603          * If the event is still active, we need to retry the cross-call.
604          */
605         if (event->state == PERF_EVENT_STATE_ACTIVE) {
606                 raw_spin_unlock_irq(&ctx->lock);
607                 goto retry;
608         }
609
610         /*
611          * Since we have the lock this context can't be scheduled
612          * in, so we can change the state safely.
613          */
614         if (event->state == PERF_EVENT_STATE_INACTIVE) {
615                 update_group_times(event);
616                 event->state = PERF_EVENT_STATE_OFF;
617         }
618
619         raw_spin_unlock_irq(&ctx->lock);
620 }
621
622 static int
623 event_sched_in(struct perf_event *event,
624                  struct perf_cpu_context *cpuctx,
625                  struct perf_event_context *ctx,
626                  int cpu)
627 {
628         if (event->state <= PERF_EVENT_STATE_OFF)
629                 return 0;
630
631         event->state = PERF_EVENT_STATE_ACTIVE;
632         event->oncpu = cpu;     /* TODO: put 'cpu' into cpuctx->cpu */
633         /*
634          * The new state must be visible before we turn it on in the hardware:
635          */
636         smp_wmb();
637
638         if (event->pmu->enable(event)) {
639                 event->state = PERF_EVENT_STATE_INACTIVE;
640                 event->oncpu = -1;
641                 return -EAGAIN;
642         }
643
644         event->tstamp_running += ctx->time - event->tstamp_stopped;
645
646         if (!is_software_event(event))
647                 cpuctx->active_oncpu++;
648         ctx->nr_active++;
649
650         if (event->attr.exclusive)
651                 cpuctx->exclusive = 1;
652
653         return 0;
654 }
655
656 static int
657 group_sched_in(struct perf_event *group_event,
658                struct perf_cpu_context *cpuctx,
659                struct perf_event_context *ctx,
660                int cpu)
661 {
662         struct perf_event *event, *partial_group;
663         int ret;
664
665         if (group_event->state == PERF_EVENT_STATE_OFF)
666                 return 0;
667
668         ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
669         if (ret)
670                 return ret < 0 ? ret : 0;
671
672         if (event_sched_in(group_event, cpuctx, ctx, cpu))
673                 return -EAGAIN;
674
675         /*
676          * Schedule in siblings as one group (if any):
677          */
678         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
679                 if (event_sched_in(event, cpuctx, ctx, cpu)) {
680                         partial_group = event;
681                         goto group_error;
682                 }
683         }
684
685         return 0;
686
687 group_error:
688         /*
689          * Groups can be scheduled in as one unit only, so undo any
690          * partial group before returning:
691          */
692         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
693                 if (event == partial_group)
694                         break;
695                 event_sched_out(event, cpuctx, ctx);
696         }
697         event_sched_out(group_event, cpuctx, ctx);
698
699         return -EAGAIN;
700 }
701
702 /*
703  * Return 1 for a group consisting entirely of software events,
704  * 0 if the group contains any hardware events.
705  */
706 static int is_software_only_group(struct perf_event *leader)
707 {
708         struct perf_event *event;
709
710         if (!is_software_event(leader))
711                 return 0;
712
713         list_for_each_entry(event, &leader->sibling_list, group_entry)
714                 if (!is_software_event(event))
715                         return 0;
716
717         return 1;
718 }
719
720 /*
721  * Work out whether we can put this event group on the CPU now.
722  */
723 static int group_can_go_on(struct perf_event *event,
724                            struct perf_cpu_context *cpuctx,
725                            int can_add_hw)
726 {
727         /*
728          * Groups consisting entirely of software events can always go on.
729          */
730         if (is_software_only_group(event))
731                 return 1;
732         /*
733          * If an exclusive group is already on, no other hardware
734          * events can go on.
735          */
736         if (cpuctx->exclusive)
737                 return 0;
738         /*
739          * If this group is exclusive and there are already
740          * events on the CPU, it can't go on.
741          */
742         if (event->attr.exclusive && cpuctx->active_oncpu)
743                 return 0;
744         /*
745          * Otherwise, try to add it if all previous groups were able
746          * to go on.
747          */
748         return can_add_hw;
749 }
750
751 static void add_event_to_ctx(struct perf_event *event,
752                                struct perf_event_context *ctx)
753 {
754         list_add_event(event, ctx);
755         event->tstamp_enabled = ctx->time;
756         event->tstamp_running = ctx->time;
757         event->tstamp_stopped = ctx->time;
758 }
759
760 /*
761  * Cross CPU call to install and enable a performance event
762  *
763  * Must be called with ctx->mutex held
764  */
765 static void __perf_install_in_context(void *info)
766 {
767         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
768         struct perf_event *event = info;
769         struct perf_event_context *ctx = event->ctx;
770         struct perf_event *leader = event->group_leader;
771         int cpu = smp_processor_id();
772         int err;
773
774         /*
775          * If this is a task context, we need to check whether it is
776          * the current task context of this cpu. If not it has been
777          * scheduled out before the smp call arrived.
778          * Or possibly this is the right context but it isn't
779          * on this cpu because it had no events.
780          */
781         if (ctx->task && cpuctx->task_ctx != ctx) {
782                 if (cpuctx->task_ctx || ctx->task != current)
783                         return;
784                 cpuctx->task_ctx = ctx;
785         }
786
787         raw_spin_lock(&ctx->lock);
788         ctx->is_active = 1;
789         update_context_time(ctx);
790
791         /*
792          * Protect the list operation against NMI by disabling the
793          * events on a global level. NOP for non NMI based events.
794          */
795         perf_disable();
796
797         add_event_to_ctx(event, ctx);
798
799         if (event->cpu != -1 && event->cpu != smp_processor_id())
800                 goto unlock;
801
802         /*
803          * Don't put the event on if it is disabled or if
804          * it is in a group and the group isn't on.
805          */
806         if (event->state != PERF_EVENT_STATE_INACTIVE ||
807             (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
808                 goto unlock;
809
810         /*
811          * An exclusive event can't go on if there are already active
812          * hardware events, and no hardware event can go on if there
813          * is already an exclusive event on.
814          */
815         if (!group_can_go_on(event, cpuctx, 1))
816                 err = -EEXIST;
817         else
818                 err = event_sched_in(event, cpuctx, ctx, cpu);
819
820         if (err) {
821                 /*
822                  * This event couldn't go on.  If it is in a group
823                  * then we have to pull the whole group off.
824                  * If the event group is pinned then put it in error state.
825                  */
826                 if (leader != event)
827                         group_sched_out(leader, cpuctx, ctx);
828                 if (leader->attr.pinned) {
829                         update_group_times(leader);
830                         leader->state = PERF_EVENT_STATE_ERROR;
831                 }
832         }
833
834         if (!err && !ctx->task && cpuctx->max_pertask)
835                 cpuctx->max_pertask--;
836
837  unlock:
838         perf_enable();
839
840         raw_spin_unlock(&ctx->lock);
841 }
842
843 /*
844  * Attach a performance event to a context
845  *
846  * First we add the event to the list with the hardware enable bit
847  * in event->hw_config cleared.
848  *
849  * If the event is attached to a task which is on a CPU we use a smp
850  * call to enable it in the task context. The task might have been
851  * scheduled away, but we check this in the smp call again.
852  *
853  * Must be called with ctx->mutex held.
854  */
855 static void
856 perf_install_in_context(struct perf_event_context *ctx,
857                         struct perf_event *event,
858                         int cpu)
859 {
860         struct task_struct *task = ctx->task;
861
862         if (!task) {
863                 /*
864                  * Per cpu events are installed via an smp call and
865                  * the install is always successful.
866                  */
867                 smp_call_function_single(cpu, __perf_install_in_context,
868                                          event, 1);
869                 return;
870         }
871
872 retry:
873         task_oncpu_function_call(task, __perf_install_in_context,
874                                  event);
875
876         raw_spin_lock_irq(&ctx->lock);
877         /*
878          * we need to retry the smp call.
879          */
880         if (ctx->is_active && list_empty(&event->group_entry)) {
881                 raw_spin_unlock_irq(&ctx->lock);
882                 goto retry;
883         }
884
885         /*
886          * The lock prevents that this context is scheduled in so we
887          * can add the event safely, if it the call above did not
888          * succeed.
889          */
890         if (list_empty(&event->group_entry))
891                 add_event_to_ctx(event, ctx);
892         raw_spin_unlock_irq(&ctx->lock);
893 }
894
895 /*
896  * Put a event into inactive state and update time fields.
897  * Enabling the leader of a group effectively enables all
898  * the group members that aren't explicitly disabled, so we
899  * have to update their ->tstamp_enabled also.
900  * Note: this works for group members as well as group leaders
901  * since the non-leader members' sibling_lists will be empty.
902  */
903 static void __perf_event_mark_enabled(struct perf_event *event,
904                                         struct perf_event_context *ctx)
905 {
906         struct perf_event *sub;
907
908         event->state = PERF_EVENT_STATE_INACTIVE;
909         event->tstamp_enabled = ctx->time - event->total_time_enabled;
910         list_for_each_entry(sub, &event->sibling_list, group_entry)
911                 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
912                         sub->tstamp_enabled =
913                                 ctx->time - sub->total_time_enabled;
914 }
915
916 /*
917  * Cross CPU call to enable a performance event
918  */
919 static void __perf_event_enable(void *info)
920 {
921         struct perf_event *event = info;
922         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
923         struct perf_event_context *ctx = event->ctx;
924         struct perf_event *leader = event->group_leader;
925         int err;
926
927         /*
928          * If this is a per-task event, need to check whether this
929          * event's task is the current task on this cpu.
930          */
931         if (ctx->task && cpuctx->task_ctx != ctx) {
932                 if (cpuctx->task_ctx || ctx->task != current)
933                         return;
934                 cpuctx->task_ctx = ctx;
935         }
936
937         raw_spin_lock(&ctx->lock);
938         ctx->is_active = 1;
939         update_context_time(ctx);
940
941         if (event->state >= PERF_EVENT_STATE_INACTIVE)
942                 goto unlock;
943         __perf_event_mark_enabled(event, ctx);
944
945         if (event->cpu != -1 && event->cpu != smp_processor_id())
946                 goto unlock;
947
948         /*
949          * If the event is in a group and isn't the group leader,
950          * then don't put it on unless the group is on.
951          */
952         if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
953                 goto unlock;
954
955         if (!group_can_go_on(event, cpuctx, 1)) {
956                 err = -EEXIST;
957         } else {
958                 perf_disable();
959                 if (event == leader)
960                         err = group_sched_in(event, cpuctx, ctx,
961                                              smp_processor_id());
962                 else
963                         err = event_sched_in(event, cpuctx, ctx,
964                                                smp_processor_id());
965                 perf_enable();
966         }
967
968         if (err) {
969                 /*
970                  * If this event can't go on and it's part of a
971                  * group, then the whole group has to come off.
972                  */
973                 if (leader != event)
974                         group_sched_out(leader, cpuctx, ctx);
975                 if (leader->attr.pinned) {
976                         update_group_times(leader);
977                         leader->state = PERF_EVENT_STATE_ERROR;
978                 }
979         }
980
981  unlock:
982         raw_spin_unlock(&ctx->lock);
983 }
984
985 /*
986  * Enable a event.
987  *
988  * If event->ctx is a cloned context, callers must make sure that
989  * every task struct that event->ctx->task could possibly point to
990  * remains valid.  This condition is satisfied when called through
991  * perf_event_for_each_child or perf_event_for_each as described
992  * for perf_event_disable.
993  */
994 void perf_event_enable(struct perf_event *event)
995 {
996         struct perf_event_context *ctx = event->ctx;
997         struct task_struct *task = ctx->task;
998
999         if (!task) {
1000                 /*
1001                  * Enable the event on the cpu that it's on
1002                  */
1003                 smp_call_function_single(event->cpu, __perf_event_enable,
1004                                          event, 1);
1005                 return;
1006         }
1007
1008         raw_spin_lock_irq(&ctx->lock);
1009         if (event->state >= PERF_EVENT_STATE_INACTIVE)
1010                 goto out;
1011
1012         /*
1013          * If the event is in error state, clear that first.
1014          * That way, if we see the event in error state below, we
1015          * know that it has gone back into error state, as distinct
1016          * from the task having been scheduled away before the
1017          * cross-call arrived.
1018          */
1019         if (event->state == PERF_EVENT_STATE_ERROR)
1020                 event->state = PERF_EVENT_STATE_OFF;
1021
1022  retry:
1023         raw_spin_unlock_irq(&ctx->lock);
1024         task_oncpu_function_call(task, __perf_event_enable, event);
1025
1026         raw_spin_lock_irq(&ctx->lock);
1027
1028         /*
1029          * If the context is active and the event is still off,
1030          * we need to retry the cross-call.
1031          */
1032         if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1033                 goto retry;
1034
1035         /*
1036          * Since we have the lock this context can't be scheduled
1037          * in, so we can change the state safely.
1038          */
1039         if (event->state == PERF_EVENT_STATE_OFF)
1040                 __perf_event_mark_enabled(event, ctx);
1041
1042  out:
1043         raw_spin_unlock_irq(&ctx->lock);
1044 }
1045
1046 static int perf_event_refresh(struct perf_event *event, int refresh)
1047 {
1048         /*
1049          * not supported on inherited events
1050          */
1051         if (event->attr.inherit)
1052                 return -EINVAL;
1053
1054         atomic_add(refresh, &event->event_limit);
1055         perf_event_enable(event);
1056
1057         return 0;
1058 }
1059
1060 void __perf_event_sched_out(struct perf_event_context *ctx,
1061                               struct perf_cpu_context *cpuctx)
1062 {
1063         struct perf_event *event;
1064
1065         raw_spin_lock(&ctx->lock);
1066         ctx->is_active = 0;
1067         if (likely(!ctx->nr_events))
1068                 goto out;
1069         update_context_time(ctx);
1070
1071         perf_disable();
1072         if (ctx->nr_active) {
1073                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1074                         group_sched_out(event, cpuctx, ctx);
1075
1076                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1077                         group_sched_out(event, cpuctx, ctx);
1078         }
1079         perf_enable();
1080  out:
1081         raw_spin_unlock(&ctx->lock);
1082 }
1083
1084 /*
1085  * Test whether two contexts are equivalent, i.e. whether they
1086  * have both been cloned from the same version of the same context
1087  * and they both have the same number of enabled events.
1088  * If the number of enabled events is the same, then the set
1089  * of enabled events should be the same, because these are both
1090  * inherited contexts, therefore we can't access individual events
1091  * in them directly with an fd; we can only enable/disable all
1092  * events via prctl, or enable/disable all events in a family
1093  * via ioctl, which will have the same effect on both contexts.
1094  */
1095 static int context_equiv(struct perf_event_context *ctx1,
1096                          struct perf_event_context *ctx2)
1097 {
1098         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1099                 && ctx1->parent_gen == ctx2->parent_gen
1100                 && !ctx1->pin_count && !ctx2->pin_count;
1101 }
1102
1103 static void __perf_event_sync_stat(struct perf_event *event,
1104                                      struct perf_event *next_event)
1105 {
1106         u64 value;
1107
1108         if (!event->attr.inherit_stat)
1109                 return;
1110
1111         /*
1112          * Update the event value, we cannot use perf_event_read()
1113          * because we're in the middle of a context switch and have IRQs
1114          * disabled, which upsets smp_call_function_single(), however
1115          * we know the event must be on the current CPU, therefore we
1116          * don't need to use it.
1117          */
1118         switch (event->state) {
1119         case PERF_EVENT_STATE_ACTIVE:
1120                 event->pmu->read(event);
1121                 /* fall-through */
1122
1123         case PERF_EVENT_STATE_INACTIVE:
1124                 update_event_times(event);
1125                 break;
1126
1127         default:
1128                 break;
1129         }
1130
1131         /*
1132          * In order to keep per-task stats reliable we need to flip the event
1133          * values when we flip the contexts.
1134          */
1135         value = atomic64_read(&next_event->count);
1136         value = atomic64_xchg(&event->count, value);
1137         atomic64_set(&next_event->count, value);
1138
1139         swap(event->total_time_enabled, next_event->total_time_enabled);
1140         swap(event->total_time_running, next_event->total_time_running);
1141
1142         /*
1143          * Since we swizzled the values, update the user visible data too.
1144          */
1145         perf_event_update_userpage(event);
1146         perf_event_update_userpage(next_event);
1147 }
1148
1149 #define list_next_entry(pos, member) \
1150         list_entry(pos->member.next, typeof(*pos), member)
1151
1152 static void perf_event_sync_stat(struct perf_event_context *ctx,
1153                                    struct perf_event_context *next_ctx)
1154 {
1155         struct perf_event *event, *next_event;
1156
1157         if (!ctx->nr_stat)
1158                 return;
1159
1160         update_context_time(ctx);
1161
1162         event = list_first_entry(&ctx->event_list,
1163                                    struct perf_event, event_entry);
1164
1165         next_event = list_first_entry(&next_ctx->event_list,
1166                                         struct perf_event, event_entry);
1167
1168         while (&event->event_entry != &ctx->event_list &&
1169                &next_event->event_entry != &next_ctx->event_list) {
1170
1171                 __perf_event_sync_stat(event, next_event);
1172
1173                 event = list_next_entry(event, event_entry);
1174                 next_event = list_next_entry(next_event, event_entry);
1175         }
1176 }
1177
1178 /*
1179  * Called from scheduler to remove the events of the current task,
1180  * with interrupts disabled.
1181  *
1182  * We stop each event and update the event value in event->count.
1183  *
1184  * This does not protect us against NMI, but disable()
1185  * sets the disabled bit in the control field of event _before_
1186  * accessing the event control register. If a NMI hits, then it will
1187  * not restart the event.
1188  */
1189 void perf_event_task_sched_out(struct task_struct *task,
1190                                  struct task_struct *next)
1191 {
1192         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1193         struct perf_event_context *ctx = task->perf_event_ctxp;
1194         struct perf_event_context *next_ctx;
1195         struct perf_event_context *parent;
1196         struct pt_regs *regs;
1197         int do_switch = 1;
1198
1199         regs = task_pt_regs(task);
1200         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1201
1202         if (likely(!ctx || !cpuctx->task_ctx))
1203                 return;
1204
1205         rcu_read_lock();
1206         parent = rcu_dereference(ctx->parent_ctx);
1207         next_ctx = next->perf_event_ctxp;
1208         if (parent && next_ctx &&
1209             rcu_dereference(next_ctx->parent_ctx) == parent) {
1210                 /*
1211                  * Looks like the two contexts are clones, so we might be
1212                  * able to optimize the context switch.  We lock both
1213                  * contexts and check that they are clones under the
1214                  * lock (including re-checking that neither has been
1215                  * uncloned in the meantime).  It doesn't matter which
1216                  * order we take the locks because no other cpu could
1217                  * be trying to lock both of these tasks.
1218                  */
1219                 raw_spin_lock(&ctx->lock);
1220                 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1221                 if (context_equiv(ctx, next_ctx)) {
1222                         /*
1223                          * XXX do we need a memory barrier of sorts
1224                          * wrt to rcu_dereference() of perf_event_ctxp
1225                          */
1226                         task->perf_event_ctxp = next_ctx;
1227                         next->perf_event_ctxp = ctx;
1228                         ctx->task = next;
1229                         next_ctx->task = task;
1230                         do_switch = 0;
1231
1232                         perf_event_sync_stat(ctx, next_ctx);
1233                 }
1234                 raw_spin_unlock(&next_ctx->lock);
1235                 raw_spin_unlock(&ctx->lock);
1236         }
1237         rcu_read_unlock();
1238
1239         if (do_switch) {
1240                 __perf_event_sched_out(ctx, cpuctx);
1241                 cpuctx->task_ctx = NULL;
1242         }
1243 }
1244
1245 /*
1246  * Called with IRQs disabled
1247  */
1248 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1249 {
1250         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1251
1252         if (!cpuctx->task_ctx)
1253                 return;
1254
1255         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1256                 return;
1257
1258         __perf_event_sched_out(ctx, cpuctx);
1259         cpuctx->task_ctx = NULL;
1260 }
1261
1262 /*
1263  * Called with IRQs disabled
1264  */
1265 static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
1266 {
1267         __perf_event_sched_out(&cpuctx->ctx, cpuctx);
1268 }
1269
1270 static void
1271 __perf_event_sched_in(struct perf_event_context *ctx,
1272                         struct perf_cpu_context *cpuctx)
1273 {
1274         int cpu = smp_processor_id();
1275         struct perf_event *event;
1276         int can_add_hw = 1;
1277
1278         raw_spin_lock(&ctx->lock);
1279         ctx->is_active = 1;
1280         if (likely(!ctx->nr_events))
1281                 goto out;
1282
1283         ctx->timestamp = perf_clock();
1284
1285         perf_disable();
1286
1287         /*
1288          * First go through the list and put on any pinned groups
1289          * in order to give them the best chance of going on.
1290          */
1291         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1292                 if (event->state <= PERF_EVENT_STATE_OFF)
1293                         continue;
1294                 if (event->cpu != -1 && event->cpu != cpu)
1295                         continue;
1296
1297                 if (group_can_go_on(event, cpuctx, 1))
1298                         group_sched_in(event, cpuctx, ctx, cpu);
1299
1300                 /*
1301                  * If this pinned group hasn't been scheduled,
1302                  * put it in error state.
1303                  */
1304                 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1305                         update_group_times(event);
1306                         event->state = PERF_EVENT_STATE_ERROR;
1307                 }
1308         }
1309
1310         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1311                 /* Ignore events in OFF or ERROR state */
1312                 if (event->state <= PERF_EVENT_STATE_OFF)
1313                         continue;
1314                 /*
1315                  * Listen to the 'cpu' scheduling filter constraint
1316                  * of events:
1317                  */
1318                 if (event->cpu != -1 && event->cpu != cpu)
1319                         continue;
1320
1321                 if (group_can_go_on(event, cpuctx, can_add_hw))
1322                         if (group_sched_in(event, cpuctx, ctx, cpu))
1323                                 can_add_hw = 0;
1324         }
1325         perf_enable();
1326  out:
1327         raw_spin_unlock(&ctx->lock);
1328 }
1329
1330 /*
1331  * Called from scheduler to add the events of the current task
1332  * with interrupts disabled.
1333  *
1334  * We restore the event value and then enable it.
1335  *
1336  * This does not protect us against NMI, but enable()
1337  * sets the enabled bit in the control field of event _before_
1338  * accessing the event control register. If a NMI hits, then it will
1339  * keep the event running.
1340  */
1341 void perf_event_task_sched_in(struct task_struct *task)
1342 {
1343         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1344         struct perf_event_context *ctx = task->perf_event_ctxp;
1345
1346         if (likely(!ctx))
1347                 return;
1348         if (cpuctx->task_ctx == ctx)
1349                 return;
1350         __perf_event_sched_in(ctx, cpuctx);
1351         cpuctx->task_ctx = ctx;
1352 }
1353
1354 static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
1355 {
1356         struct perf_event_context *ctx = &cpuctx->ctx;
1357
1358         __perf_event_sched_in(ctx, cpuctx);
1359 }
1360
1361 #define MAX_INTERRUPTS (~0ULL)
1362
1363 static void perf_log_throttle(struct perf_event *event, int enable);
1364
1365 static void perf_adjust_period(struct perf_event *event, u64 events)
1366 {
1367         struct hw_perf_event *hwc = &event->hw;
1368         u64 period, sample_period;
1369         s64 delta;
1370
1371         events *= hwc->sample_period;
1372         period = div64_u64(events, event->attr.sample_freq);
1373
1374         delta = (s64)(period - hwc->sample_period);
1375         delta = (delta + 7) / 8; /* low pass filter */
1376
1377         sample_period = hwc->sample_period + delta;
1378
1379         if (!sample_period)
1380                 sample_period = 1;
1381
1382         hwc->sample_period = sample_period;
1383 }
1384
1385 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1386 {
1387         struct perf_event *event;
1388         struct hw_perf_event *hwc;
1389         u64 interrupts, freq;
1390
1391         raw_spin_lock(&ctx->lock);
1392         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1393                 if (event->state != PERF_EVENT_STATE_ACTIVE)
1394                         continue;
1395
1396                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1397                         continue;
1398
1399                 hwc = &event->hw;
1400
1401                 interrupts = hwc->interrupts;
1402                 hwc->interrupts = 0;
1403
1404                 /*
1405                  * unthrottle events on the tick
1406                  */
1407                 if (interrupts == MAX_INTERRUPTS) {
1408                         perf_log_throttle(event, 1);
1409                         event->pmu->unthrottle(event);
1410                         interrupts = 2*sysctl_perf_event_sample_rate/HZ;
1411                 }
1412
1413                 if (!event->attr.freq || !event->attr.sample_freq)
1414                         continue;
1415
1416                 /*
1417                  * if the specified freq < HZ then we need to skip ticks
1418                  */
1419                 if (event->attr.sample_freq < HZ) {
1420                         freq = event->attr.sample_freq;
1421
1422                         hwc->freq_count += freq;
1423                         hwc->freq_interrupts += interrupts;
1424
1425                         if (hwc->freq_count < HZ)
1426                                 continue;
1427
1428                         interrupts = hwc->freq_interrupts;
1429                         hwc->freq_interrupts = 0;
1430                         hwc->freq_count -= HZ;
1431                 } else
1432                         freq = HZ;
1433
1434                 perf_adjust_period(event, freq * interrupts);
1435
1436                 /*
1437                  * In order to avoid being stalled by an (accidental) huge
1438                  * sample period, force reset the sample period if we didn't
1439                  * get any events in this freq period.
1440                  */
1441                 if (!interrupts) {
1442                         perf_disable();
1443                         event->pmu->disable(event);
1444                         atomic64_set(&hwc->period_left, 0);
1445                         event->pmu->enable(event);
1446                         perf_enable();
1447                 }
1448         }
1449         raw_spin_unlock(&ctx->lock);
1450 }
1451
1452 /*
1453  * Round-robin a context's events:
1454  */
1455 static void rotate_ctx(struct perf_event_context *ctx)
1456 {
1457         if (!ctx->nr_events)
1458                 return;
1459
1460         raw_spin_lock(&ctx->lock);
1461
1462         /* Rotate the first entry last of non-pinned groups */
1463         perf_disable();
1464
1465         list_rotate_left(&ctx->flexible_groups);
1466
1467         perf_enable();
1468
1469         raw_spin_unlock(&ctx->lock);
1470 }
1471
1472 void perf_event_task_tick(struct task_struct *curr)
1473 {
1474         struct perf_cpu_context *cpuctx;
1475         struct perf_event_context *ctx;
1476
1477         if (!atomic_read(&nr_events))
1478                 return;
1479
1480         cpuctx = &__get_cpu_var(perf_cpu_context);
1481         ctx = curr->perf_event_ctxp;
1482
1483         perf_ctx_adjust_freq(&cpuctx->ctx);
1484         if (ctx)
1485                 perf_ctx_adjust_freq(ctx);
1486
1487         perf_event_cpu_sched_out(cpuctx);
1488         if (ctx)
1489                 __perf_event_task_sched_out(ctx);
1490
1491         rotate_ctx(&cpuctx->ctx);
1492         if (ctx)
1493                 rotate_ctx(ctx);
1494
1495         perf_event_cpu_sched_in(cpuctx);
1496         if (ctx)
1497                 perf_event_task_sched_in(curr);
1498 }
1499
1500 static int event_enable_on_exec(struct perf_event *event,
1501                                 struct perf_event_context *ctx)
1502 {
1503         if (!event->attr.enable_on_exec)
1504                 return 0;
1505
1506         event->attr.enable_on_exec = 0;
1507         if (event->state >= PERF_EVENT_STATE_INACTIVE)
1508                 return 0;
1509
1510         __perf_event_mark_enabled(event, ctx);
1511
1512         return 1;
1513 }
1514
1515 /*
1516  * Enable all of a task's events that have been marked enable-on-exec.
1517  * This expects task == current.
1518  */
1519 static void perf_event_enable_on_exec(struct task_struct *task)
1520 {
1521         struct perf_event_context *ctx;
1522         struct perf_event *event;
1523         unsigned long flags;
1524         int enabled = 0;
1525         int ret;
1526
1527         local_irq_save(flags);
1528         ctx = task->perf_event_ctxp;
1529         if (!ctx || !ctx->nr_events)
1530                 goto out;
1531
1532         __perf_event_task_sched_out(ctx);
1533
1534         raw_spin_lock(&ctx->lock);
1535
1536         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1537                 ret = event_enable_on_exec(event, ctx);
1538                 if (ret)
1539                         enabled = 1;
1540         }
1541
1542         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1543                 ret = event_enable_on_exec(event, ctx);
1544                 if (ret)
1545                         enabled = 1;
1546         }
1547
1548         /*
1549          * Unclone this context if we enabled any event.
1550          */
1551         if (enabled)
1552                 unclone_ctx(ctx);
1553
1554         raw_spin_unlock(&ctx->lock);
1555
1556         perf_event_task_sched_in(task);
1557  out:
1558         local_irq_restore(flags);
1559 }
1560
1561 /*
1562  * Cross CPU call to read the hardware event
1563  */
1564 static void __perf_event_read(void *info)
1565 {
1566         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1567         struct perf_event *event = info;
1568         struct perf_event_context *ctx = event->ctx;
1569
1570         /*
1571          * If this is a task context, we need to check whether it is
1572          * the current task context of this cpu.  If not it has been
1573          * scheduled out before the smp call arrived.  In that case
1574          * event->count would have been updated to a recent sample
1575          * when the event was scheduled out.
1576          */
1577         if (ctx->task && cpuctx->task_ctx != ctx)
1578                 return;
1579
1580         raw_spin_lock(&ctx->lock);
1581         update_context_time(ctx);
1582         update_event_times(event);
1583         raw_spin_unlock(&ctx->lock);
1584
1585         event->pmu->read(event);
1586 }
1587
1588 static u64 perf_event_read(struct perf_event *event)
1589 {
1590         /*
1591          * If event is enabled and currently active on a CPU, update the
1592          * value in the event structure:
1593          */
1594         if (event->state == PERF_EVENT_STATE_ACTIVE) {
1595                 smp_call_function_single(event->oncpu,
1596                                          __perf_event_read, event, 1);
1597         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1598                 struct perf_event_context *ctx = event->ctx;
1599                 unsigned long flags;
1600
1601                 raw_spin_lock_irqsave(&ctx->lock, flags);
1602                 update_context_time(ctx);
1603                 update_event_times(event);
1604                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1605         }
1606
1607         return atomic64_read(&event->count);
1608 }
1609
1610 /*
1611  * Initialize the perf_event context in a task_struct:
1612  */
1613 static void
1614 __perf_event_init_context(struct perf_event_context *ctx,
1615                             struct task_struct *task)
1616 {
1617         raw_spin_lock_init(&ctx->lock);
1618         mutex_init(&ctx->mutex);
1619         INIT_LIST_HEAD(&ctx->pinned_groups);
1620         INIT_LIST_HEAD(&ctx->flexible_groups);
1621         INIT_LIST_HEAD(&ctx->event_list);
1622         atomic_set(&ctx->refcount, 1);
1623         ctx->task = task;
1624 }
1625
1626 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1627 {
1628         struct perf_event_context *ctx;
1629         struct perf_cpu_context *cpuctx;
1630         struct task_struct *task;
1631         unsigned long flags;
1632         int err;
1633
1634         if (pid == -1 && cpu != -1) {
1635                 /* Must be root to operate on a CPU event: */
1636                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1637                         return ERR_PTR(-EACCES);
1638
1639                 if (cpu < 0 || cpu >= nr_cpumask_bits)
1640                         return ERR_PTR(-EINVAL);
1641
1642                 /*
1643                  * We could be clever and allow to attach a event to an
1644                  * offline CPU and activate it when the CPU comes up, but
1645                  * that's for later.
1646                  */
1647                 if (!cpu_online(cpu))
1648                         return ERR_PTR(-ENODEV);
1649
1650                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1651                 ctx = &cpuctx->ctx;
1652                 get_ctx(ctx);
1653
1654                 return ctx;
1655         }
1656
1657         rcu_read_lock();
1658         if (!pid)
1659                 task = current;
1660         else
1661                 task = find_task_by_vpid(pid);
1662         if (task)
1663                 get_task_struct(task);
1664         rcu_read_unlock();
1665
1666         if (!task)
1667                 return ERR_PTR(-ESRCH);
1668
1669         /*
1670          * Can't attach events to a dying task.
1671          */
1672         err = -ESRCH;
1673         if (task->flags & PF_EXITING)
1674                 goto errout;
1675
1676         /* Reuse ptrace permission checks for now. */
1677         err = -EACCES;
1678         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1679                 goto errout;
1680
1681  retry:
1682         ctx = perf_lock_task_context(task, &flags);
1683         if (ctx) {
1684                 unclone_ctx(ctx);
1685                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1686         }
1687
1688         if (!ctx) {
1689                 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1690                 err = -ENOMEM;
1691                 if (!ctx)
1692                         goto errout;
1693                 __perf_event_init_context(ctx, task);
1694                 get_ctx(ctx);
1695                 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1696                         /*
1697                          * We raced with some other task; use
1698                          * the context they set.
1699                          */
1700                         kfree(ctx);
1701                         goto retry;
1702                 }
1703                 get_task_struct(task);
1704         }
1705
1706         put_task_struct(task);
1707         return ctx;
1708
1709  errout:
1710         put_task_struct(task);
1711         return ERR_PTR(err);
1712 }
1713
1714 static void perf_event_free_filter(struct perf_event *event);
1715
1716 static void free_event_rcu(struct rcu_head *head)
1717 {
1718         struct perf_event *event;
1719
1720         event = container_of(head, struct perf_event, rcu_head);
1721         if (event->ns)
1722                 put_pid_ns(event->ns);
1723         perf_event_free_filter(event);
1724         kfree(event);
1725 }
1726
1727 static void perf_pending_sync(struct perf_event *event);
1728
1729 static void free_event(struct perf_event *event)
1730 {
1731         perf_pending_sync(event);
1732
1733         if (!event->parent) {
1734                 atomic_dec(&nr_events);
1735                 if (event->attr.mmap)
1736                         atomic_dec(&nr_mmap_events);
1737                 if (event->attr.comm)
1738                         atomic_dec(&nr_comm_events);
1739                 if (event->attr.task)
1740                         atomic_dec(&nr_task_events);
1741         }
1742
1743         if (event->output) {
1744                 fput(event->output->filp);
1745                 event->output = NULL;
1746         }
1747
1748         if (event->destroy)
1749                 event->destroy(event);
1750
1751         put_ctx(event->ctx);
1752         call_rcu(&event->rcu_head, free_event_rcu);
1753 }
1754
1755 int perf_event_release_kernel(struct perf_event *event)
1756 {
1757         struct perf_event_context *ctx = event->ctx;
1758
1759         WARN_ON_ONCE(ctx->parent_ctx);
1760         mutex_lock(&ctx->mutex);
1761         perf_event_remove_from_context(event);
1762         mutex_unlock(&ctx->mutex);
1763
1764         mutex_lock(&event->owner->perf_event_mutex);
1765         list_del_init(&event->owner_entry);
1766         mutex_unlock(&event->owner->perf_event_mutex);
1767         put_task_struct(event->owner);
1768
1769         free_event(event);
1770
1771         return 0;
1772 }
1773 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1774
1775 /*
1776  * Called when the last reference to the file is gone.
1777  */
1778 static int perf_release(struct inode *inode, struct file *file)
1779 {
1780         struct perf_event *event = file->private_data;
1781
1782         file->private_data = NULL;
1783
1784         return perf_event_release_kernel(event);
1785 }
1786
1787 static int perf_event_read_size(struct perf_event *event)
1788 {
1789         int entry = sizeof(u64); /* value */
1790         int size = 0;
1791         int nr = 1;
1792
1793         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1794                 size += sizeof(u64);
1795
1796         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1797                 size += sizeof(u64);
1798
1799         if (event->attr.read_format & PERF_FORMAT_ID)
1800                 entry += sizeof(u64);
1801
1802         if (event->attr.read_format & PERF_FORMAT_GROUP) {
1803                 nr += event->group_leader->nr_siblings;
1804                 size += sizeof(u64);
1805         }
1806
1807         size += entry * nr;
1808
1809         return size;
1810 }
1811
1812 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
1813 {
1814         struct perf_event *child;
1815         u64 total = 0;
1816
1817         *enabled = 0;
1818         *running = 0;
1819
1820         mutex_lock(&event->child_mutex);
1821         total += perf_event_read(event);
1822         *enabled += event->total_time_enabled +
1823                         atomic64_read(&event->child_total_time_enabled);
1824         *running += event->total_time_running +
1825                         atomic64_read(&event->child_total_time_running);
1826
1827         list_for_each_entry(child, &event->child_list, child_list) {
1828                 total += perf_event_read(child);
1829                 *enabled += child->total_time_enabled;
1830                 *running += child->total_time_running;
1831         }
1832         mutex_unlock(&event->child_mutex);
1833
1834         return total;
1835 }
1836 EXPORT_SYMBOL_GPL(perf_event_read_value);
1837
1838 static int perf_event_read_group(struct perf_event *event,
1839                                    u64 read_format, char __user *buf)
1840 {
1841         struct perf_event *leader = event->group_leader, *sub;
1842         int n = 0, size = 0, ret = -EFAULT;
1843         struct perf_event_context *ctx = leader->ctx;
1844         u64 values[5];
1845         u64 count, enabled, running;
1846
1847         mutex_lock(&ctx->mutex);
1848         count = perf_event_read_value(leader, &enabled, &running);
1849
1850         values[n++] = 1 + leader->nr_siblings;
1851         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1852                 values[n++] = enabled;
1853         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1854                 values[n++] = running;
1855         values[n++] = count;
1856         if (read_format & PERF_FORMAT_ID)
1857                 values[n++] = primary_event_id(leader);
1858
1859         size = n * sizeof(u64);
1860
1861         if (copy_to_user(buf, values, size))
1862                 goto unlock;
1863
1864         ret = size;
1865
1866         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1867                 n = 0;
1868
1869                 values[n++] = perf_event_read_value(sub, &enabled, &running);
1870                 if (read_format & PERF_FORMAT_ID)
1871                         values[n++] = primary_event_id(sub);
1872
1873                 size = n * sizeof(u64);
1874
1875                 if (copy_to_user(buf + ret, values, size)) {
1876                         ret = -EFAULT;
1877                         goto unlock;
1878                 }
1879
1880                 ret += size;
1881         }
1882 unlock:
1883         mutex_unlock(&ctx->mutex);
1884
1885         return ret;
1886 }
1887
1888 static int perf_event_read_one(struct perf_event *event,
1889                                  u64 read_format, char __user *buf)
1890 {
1891         u64 enabled, running;
1892         u64 values[4];
1893         int n = 0;
1894
1895         values[n++] = perf_event_read_value(event, &enabled, &running);
1896         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1897                 values[n++] = enabled;
1898         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1899                 values[n++] = running;
1900         if (read_format & PERF_FORMAT_ID)
1901                 values[n++] = primary_event_id(event);
1902
1903         if (copy_to_user(buf, values, n * sizeof(u64)))
1904                 return -EFAULT;
1905
1906         return n * sizeof(u64);
1907 }
1908
1909 /*
1910  * Read the performance event - simple non blocking version for now
1911  */
1912 static ssize_t
1913 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1914 {
1915         u64 read_format = event->attr.read_format;
1916         int ret;
1917
1918         /*
1919          * Return end-of-file for a read on a event that is in
1920          * error state (i.e. because it was pinned but it couldn't be
1921          * scheduled on to the CPU at some point).
1922          */
1923         if (event->state == PERF_EVENT_STATE_ERROR)
1924                 return 0;
1925
1926         if (count < perf_event_read_size(event))
1927                 return -ENOSPC;
1928
1929         WARN_ON_ONCE(event->ctx->parent_ctx);
1930         if (read_format & PERF_FORMAT_GROUP)
1931                 ret = perf_event_read_group(event, read_format, buf);
1932         else
1933                 ret = perf_event_read_one(event, read_format, buf);
1934
1935         return ret;
1936 }
1937
1938 static ssize_t
1939 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1940 {
1941         struct perf_event *event = file->private_data;
1942
1943         return perf_read_hw(event, buf, count);
1944 }
1945
1946 static unsigned int perf_poll(struct file *file, poll_table *wait)
1947 {
1948         struct perf_event *event = file->private_data;
1949         struct perf_mmap_data *data;
1950         unsigned int events = POLL_HUP;
1951
1952         rcu_read_lock();
1953         data = rcu_dereference(event->data);
1954         if (data)
1955                 events = atomic_xchg(&data->poll, 0);
1956         rcu_read_unlock();
1957
1958         poll_wait(file, &event->waitq, wait);
1959
1960         return events;
1961 }
1962
1963 static void perf_event_reset(struct perf_event *event)
1964 {
1965         (void)perf_event_read(event);
1966         atomic64_set(&event->count, 0);
1967         perf_event_update_userpage(event);
1968 }
1969
1970 /*
1971  * Holding the top-level event's child_mutex means that any
1972  * descendant process that has inherited this event will block
1973  * in sync_child_event if it goes to exit, thus satisfying the
1974  * task existence requirements of perf_event_enable/disable.
1975  */
1976 static void perf_event_for_each_child(struct perf_event *event,
1977                                         void (*func)(struct perf_event *))
1978 {
1979         struct perf_event *child;
1980
1981         WARN_ON_ONCE(event->ctx->parent_ctx);
1982         mutex_lock(&event->child_mutex);
1983         func(event);
1984         list_for_each_entry(child, &event->child_list, child_list)
1985                 func(child);
1986         mutex_unlock(&event->child_mutex);
1987 }
1988
1989 static void perf_event_for_each(struct perf_event *event,
1990                                   void (*func)(struct perf_event *))
1991 {
1992         struct perf_event_context *ctx = event->ctx;
1993         struct perf_event *sibling;
1994
1995         WARN_ON_ONCE(ctx->parent_ctx);
1996         mutex_lock(&ctx->mutex);
1997         event = event->group_leader;
1998
1999         perf_event_for_each_child(event, func);
2000         func(event);
2001         list_for_each_entry(sibling, &event->sibling_list, group_entry)
2002                 perf_event_for_each_child(event, func);
2003         mutex_unlock(&ctx->mutex);
2004 }
2005
2006 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2007 {
2008         struct perf_event_context *ctx = event->ctx;
2009         unsigned long size;
2010         int ret = 0;
2011         u64 value;
2012
2013         if (!event->attr.sample_period)
2014                 return -EINVAL;
2015
2016         size = copy_from_user(&value, arg, sizeof(value));
2017         if (size != sizeof(value))
2018                 return -EFAULT;
2019
2020         if (!value)
2021                 return -EINVAL;
2022
2023         raw_spin_lock_irq(&ctx->lock);
2024         if (event->attr.freq) {
2025                 if (value > sysctl_perf_event_sample_rate) {
2026                         ret = -EINVAL;
2027                         goto unlock;
2028                 }
2029
2030                 event->attr.sample_freq = value;
2031         } else {
2032                 event->attr.sample_period = value;
2033                 event->hw.sample_period = value;
2034         }
2035 unlock:
2036         raw_spin_unlock_irq(&ctx->lock);
2037
2038         return ret;
2039 }
2040
2041 static int perf_event_set_output(struct perf_event *event, int output_fd);
2042 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2043
2044 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2045 {
2046         struct perf_event *event = file->private_data;
2047         void (*func)(struct perf_event *);
2048         u32 flags = arg;
2049
2050         switch (cmd) {
2051         case PERF_EVENT_IOC_ENABLE:
2052                 func = perf_event_enable;
2053                 break;
2054         case PERF_EVENT_IOC_DISABLE:
2055                 func = perf_event_disable;
2056                 break;
2057         case PERF_EVENT_IOC_RESET:
2058                 func = perf_event_reset;
2059                 break;
2060
2061         case PERF_EVENT_IOC_REFRESH:
2062                 return perf_event_refresh(event, arg);
2063
2064         case PERF_EVENT_IOC_PERIOD:
2065                 return perf_event_period(event, (u64 __user *)arg);
2066
2067         case PERF_EVENT_IOC_SET_OUTPUT:
2068                 return perf_event_set_output(event, arg);
2069
2070         case PERF_EVENT_IOC_SET_FILTER:
2071                 return perf_event_set_filter(event, (void __user *)arg);
2072
2073         default:
2074                 return -ENOTTY;
2075         }
2076
2077         if (flags & PERF_IOC_FLAG_GROUP)
2078                 perf_event_for_each(event, func);
2079         else
2080                 perf_event_for_each_child(event, func);
2081
2082         return 0;
2083 }
2084
2085 int perf_event_task_enable(void)
2086 {
2087         struct perf_event *event;
2088
2089         mutex_lock(&current->perf_event_mutex);
2090         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2091                 perf_event_for_each_child(event, perf_event_enable);
2092         mutex_unlock(&current->perf_event_mutex);
2093
2094         return 0;
2095 }
2096
2097 int perf_event_task_disable(void)
2098 {
2099         struct perf_event *event;
2100
2101         mutex_lock(&current->perf_event_mutex);
2102         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2103                 perf_event_for_each_child(event, perf_event_disable);
2104         mutex_unlock(&current->perf_event_mutex);
2105
2106         return 0;
2107 }
2108
2109 #ifndef PERF_EVENT_INDEX_OFFSET
2110 # define PERF_EVENT_INDEX_OFFSET 0
2111 #endif
2112
2113 static int perf_event_index(struct perf_event *event)
2114 {
2115         if (event->state != PERF_EVENT_STATE_ACTIVE)
2116                 return 0;
2117
2118         return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2119 }
2120
2121 /*
2122  * Callers need to ensure there can be no nesting of this function, otherwise
2123  * the seqlock logic goes bad. We can not serialize this because the arch
2124  * code calls this from NMI context.
2125  */
2126 void perf_event_update_userpage(struct perf_event *event)
2127 {
2128         struct perf_event_mmap_page *userpg;
2129         struct perf_mmap_data *data;
2130
2131         rcu_read_lock();
2132         data = rcu_dereference(event->data);
2133         if (!data)
2134                 goto unlock;
2135
2136         userpg = data->user_page;
2137
2138         /*
2139          * Disable preemption so as to not let the corresponding user-space
2140          * spin too long if we get preempted.
2141          */
2142         preempt_disable();
2143         ++userpg->lock;
2144         barrier();
2145         userpg->index = perf_event_index(event);
2146         userpg->offset = atomic64_read(&event->count);
2147         if (event->state == PERF_EVENT_STATE_ACTIVE)
2148                 userpg->offset -= atomic64_read(&event->hw.prev_count);
2149
2150         userpg->time_enabled = event->total_time_enabled +
2151                         atomic64_read(&event->child_total_time_enabled);
2152
2153         userpg->time_running = event->total_time_running +
2154                         atomic64_read(&event->child_total_time_running);
2155
2156         barrier();
2157         ++userpg->lock;
2158         preempt_enable();
2159 unlock:
2160         rcu_read_unlock();
2161 }
2162
2163 static unsigned long perf_data_size(struct perf_mmap_data *data)
2164 {
2165         return data->nr_pages << (PAGE_SHIFT + data->data_order);
2166 }
2167
2168 #ifndef CONFIG_PERF_USE_VMALLOC
2169
2170 /*
2171  * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2172  */
2173
2174 static struct page *
2175 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2176 {
2177         if (pgoff > data->nr_pages)
2178                 return NULL;
2179
2180         if (pgoff == 0)
2181                 return virt_to_page(data->user_page);
2182
2183         return virt_to_page(data->data_pages[pgoff - 1]);
2184 }
2185
2186 static struct perf_mmap_data *
2187 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2188 {
2189         struct perf_mmap_data *data;
2190         unsigned long size;
2191         int i;
2192
2193         WARN_ON(atomic_read(&event->mmap_count));
2194
2195         size = sizeof(struct perf_mmap_data);
2196         size += nr_pages * sizeof(void *);
2197
2198         data = kzalloc(size, GFP_KERNEL);
2199         if (!data)
2200                 goto fail;
2201
2202         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2203         if (!data->user_page)
2204                 goto fail_user_page;
2205
2206         for (i = 0; i < nr_pages; i++) {
2207                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2208                 if (!data->data_pages[i])
2209                         goto fail_data_pages;
2210         }
2211
2212         data->data_order = 0;
2213         data->nr_pages = nr_pages;
2214
2215         return data;
2216
2217 fail_data_pages:
2218         for (i--; i >= 0; i--)
2219                 free_page((unsigned long)data->data_pages[i]);
2220
2221         free_page((unsigned long)data->user_page);
2222
2223 fail_user_page:
2224         kfree(data);
2225
2226 fail:
2227         return NULL;
2228 }
2229
2230 static void perf_mmap_free_page(unsigned long addr)
2231 {
2232         struct page *page = virt_to_page((void *)addr);
2233
2234         page->mapping = NULL;
2235         __free_page(page);
2236 }
2237
2238 static void perf_mmap_data_free(struct perf_mmap_data *data)
2239 {
2240         int i;
2241
2242         perf_mmap_free_page((unsigned long)data->user_page);
2243         for (i = 0; i < data->nr_pages; i++)
2244                 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2245         kfree(data);
2246 }
2247
2248 #else
2249
2250 /*
2251  * Back perf_mmap() with vmalloc memory.
2252  *
2253  * Required for architectures that have d-cache aliasing issues.
2254  */
2255
2256 static struct page *
2257 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2258 {
2259         if (pgoff > (1UL << data->data_order))
2260                 return NULL;
2261
2262         return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2263 }
2264
2265 static void perf_mmap_unmark_page(void *addr)
2266 {
2267         struct page *page = vmalloc_to_page(addr);
2268
2269         page->mapping = NULL;
2270 }
2271
2272 static void perf_mmap_data_free_work(struct work_struct *work)
2273 {
2274         struct perf_mmap_data *data;
2275         void *base;
2276         int i, nr;
2277
2278         data = container_of(work, struct perf_mmap_data, work);
2279         nr = 1 << data->data_order;
2280
2281         base = data->user_page;
2282         for (i = 0; i < nr + 1; i++)
2283                 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2284
2285         vfree(base);
2286         kfree(data);
2287 }
2288
2289 static void perf_mmap_data_free(struct perf_mmap_data *data)
2290 {
2291         schedule_work(&data->work);
2292 }
2293
2294 static struct perf_mmap_data *
2295 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2296 {
2297         struct perf_mmap_data *data;
2298         unsigned long size;
2299         void *all_buf;
2300
2301         WARN_ON(atomic_read(&event->mmap_count));
2302
2303         size = sizeof(struct perf_mmap_data);
2304         size += sizeof(void *);
2305
2306         data = kzalloc(size, GFP_KERNEL);
2307         if (!data)
2308                 goto fail;
2309
2310         INIT_WORK(&data->work, perf_mmap_data_free_work);
2311
2312         all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2313         if (!all_buf)
2314                 goto fail_all_buf;
2315
2316         data->user_page = all_buf;
2317         data->data_pages[0] = all_buf + PAGE_SIZE;
2318         data->data_order = ilog2(nr_pages);
2319         data->nr_pages = 1;
2320
2321         return data;
2322
2323 fail_all_buf:
2324         kfree(data);
2325
2326 fail:
2327         return NULL;
2328 }
2329
2330 #endif
2331
2332 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2333 {
2334         struct perf_event *event = vma->vm_file->private_data;
2335         struct perf_mmap_data *data;
2336         int ret = VM_FAULT_SIGBUS;
2337
2338         if (vmf->flags & FAULT_FLAG_MKWRITE) {
2339                 if (vmf->pgoff == 0)
2340                         ret = 0;
2341                 return ret;
2342         }
2343
2344         rcu_read_lock();
2345         data = rcu_dereference(event->data);
2346         if (!data)
2347                 goto unlock;
2348
2349         if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2350                 goto unlock;
2351
2352         vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2353         if (!vmf->page)
2354                 goto unlock;
2355
2356         get_page(vmf->page);
2357         vmf->page->mapping = vma->vm_file->f_mapping;
2358         vmf->page->index   = vmf->pgoff;
2359
2360         ret = 0;
2361 unlock:
2362         rcu_read_unlock();
2363
2364         return ret;
2365 }
2366
2367 static void
2368 perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2369 {
2370         long max_size = perf_data_size(data);
2371
2372         atomic_set(&data->lock, -1);
2373
2374         if (event->attr.watermark) {
2375                 data->watermark = min_t(long, max_size,
2376                                         event->attr.wakeup_watermark);
2377         }
2378
2379         if (!data->watermark)
2380                 data->watermark = max_size / 2;
2381
2382
2383         rcu_assign_pointer(event->data, data);
2384 }
2385
2386 static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2387 {
2388         struct perf_mmap_data *data;
2389
2390         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2391         perf_mmap_data_free(data);
2392 }
2393
2394 static void perf_mmap_data_release(struct perf_event *event)
2395 {
2396         struct perf_mmap_data *data = event->data;
2397
2398         WARN_ON(atomic_read(&event->mmap_count));
2399
2400         rcu_assign_pointer(event->data, NULL);
2401         call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2402 }
2403
2404 static void perf_mmap_open(struct vm_area_struct *vma)
2405 {
2406         struct perf_event *event = vma->vm_file->private_data;
2407
2408         atomic_inc(&event->mmap_count);
2409 }
2410
2411 static void perf_mmap_close(struct vm_area_struct *vma)
2412 {
2413         struct perf_event *event = vma->vm_file->private_data;
2414
2415         WARN_ON_ONCE(event->ctx->parent_ctx);
2416         if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2417                 unsigned long size = perf_data_size(event->data);
2418                 struct user_struct *user = current_user();
2419
2420                 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2421                 vma->vm_mm->locked_vm -= event->data->nr_locked;
2422                 perf_mmap_data_release(event);
2423                 mutex_unlock(&event->mmap_mutex);
2424         }
2425 }
2426
2427 static const struct vm_operations_struct perf_mmap_vmops = {
2428         .open           = perf_mmap_open,
2429         .close          = perf_mmap_close,
2430         .fault          = perf_mmap_fault,
2431         .page_mkwrite   = perf_mmap_fault,
2432 };
2433
2434 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2435 {
2436         struct perf_event *event = file->private_data;
2437         unsigned long user_locked, user_lock_limit;
2438         struct user_struct *user = current_user();
2439         unsigned long locked, lock_limit;
2440         struct perf_mmap_data *data;
2441         unsigned long vma_size;
2442         unsigned long nr_pages;
2443         long user_extra, extra;
2444         int ret = 0;
2445
2446         if (!(vma->vm_flags & VM_SHARED))
2447                 return -EINVAL;
2448
2449         vma_size = vma->vm_end - vma->vm_start;
2450         nr_pages = (vma_size / PAGE_SIZE) - 1;
2451
2452         /*
2453          * If we have data pages ensure they're a power-of-two number, so we
2454          * can do bitmasks instead of modulo.
2455          */
2456         if (nr_pages != 0 && !is_power_of_2(nr_pages))
2457                 return -EINVAL;
2458
2459         if (vma_size != PAGE_SIZE * (1 + nr_pages))
2460                 return -EINVAL;
2461
2462         if (vma->vm_pgoff != 0)
2463                 return -EINVAL;
2464
2465         WARN_ON_ONCE(event->ctx->parent_ctx);
2466         mutex_lock(&event->mmap_mutex);
2467         if (event->output) {
2468                 ret = -EINVAL;
2469                 goto unlock;
2470         }
2471
2472         if (atomic_inc_not_zero(&event->mmap_count)) {
2473                 if (nr_pages != event->data->nr_pages)
2474                         ret = -EINVAL;
2475                 goto unlock;
2476         }
2477
2478         user_extra = nr_pages + 1;
2479         user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2480
2481         /*
2482          * Increase the limit linearly with more CPUs:
2483          */
2484         user_lock_limit *= num_online_cpus();
2485
2486         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2487
2488         extra = 0;
2489         if (user_locked > user_lock_limit)
2490                 extra = user_locked - user_lock_limit;
2491
2492         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2493         lock_limit >>= PAGE_SHIFT;
2494         locked = vma->vm_mm->locked_vm + extra;
2495
2496         if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2497                 !capable(CAP_IPC_LOCK)) {
2498                 ret = -EPERM;
2499                 goto unlock;
2500         }
2501
2502         WARN_ON(event->data);
2503
2504         data = perf_mmap_data_alloc(event, nr_pages);
2505         ret = -ENOMEM;
2506         if (!data)
2507                 goto unlock;
2508
2509         ret = 0;
2510         perf_mmap_data_init(event, data);
2511
2512         atomic_set(&event->mmap_count, 1);
2513         atomic_long_add(user_extra, &user->locked_vm);
2514         vma->vm_mm->locked_vm += extra;
2515         event->data->nr_locked = extra;
2516         if (vma->vm_flags & VM_WRITE)
2517                 event->data->writable = 1;
2518
2519 unlock:
2520         mutex_unlock(&event->mmap_mutex);
2521
2522         vma->vm_flags |= VM_RESERVED;
2523         vma->vm_ops = &perf_mmap_vmops;
2524
2525         return ret;
2526 }
2527
2528 static int perf_fasync(int fd, struct file *filp, int on)
2529 {
2530         struct inode *inode = filp->f_path.dentry->d_inode;
2531         struct perf_event *event = filp->private_data;
2532         int retval;
2533
2534         mutex_lock(&inode->i_mutex);
2535         retval = fasync_helper(fd, filp, on, &event->fasync);
2536         mutex_unlock(&inode->i_mutex);
2537
2538         if (retval < 0)
2539                 return retval;
2540
2541         return 0;
2542 }
2543
2544 static const struct file_operations perf_fops = {
2545         .release                = perf_release,
2546         .read                   = perf_read,
2547         .poll                   = perf_poll,
2548         .unlocked_ioctl         = perf_ioctl,
2549         .compat_ioctl           = perf_ioctl,
2550         .mmap                   = perf_mmap,
2551         .fasync                 = perf_fasync,
2552 };
2553
2554 /*
2555  * Perf event wakeup
2556  *
2557  * If there's data, ensure we set the poll() state and publish everything
2558  * to user-space before waking everybody up.
2559  */
2560
2561 void perf_event_wakeup(struct perf_event *event)
2562 {
2563         wake_up_all(&event->waitq);
2564
2565         if (event->pending_kill) {
2566                 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2567                 event->pending_kill = 0;
2568         }
2569 }
2570
2571 /*
2572  * Pending wakeups
2573  *
2574  * Handle the case where we need to wakeup up from NMI (or rq->lock) context.
2575  *
2576  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2577  * single linked list and use cmpxchg() to add entries lockless.
2578  */
2579
2580 static void perf_pending_event(struct perf_pending_entry *entry)
2581 {
2582         struct perf_event *event = container_of(entry,
2583                         struct perf_event, pending);
2584
2585         if (event->pending_disable) {
2586                 event->pending_disable = 0;
2587                 __perf_event_disable(event);
2588         }
2589
2590         if (event->pending_wakeup) {
2591                 event->pending_wakeup = 0;
2592                 perf_event_wakeup(event);
2593         }
2594 }
2595
2596 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2597
2598 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2599         PENDING_TAIL,
2600 };
2601
2602 static void perf_pending_queue(struct perf_pending_entry *entry,
2603                                void (*func)(struct perf_pending_entry *))
2604 {
2605         struct perf_pending_entry **head;
2606
2607         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2608                 return;
2609
2610         entry->func = func;
2611
2612         head = &get_cpu_var(perf_pending_head);
2613
2614         do {
2615                 entry->next = *head;
2616         } while (cmpxchg(head, entry->next, entry) != entry->next);
2617
2618         set_perf_event_pending();
2619
2620         put_cpu_var(perf_pending_head);
2621 }
2622
2623 static int __perf_pending_run(void)
2624 {
2625         struct perf_pending_entry *list;
2626         int nr = 0;
2627
2628         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2629         while (list != PENDING_TAIL) {
2630                 void (*func)(struct perf_pending_entry *);
2631                 struct perf_pending_entry *entry = list;
2632
2633                 list = list->next;
2634
2635                 func = entry->func;
2636                 entry->next = NULL;
2637                 /*
2638                  * Ensure we observe the unqueue before we issue the wakeup,
2639                  * so that we won't be waiting forever.
2640                  * -- see perf_not_pending().
2641                  */
2642                 smp_wmb();
2643
2644                 func(entry);
2645                 nr++;
2646         }
2647
2648         return nr;
2649 }
2650
2651 static inline int perf_not_pending(struct perf_event *event)
2652 {
2653         /*
2654          * If we flush on whatever cpu we run, there is a chance we don't
2655          * need to wait.
2656          */
2657         get_cpu();
2658         __perf_pending_run();
2659         put_cpu();
2660
2661         /*
2662          * Ensure we see the proper queue state before going to sleep
2663          * so that we do not miss the wakeup. -- see perf_pending_handle()
2664          */
2665         smp_rmb();
2666         return event->pending.next == NULL;
2667 }
2668
2669 static void perf_pending_sync(struct perf_event *event)
2670 {
2671         wait_event(event->waitq, perf_not_pending(event));
2672 }
2673
2674 void perf_event_do_pending(void)
2675 {
2676         __perf_pending_run();
2677 }
2678
2679 /*
2680  * Callchain support -- arch specific
2681  */
2682
2683 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2684 {
2685         return NULL;
2686 }
2687
2688 /*
2689  * Output
2690  */
2691 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2692                               unsigned long offset, unsigned long head)
2693 {
2694         unsigned long mask;
2695
2696         if (!data->writable)
2697                 return true;
2698
2699         mask = perf_data_size(data) - 1;
2700
2701         offset = (offset - tail) & mask;
2702         head   = (head   - tail) & mask;
2703
2704         if ((int)(head - offset) < 0)
2705                 return false;
2706
2707         return true;
2708 }
2709
2710 static void perf_output_wakeup(struct perf_output_handle *handle)
2711 {
2712         atomic_set(&handle->data->poll, POLL_IN);
2713
2714         if (handle->nmi) {
2715                 handle->event->pending_wakeup = 1;
2716                 perf_pending_queue(&handle->event->pending,
2717                                    perf_pending_event);
2718         } else
2719                 perf_event_wakeup(handle->event);
2720 }
2721
2722 /*
2723  * Curious locking construct.
2724  *
2725  * We need to ensure a later event_id doesn't publish a head when a former
2726  * event_id isn't done writing. However since we need to deal with NMIs we
2727  * cannot fully serialize things.
2728  *
2729  * What we do is serialize between CPUs so we only have to deal with NMI
2730  * nesting on a single CPU.
2731  *
2732  * We only publish the head (and generate a wakeup) when the outer-most
2733  * event_id completes.
2734  */
2735 static void perf_output_lock(struct perf_output_handle *handle)
2736 {
2737         struct perf_mmap_data *data = handle->data;
2738         int cur, cpu = get_cpu();
2739
2740         handle->locked = 0;
2741
2742         for (;;) {
2743                 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2744                 if (cur == -1) {
2745                         handle->locked = 1;
2746                         break;
2747                 }
2748                 if (cur == cpu)
2749                         break;
2750
2751                 cpu_relax();
2752         }
2753 }
2754
2755 static void perf_output_unlock(struct perf_output_handle *handle)
2756 {
2757         struct perf_mmap_data *data = handle->data;
2758         unsigned long head;
2759         int cpu;
2760
2761         data->done_head = data->head;
2762
2763         if (!handle->locked)
2764                 goto out;
2765
2766 again:
2767         /*
2768          * The xchg implies a full barrier that ensures all writes are done
2769          * before we publish the new head, matched by a rmb() in userspace when
2770          * reading this position.
2771          */
2772         while ((head = atomic_long_xchg(&data->done_head, 0)))
2773                 data->user_page->data_head = head;
2774
2775         /*
2776          * NMI can happen here, which means we can miss a done_head update.
2777          */
2778
2779         cpu = atomic_xchg(&data->lock, -1);
2780         WARN_ON_ONCE(cpu != smp_processor_id());
2781
2782         /*
2783          * Therefore we have to validate we did not indeed do so.
2784          */
2785         if (unlikely(atomic_long_read(&data->done_head))) {
2786                 /*
2787                  * Since we had it locked, we can lock it again.
2788                  */
2789                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2790                         cpu_relax();
2791
2792                 goto again;
2793         }
2794
2795         if (atomic_xchg(&data->wakeup, 0))
2796                 perf_output_wakeup(handle);
2797 out:
2798         put_cpu();
2799 }
2800
2801 void perf_output_copy(struct perf_output_handle *handle,
2802                       const void *buf, unsigned int len)
2803 {
2804         unsigned int pages_mask;
2805         unsigned long offset;
2806         unsigned int size;
2807         void **pages;
2808
2809         offset          = handle->offset;
2810         pages_mask      = handle->data->nr_pages - 1;
2811         pages           = handle->data->data_pages;
2812
2813         do {
2814                 unsigned long page_offset;
2815                 unsigned long page_size;
2816                 int nr;
2817
2818                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2819                 page_size   = 1UL << (handle->data->data_order + PAGE_SHIFT);
2820                 page_offset = offset & (page_size - 1);
2821                 size        = min_t(unsigned int, page_size - page_offset, len);
2822
2823                 memcpy(pages[nr] + page_offset, buf, size);
2824
2825                 len         -= size;
2826                 buf         += size;
2827                 offset      += size;
2828         } while (len);
2829
2830         handle->offset = offset;
2831
2832         /*
2833          * Check we didn't copy past our reservation window, taking the
2834          * possible unsigned int wrap into account.
2835          */
2836         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2837 }
2838
2839 int perf_output_begin(struct perf_output_handle *handle,
2840                       struct perf_event *event, unsigned int size,
2841                       int nmi, int sample)
2842 {
2843         struct perf_event *output_event;
2844         struct perf_mmap_data *data;
2845         unsigned long tail, offset, head;
2846         int have_lost;
2847         struct {
2848                 struct perf_event_header header;
2849                 u64                      id;
2850                 u64                      lost;
2851         } lost_event;
2852
2853         rcu_read_lock();
2854         /*
2855          * For inherited events we send all the output towards the parent.
2856          */
2857         if (event->parent)
2858                 event = event->parent;
2859
2860         output_event = rcu_dereference(event->output);
2861         if (output_event)
2862                 event = output_event;
2863
2864         data = rcu_dereference(event->data);
2865         if (!data)
2866                 goto out;
2867
2868         handle->data    = data;
2869         handle->event   = event;
2870         handle->nmi     = nmi;
2871         handle->sample  = sample;
2872
2873         if (!data->nr_pages)
2874                 goto fail;
2875
2876         have_lost = atomic_read(&data->lost);
2877         if (have_lost)
2878                 size += sizeof(lost_event);
2879
2880         perf_output_lock(handle);
2881
2882         do {
2883                 /*
2884                  * Userspace could choose to issue a mb() before updating the
2885                  * tail pointer. So that all reads will be completed before the
2886                  * write is issued.
2887                  */
2888                 tail = ACCESS_ONCE(data->user_page->data_tail);
2889                 smp_rmb();
2890                 offset = head = atomic_long_read(&data->head);
2891                 head += size;
2892                 if (unlikely(!perf_output_space(data, tail, offset, head)))
2893                         goto fail;
2894         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2895
2896         handle->offset  = offset;
2897         handle->head    = head;
2898
2899         if (head - tail > data->watermark)
2900                 atomic_set(&data->wakeup, 1);
2901
2902         if (have_lost) {
2903                 lost_event.header.type = PERF_RECORD_LOST;
2904                 lost_event.header.misc = 0;
2905                 lost_event.header.size = sizeof(lost_event);
2906                 lost_event.id          = event->id;
2907                 lost_event.lost        = atomic_xchg(&data->lost, 0);
2908
2909                 perf_output_put(handle, lost_event);
2910         }
2911
2912         return 0;
2913
2914 fail:
2915         atomic_inc(&data->lost);
2916         perf_output_unlock(handle);
2917 out:
2918         rcu_read_unlock();
2919
2920         return -ENOSPC;
2921 }
2922
2923 void perf_output_end(struct perf_output_handle *handle)
2924 {
2925         struct perf_event *event = handle->event;
2926         struct perf_mmap_data *data = handle->data;
2927
2928         int wakeup_events = event->attr.wakeup_events;
2929
2930         if (handle->sample && wakeup_events) {
2931                 int events = atomic_inc_return(&data->events);
2932                 if (events >= wakeup_events) {
2933                         atomic_sub(wakeup_events, &data->events);
2934                         atomic_set(&data->wakeup, 1);
2935                 }
2936         }
2937
2938         perf_output_unlock(handle);
2939         rcu_read_unlock();
2940 }
2941
2942 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
2943 {
2944         /*
2945          * only top level events have the pid namespace they were created in
2946          */
2947         if (event->parent)
2948                 event = event->parent;
2949
2950         return task_tgid_nr_ns(p, event->ns);
2951 }
2952
2953 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
2954 {
2955         /*
2956          * only top level events have the pid namespace they were created in
2957          */
2958         if (event->parent)
2959                 event = event->parent;
2960
2961         return task_pid_nr_ns(p, event->ns);
2962 }
2963
2964 static void perf_output_read_one(struct perf_output_handle *handle,
2965                                  struct perf_event *event)
2966 {
2967         u64 read_format = event->attr.read_format;
2968         u64 values[4];
2969         int n = 0;
2970
2971         values[n++] = atomic64_read(&event->count);
2972         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2973                 values[n++] = event->total_time_enabled +
2974                         atomic64_read(&event->child_total_time_enabled);
2975         }
2976         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2977                 values[n++] = event->total_time_running +
2978                         atomic64_read(&event->child_total_time_running);
2979         }
2980         if (read_format & PERF_FORMAT_ID)
2981                 values[n++] = primary_event_id(event);
2982
2983         perf_output_copy(handle, values, n * sizeof(u64));
2984 }
2985
2986 /*
2987  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
2988  */
2989 static void perf_output_read_group(struct perf_output_handle *handle,
2990                             struct perf_event *event)
2991 {
2992         struct perf_event *leader = event->group_leader, *sub;
2993         u64 read_format = event->attr.read_format;
2994         u64 values[5];
2995         int n = 0;
2996
2997         values[n++] = 1 + leader->nr_siblings;
2998
2999         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3000                 values[n++] = leader->total_time_enabled;
3001
3002         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3003                 values[n++] = leader->total_time_running;
3004
3005         if (leader != event)
3006                 leader->pmu->read(leader);
3007
3008         values[n++] = atomic64_read(&leader->count);
3009         if (read_format & PERF_FORMAT_ID)
3010                 values[n++] = primary_event_id(leader);
3011
3012         perf_output_copy(handle, values, n * sizeof(u64));
3013
3014         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3015                 n = 0;
3016
3017                 if (sub != event)
3018                         sub->pmu->read(sub);
3019
3020                 values[n++] = atomic64_read(&sub->count);
3021                 if (read_format & PERF_FORMAT_ID)
3022                         values[n++] = primary_event_id(sub);
3023
3024                 perf_output_copy(handle, values, n * sizeof(u64));
3025         }
3026 }
3027
3028 static void perf_output_read(struct perf_output_handle *handle,
3029                              struct perf_event *event)
3030 {
3031         if (event->attr.read_format & PERF_FORMAT_GROUP)
3032                 perf_output_read_group(handle, event);
3033         else
3034                 perf_output_read_one(handle, event);
3035 }
3036
3037 void perf_output_sample(struct perf_output_handle *handle,
3038                         struct perf_event_header *header,
3039                         struct perf_sample_data *data,
3040                         struct perf_event *event)
3041 {
3042         u64 sample_type = data->type;
3043
3044         perf_output_put(handle, *header);
3045
3046         if (sample_type & PERF_SAMPLE_IP)
3047                 perf_output_put(handle, data->ip);
3048
3049         if (sample_type & PERF_SAMPLE_TID)
3050                 perf_output_put(handle, data->tid_entry);
3051
3052         if (sample_type & PERF_SAMPLE_TIME)
3053                 perf_output_put(handle, data->time);
3054
3055         if (sample_type & PERF_SAMPLE_ADDR)
3056                 perf_output_put(handle, data->addr);
3057
3058         if (sample_type & PERF_SAMPLE_ID)
3059                 perf_output_put(handle, data->id);
3060
3061         if (sample_type & PERF_SAMPLE_STREAM_ID)
3062                 perf_output_put(handle, data->stream_id);
3063
3064         if (sample_type & PERF_SAMPLE_CPU)
3065                 perf_output_put(handle, data->cpu_entry);
3066
3067         if (sample_type & PERF_SAMPLE_PERIOD)
3068                 perf_output_put(handle, data->period);
3069
3070         if (sample_type & PERF_SAMPLE_READ)
3071                 perf_output_read(handle, event);
3072
3073         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3074                 if (data->callchain) {
3075                         int size = 1;
3076
3077                         if (data->callchain)
3078                                 size += data->callchain->nr;
3079
3080                         size *= sizeof(u64);
3081
3082                         perf_output_copy(handle, data->callchain, size);
3083                 } else {
3084                         u64 nr = 0;
3085                         perf_output_put(handle, nr);
3086                 }
3087         }
3088
3089         if (sample_type & PERF_SAMPLE_RAW) {
3090                 if (data->raw) {
3091                         perf_output_put(handle, data->raw->size);
3092                         perf_output_copy(handle, data->raw->data,
3093                                          data->raw->size);
3094                 } else {
3095                         struct {
3096                                 u32     size;
3097                                 u32     data;
3098                         } raw = {
3099                                 .size = sizeof(u32),
3100                                 .data = 0,
3101                         };
3102                         perf_output_put(handle, raw);
3103                 }
3104         }
3105 }
3106
3107 void perf_prepare_sample(struct perf_event_header *header,
3108                          struct perf_sample_data *data,
3109                          struct perf_event *event,
3110                          struct pt_regs *regs)
3111 {
3112         u64 sample_type = event->attr.sample_type;
3113
3114         data->type = sample_type;
3115
3116         header->type = PERF_RECORD_SAMPLE;
3117         header->size = sizeof(*header);
3118
3119         header->misc = 0;
3120         header->misc |= perf_misc_flags(regs);
3121
3122         if (sample_type & PERF_SAMPLE_IP) {
3123                 data->ip = perf_instruction_pointer(regs);
3124
3125                 header->size += sizeof(data->ip);
3126         }
3127
3128         if (sample_type & PERF_SAMPLE_TID) {
3129                 /* namespace issues */
3130                 data->tid_entry.pid = perf_event_pid(event, current);
3131                 data->tid_entry.tid = perf_event_tid(event, current);
3132
3133                 header->size += sizeof(data->tid_entry);
3134         }
3135
3136         if (sample_type & PERF_SAMPLE_TIME) {
3137                 data->time = perf_clock();
3138
3139                 header->size += sizeof(data->time);
3140         }
3141
3142         if (sample_type & PERF_SAMPLE_ADDR)
3143                 header->size += sizeof(data->addr);
3144
3145         if (sample_type & PERF_SAMPLE_ID) {
3146                 data->id = primary_event_id(event);
3147
3148                 header->size += sizeof(data->id);
3149         }
3150
3151         if (sample_type & PERF_SAMPLE_STREAM_ID) {
3152                 data->stream_id = event->id;
3153
3154                 header->size += sizeof(data->stream_id);
3155         }
3156
3157         if (sample_type & PERF_SAMPLE_CPU) {
3158                 data->cpu_entry.cpu             = raw_smp_processor_id();
3159                 data->cpu_entry.reserved        = 0;
3160
3161                 header->size += sizeof(data->cpu_entry);
3162         }
3163
3164         if (sample_type & PERF_SAMPLE_PERIOD)
3165                 header->size += sizeof(data->period);
3166
3167         if (sample_type & PERF_SAMPLE_READ)
3168                 header->size += perf_event_read_size(event);
3169
3170         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3171                 int size = 1;
3172
3173                 data->callchain = perf_callchain(regs);
3174
3175                 if (data->callchain)
3176                         size += data->callchain->nr;
3177
3178                 header->size += size * sizeof(u64);
3179         }
3180
3181         if (sample_type & PERF_SAMPLE_RAW) {
3182                 int size = sizeof(u32);
3183
3184                 if (data->raw)
3185                         size += data->raw->size;
3186                 else
3187                         size += sizeof(u32);
3188
3189                 WARN_ON_ONCE(size & (sizeof(u64)-1));
3190                 header->size += size;
3191         }
3192 }
3193
3194 static void perf_event_output(struct perf_event *event, int nmi,
3195                                 struct perf_sample_data *data,
3196                                 struct pt_regs *regs)
3197 {
3198         struct perf_output_handle handle;
3199         struct perf_event_header header;
3200
3201         perf_prepare_sample(&header, data, event, regs);
3202
3203         if (perf_output_begin(&handle, event, header.size, nmi, 1))
3204                 return;
3205
3206         perf_output_sample(&handle, &header, data, event);
3207
3208         perf_output_end(&handle);
3209 }
3210
3211 /*
3212  * read event_id
3213  */
3214
3215 struct perf_read_event {
3216         struct perf_event_header        header;
3217
3218         u32                             pid;
3219         u32                             tid;
3220 };
3221
3222 static void
3223 perf_event_read_event(struct perf_event *event,
3224                         struct task_struct *task)
3225 {
3226         struct perf_output_handle handle;
3227         struct perf_read_event read_event = {
3228                 .header = {
3229                         .type = PERF_RECORD_READ,
3230                         .misc = 0,
3231                         .size = sizeof(read_event) + perf_event_read_size(event),
3232                 },
3233                 .pid = perf_event_pid(event, task),
3234                 .tid = perf_event_tid(event, task),
3235         };
3236         int ret;
3237
3238         ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3239         if (ret)
3240                 return;
3241
3242         perf_output_put(&handle, read_event);
3243         perf_output_read(&handle, event);
3244
3245         perf_output_end(&handle);
3246 }
3247
3248 /*
3249  * task tracking -- fork/exit
3250  *
3251  * enabled by: attr.comm | attr.mmap | attr.task
3252  */
3253
3254 struct perf_task_event {
3255         struct task_struct              *task;
3256         struct perf_event_context       *task_ctx;
3257
3258         struct {
3259                 struct perf_event_header        header;
3260
3261                 u32                             pid;
3262                 u32                             ppid;
3263                 u32                             tid;
3264                 u32                             ptid;
3265                 u64                             time;
3266         } event_id;
3267 };
3268
3269 static void perf_event_task_output(struct perf_event *event,
3270                                      struct perf_task_event *task_event)
3271 {
3272         struct perf_output_handle handle;
3273         int size;
3274         struct task_struct *task = task_event->task;
3275         int ret;
3276
3277         size  = task_event->event_id.header.size;
3278         ret = perf_output_begin(&handle, event, size, 0, 0);
3279
3280         if (ret)
3281                 return;
3282
3283         task_event->event_id.pid = perf_event_pid(event, task);
3284         task_event->event_id.ppid = perf_event_pid(event, current);
3285
3286         task_event->event_id.tid = perf_event_tid(event, task);
3287         task_event->event_id.ptid = perf_event_tid(event, current);
3288
3289         task_event->event_id.time = perf_clock();
3290
3291         perf_output_put(&handle, task_event->event_id);
3292
3293         perf_output_end(&handle);
3294 }
3295
3296 static int perf_event_task_match(struct perf_event *event)
3297 {
3298         if (event->cpu != -1 && event->cpu != smp_processor_id())
3299                 return 0;
3300
3301         if (event->attr.comm || event->attr.mmap || event->attr.task)
3302                 return 1;
3303
3304         return 0;
3305 }
3306
3307 static void perf_event_task_ctx(struct perf_event_context *ctx,
3308                                   struct perf_task_event *task_event)
3309 {
3310         struct perf_event *event;
3311
3312         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3313                 if (perf_event_task_match(event))
3314                         perf_event_task_output(event, task_event);
3315         }
3316 }
3317
3318 static void perf_event_task_event(struct perf_task_event *task_event)
3319 {
3320         struct perf_cpu_context *cpuctx;
3321         struct perf_event_context *ctx = task_event->task_ctx;
3322
3323         rcu_read_lock();
3324         cpuctx = &get_cpu_var(perf_cpu_context);
3325         perf_event_task_ctx(&cpuctx->ctx, task_event);
3326         if (!ctx)
3327                 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3328         if (ctx)
3329                 perf_event_task_ctx(ctx, task_event);
3330         put_cpu_var(perf_cpu_context);
3331         rcu_read_unlock();
3332 }
3333
3334 static void perf_event_task(struct task_struct *task,
3335                               struct perf_event_context *task_ctx,
3336                               int new)
3337 {
3338         struct perf_task_event task_event;
3339
3340         if (!atomic_read(&nr_comm_events) &&
3341             !atomic_read(&nr_mmap_events) &&
3342             !atomic_read(&nr_task_events))
3343                 return;
3344
3345         task_event = (struct perf_task_event){
3346                 .task     = task,
3347                 .task_ctx = task_ctx,
3348                 .event_id    = {
3349                         .header = {
3350                                 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3351                                 .misc = 0,
3352                                 .size = sizeof(task_event.event_id),
3353                         },
3354                         /* .pid  */
3355                         /* .ppid */
3356                         /* .tid  */
3357                         /* .ptid */
3358                 },
3359         };
3360
3361         perf_event_task_event(&task_event);
3362 }
3363
3364 void perf_event_fork(struct task_struct *task)
3365 {
3366         perf_event_task(task, NULL, 1);
3367 }
3368
3369 /*
3370  * comm tracking
3371  */
3372
3373 struct perf_comm_event {
3374         struct task_struct      *task;
3375         char                    *comm;
3376         int                     comm_size;
3377
3378         struct {
3379                 struct perf_event_header        header;
3380
3381                 u32                             pid;
3382                 u32                             tid;
3383         } event_id;
3384 };
3385
3386 static void perf_event_comm_output(struct perf_event *event,
3387                                      struct perf_comm_event *comm_event)
3388 {
3389         struct perf_output_handle handle;
3390         int size = comm_event->event_id.header.size;
3391         int ret = perf_output_begin(&handle, event, size, 0, 0);
3392
3393         if (ret)
3394                 return;
3395
3396         comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3397         comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3398
3399         perf_output_put(&handle, comm_event->event_id);
3400         perf_output_copy(&handle, comm_event->comm,
3401                                    comm_event->comm_size);
3402         perf_output_end(&handle);
3403 }
3404
3405 static int perf_event_comm_match(struct perf_event *event)
3406 {
3407         if (event->cpu != -1 && event->cpu != smp_processor_id())
3408                 return 0;
3409
3410         if (event->attr.comm)
3411                 return 1;
3412
3413         return 0;
3414 }
3415
3416 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3417                                   struct perf_comm_event *comm_event)
3418 {
3419         struct perf_event *event;
3420
3421         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3422                 if (perf_event_comm_match(event))
3423                         perf_event_comm_output(event, comm_event);
3424         }
3425 }
3426
3427 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3428 {
3429         struct perf_cpu_context *cpuctx;
3430         struct perf_event_context *ctx;
3431         unsigned int size;
3432         char comm[TASK_COMM_LEN];
3433
3434         memset(comm, 0, sizeof(comm));
3435         strlcpy(comm, comm_event->task->comm, sizeof(comm));
3436         size = ALIGN(strlen(comm)+1, sizeof(u64));
3437
3438         comm_event->comm = comm;
3439         comm_event->comm_size = size;
3440
3441         comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3442
3443         rcu_read_lock();
3444         cpuctx = &get_cpu_var(perf_cpu_context);
3445         perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3446         ctx = rcu_dereference(current->perf_event_ctxp);
3447         if (ctx)
3448                 perf_event_comm_ctx(ctx, comm_event);
3449         put_cpu_var(perf_cpu_context);
3450         rcu_read_unlock();
3451 }
3452
3453 void perf_event_comm(struct task_struct *task)
3454 {
3455         struct perf_comm_event comm_event;
3456
3457         if (task->perf_event_ctxp)
3458                 perf_event_enable_on_exec(task);
3459
3460         if (!atomic_read(&nr_comm_events))
3461                 return;
3462
3463         comm_event = (struct perf_comm_event){
3464                 .task   = task,
3465                 /* .comm      */
3466                 /* .comm_size */
3467                 .event_id  = {
3468                         .header = {
3469                                 .type = PERF_RECORD_COMM,
3470                                 .misc = 0,
3471                                 /* .size */
3472                         },
3473                         /* .pid */
3474                         /* .tid */
3475                 },
3476         };
3477
3478         perf_event_comm_event(&comm_event);
3479 }
3480
3481 /*
3482  * mmap tracking
3483  */
3484
3485 struct perf_mmap_event {
3486         struct vm_area_struct   *vma;
3487
3488         const char              *file_name;
3489         int                     file_size;
3490
3491         struct {
3492                 struct perf_event_header        header;
3493
3494                 u32                             pid;
3495                 u32                             tid;
3496                 u64                             start;
3497                 u64                             len;
3498                 u64                             pgoff;
3499         } event_id;
3500 };
3501
3502 static void perf_event_mmap_output(struct perf_event *event,
3503                                      struct perf_mmap_event *mmap_event)
3504 {
3505         struct perf_output_handle handle;
3506         int size = mmap_event->event_id.header.size;
3507         int ret = perf_output_begin(&handle, event, size, 0, 0);
3508
3509         if (ret)
3510                 return;
3511
3512         mmap_event->event_id.pid = perf_event_pid(event, current);
3513         mmap_event->event_id.tid = perf_event_tid(event, current);
3514
3515         perf_output_put(&handle, mmap_event->event_id);
3516         perf_output_copy(&handle, mmap_event->file_name,
3517                                    mmap_event->file_size);
3518         perf_output_end(&handle);
3519 }
3520
3521 static int perf_event_mmap_match(struct perf_event *event,
3522                                    struct perf_mmap_event *mmap_event)
3523 {
3524         if (event->cpu != -1 && event->cpu != smp_processor_id())
3525                 return 0;
3526
3527         if (event->attr.mmap)
3528                 return 1;
3529
3530         return 0;
3531 }
3532
3533 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3534                                   struct perf_mmap_event *mmap_event)
3535 {
3536         struct perf_event *event;
3537
3538         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3539                 if (perf_event_mmap_match(event, mmap_event))
3540                         perf_event_mmap_output(event, mmap_event);
3541         }
3542 }
3543
3544 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3545 {
3546         struct perf_cpu_context *cpuctx;
3547         struct perf_event_context *ctx;
3548         struct vm_area_struct *vma = mmap_event->vma;
3549         struct file *file = vma->vm_file;
3550         unsigned int size;
3551         char tmp[16];
3552         char *buf = NULL;
3553         const char *name;
3554
3555         memset(tmp, 0, sizeof(tmp));
3556
3557         if (file) {
3558                 /*
3559                  * d_path works from the end of the buffer backwards, so we
3560                  * need to add enough zero bytes after the string to handle
3561                  * the 64bit alignment we do later.
3562                  */
3563                 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3564                 if (!buf) {
3565                         name = strncpy(tmp, "//enomem", sizeof(tmp));
3566                         goto got_name;
3567                 }
3568                 name = d_path(&file->f_path, buf, PATH_MAX);
3569                 if (IS_ERR(name)) {
3570                         name = strncpy(tmp, "//toolong", sizeof(tmp));
3571                         goto got_name;
3572                 }
3573         } else {
3574                 if (arch_vma_name(mmap_event->vma)) {
3575                         name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3576                                        sizeof(tmp));
3577                         goto got_name;
3578                 }
3579
3580                 if (!vma->vm_mm) {
3581                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
3582                         goto got_name;
3583                 }
3584
3585                 name = strncpy(tmp, "//anon", sizeof(tmp));
3586                 goto got_name;
3587         }
3588
3589 got_name:
3590         size = ALIGN(strlen(name)+1, sizeof(u64));
3591
3592         mmap_event->file_name = name;
3593         mmap_event->file_size = size;
3594
3595         mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3596
3597         rcu_read_lock();
3598         cpuctx = &get_cpu_var(perf_cpu_context);
3599         perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3600         ctx = rcu_dereference(current->perf_event_ctxp);
3601         if (ctx)
3602                 perf_event_mmap_ctx(ctx, mmap_event);
3603         put_cpu_var(perf_cpu_context);
3604         rcu_read_unlock();
3605
3606         kfree(buf);
3607 }
3608
3609 void __perf_event_mmap(struct vm_area_struct *vma)
3610 {
3611         struct perf_mmap_event mmap_event;
3612
3613         if (!atomic_read(&nr_mmap_events))
3614                 return;
3615
3616         mmap_event = (struct perf_mmap_event){
3617                 .vma    = vma,
3618                 /* .file_name */
3619                 /* .file_size */
3620                 .event_id  = {
3621                         .header = {
3622                                 .type = PERF_RECORD_MMAP,
3623                                 .misc = 0,
3624                                 /* .size */
3625                         },
3626                         /* .pid */
3627                         /* .tid */
3628                         .start  = vma->vm_start,
3629                         .len    = vma->vm_end - vma->vm_start,
3630                         .pgoff  = vma->vm_pgoff,
3631                 },
3632         };
3633
3634         perf_event_mmap_event(&mmap_event);
3635 }
3636
3637 /*
3638  * IRQ throttle logging
3639  */
3640
3641 static void perf_log_throttle(struct perf_event *event, int enable)
3642 {
3643         struct perf_output_handle handle;
3644         int ret;
3645
3646         struct {
3647                 struct perf_event_header        header;
3648                 u64                             time;
3649                 u64                             id;
3650                 u64                             stream_id;
3651         } throttle_event = {
3652                 .header = {
3653                         .type = PERF_RECORD_THROTTLE,
3654                         .misc = 0,
3655                         .size = sizeof(throttle_event),
3656                 },
3657                 .time           = perf_clock(),
3658                 .id             = primary_event_id(event),
3659                 .stream_id      = event->id,
3660         };
3661
3662         if (enable)
3663                 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3664
3665         ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3666         if (ret)
3667                 return;
3668
3669         perf_output_put(&handle, throttle_event);
3670         perf_output_end(&handle);
3671 }
3672
3673 /*
3674  * Generic event overflow handling, sampling.
3675  */
3676
3677 static int __perf_event_overflow(struct perf_event *event, int nmi,
3678                                    int throttle, struct perf_sample_data *data,
3679                                    struct pt_regs *regs)
3680 {
3681         int events = atomic_read(&event->event_limit);
3682         struct hw_perf_event *hwc = &event->hw;
3683         int ret = 0;
3684
3685         throttle = (throttle && event->pmu->unthrottle != NULL);
3686
3687         if (!throttle) {
3688                 hwc->interrupts++;
3689         } else {
3690                 if (hwc->interrupts != MAX_INTERRUPTS) {
3691                         hwc->interrupts++;
3692                         if (HZ * hwc->interrupts >
3693                                         (u64)sysctl_perf_event_sample_rate) {
3694                                 hwc->interrupts = MAX_INTERRUPTS;
3695                                 perf_log_throttle(event, 0);
3696                                 ret = 1;
3697                         }
3698                 } else {
3699                         /*
3700                          * Keep re-disabling events even though on the previous
3701                          * pass we disabled it - just in case we raced with a
3702                          * sched-in and the event got enabled again:
3703                          */
3704                         ret = 1;
3705                 }
3706         }
3707
3708         if (event->attr.freq) {
3709                 u64 now = perf_clock();
3710                 s64 delta = now - hwc->freq_stamp;
3711
3712                 hwc->freq_stamp = now;
3713
3714                 if (delta > 0 && delta < TICK_NSEC)
3715                         perf_adjust_period(event, NSEC_PER_SEC / (int)delta);
3716         }
3717
3718         /*
3719          * XXX event_limit might not quite work as expected on inherited
3720          * events
3721          */
3722
3723         event->pending_kill = POLL_IN;
3724         if (events && atomic_dec_and_test(&event->event_limit)) {
3725                 ret = 1;
3726                 event->pending_kill = POLL_HUP;
3727                 if (nmi) {
3728                         event->pending_disable = 1;
3729                         perf_pending_queue(&event->pending,
3730                                            perf_pending_event);
3731                 } else
3732                         perf_event_disable(event);
3733         }
3734
3735         if (event->overflow_handler)
3736                 event->overflow_handler(event, nmi, data, regs);
3737         else
3738                 perf_event_output(event, nmi, data, regs);
3739
3740         return ret;
3741 }
3742
3743 int perf_event_overflow(struct perf_event *event, int nmi,
3744                           struct perf_sample_data *data,
3745                           struct pt_regs *regs)
3746 {
3747         return __perf_event_overflow(event, nmi, 1, data, regs);
3748 }
3749
3750 /*
3751  * Generic software event infrastructure
3752  */
3753
3754 /*
3755  * We directly increment event->count and keep a second value in
3756  * event->hw.period_left to count intervals. This period event
3757  * is kept in the range [-sample_period, 0] so that we can use the
3758  * sign as trigger.
3759  */
3760
3761 static u64 perf_swevent_set_period(struct perf_event *event)
3762 {
3763         struct hw_perf_event *hwc = &event->hw;
3764         u64 period = hwc->last_period;
3765         u64 nr, offset;
3766         s64 old, val;
3767
3768         hwc->last_period = hwc->sample_period;
3769
3770 again:
3771         old = val = atomic64_read(&hwc->period_left);
3772         if (val < 0)
3773                 return 0;
3774
3775         nr = div64_u64(period + val, period);
3776         offset = nr * period;
3777         val -= offset;
3778         if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3779                 goto again;
3780
3781         return nr;
3782 }
3783
3784 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
3785                                     int nmi, struct perf_sample_data *data,
3786                                     struct pt_regs *regs)
3787 {
3788         struct hw_perf_event *hwc = &event->hw;
3789         int throttle = 0;
3790
3791         data->period = event->hw.last_period;
3792         if (!overflow)
3793                 overflow = perf_swevent_set_period(event);
3794
3795         if (hwc->interrupts == MAX_INTERRUPTS)
3796                 return;
3797
3798         for (; overflow; overflow--) {
3799                 if (__perf_event_overflow(event, nmi, throttle,
3800                                             data, regs)) {
3801                         /*
3802                          * We inhibit the overflow from happening when
3803                          * hwc->interrupts == MAX_INTERRUPTS.
3804                          */
3805                         break;
3806                 }
3807                 throttle = 1;
3808         }
3809 }
3810
3811 static void perf_swevent_unthrottle(struct perf_event *event)
3812 {
3813         /*
3814          * Nothing to do, we already reset hwc->interrupts.
3815          */
3816 }
3817
3818 static void perf_swevent_add(struct perf_event *event, u64 nr,
3819                                int nmi, struct perf_sample_data *data,
3820                                struct pt_regs *regs)
3821 {
3822         struct hw_perf_event *hwc = &event->hw;
3823
3824         atomic64_add(nr, &event->count);
3825
3826         if (!regs)
3827                 return;
3828
3829         if (!hwc->sample_period)
3830                 return;
3831
3832         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
3833                 return perf_swevent_overflow(event, 1, nmi, data, regs);
3834
3835         if (atomic64_add_negative(nr, &hwc->period_left))
3836                 return;
3837
3838         perf_swevent_overflow(event, 0, nmi, data, regs);
3839 }
3840
3841 static int perf_swevent_is_counting(struct perf_event *event)
3842 {
3843         /*
3844          * The event is active, we're good!
3845          */
3846         if (event->state == PERF_EVENT_STATE_ACTIVE)
3847                 return 1;
3848
3849         /*
3850          * The event is off/error, not counting.
3851          */
3852         if (event->state != PERF_EVENT_STATE_INACTIVE)
3853                 return 0;
3854
3855         /*
3856          * The event is inactive, if the context is active
3857          * we're part of a group that didn't make it on the 'pmu',
3858          * not counting.
3859          */
3860         if (event->ctx->is_active)
3861                 return 0;
3862
3863         /*
3864          * We're inactive and the context is too, this means the
3865          * task is scheduled out, we're counting events that happen
3866          * to us, like migration events.
3867          */
3868         return 1;
3869 }
3870
3871 static int perf_tp_event_match(struct perf_event *event,
3872                                 struct perf_sample_data *data);
3873
3874 static int perf_exclude_event(struct perf_event *event,
3875                               struct pt_regs *regs)
3876 {
3877         if (regs) {
3878                 if (event->attr.exclude_user && user_mode(regs))
3879                         return 1;
3880
3881                 if (event->attr.exclude_kernel && !user_mode(regs))
3882                         return 1;
3883         }
3884
3885         return 0;
3886 }
3887
3888 static int perf_swevent_match(struct perf_event *event,
3889                                 enum perf_type_id type,
3890                                 u32 event_id,
3891                                 struct perf_sample_data *data,
3892                                 struct pt_regs *regs)
3893 {
3894         if (event->cpu != -1 && event->cpu != smp_processor_id())
3895                 return 0;
3896
3897         if (!perf_swevent_is_counting(event))
3898                 return 0;
3899
3900         if (event->attr.type != type)
3901                 return 0;
3902
3903         if (event->attr.config != event_id)
3904                 return 0;
3905
3906         if (perf_exclude_event(event, regs))
3907                 return 0;
3908
3909         if (event->attr.type == PERF_TYPE_TRACEPOINT &&
3910             !perf_tp_event_match(event, data))
3911                 return 0;
3912
3913         return 1;
3914 }
3915
3916 static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3917                                      enum perf_type_id type,
3918                                      u32 event_id, u64 nr, int nmi,
3919                                      struct perf_sample_data *data,
3920                                      struct pt_regs *regs)
3921 {
3922         struct perf_event *event;
3923
3924         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3925                 if (perf_swevent_match(event, type, event_id, data, regs))
3926                         perf_swevent_add(event, nr, nmi, data, regs);
3927         }
3928 }
3929
3930 int perf_swevent_get_recursion_context(void)
3931 {
3932         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3933         int rctx;
3934
3935         if (in_nmi())
3936                 rctx = 3;
3937         else if (in_irq())
3938                 rctx = 2;
3939         else if (in_softirq())
3940                 rctx = 1;
3941         else
3942                 rctx = 0;
3943
3944         if (cpuctx->recursion[rctx]) {
3945                 put_cpu_var(perf_cpu_context);
3946                 return -1;
3947         }
3948
3949         cpuctx->recursion[rctx]++;
3950         barrier();
3951
3952         return rctx;
3953 }
3954 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
3955
3956 void perf_swevent_put_recursion_context(int rctx)
3957 {
3958         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3959         barrier();
3960         cpuctx->recursion[rctx]--;
3961         put_cpu_var(perf_cpu_context);
3962 }
3963 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
3964
3965 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3966                                     u64 nr, int nmi,
3967                                     struct perf_sample_data *data,
3968                                     struct pt_regs *regs)
3969 {
3970         struct perf_cpu_context *cpuctx;
3971         struct perf_event_context *ctx;
3972
3973         cpuctx = &__get_cpu_var(perf_cpu_context);
3974         rcu_read_lock();
3975         perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3976                                  nr, nmi, data, regs);
3977         /*
3978          * doesn't really matter which of the child contexts the
3979          * events ends up in.
3980          */
3981         ctx = rcu_dereference(current->perf_event_ctxp);
3982         if (ctx)
3983                 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3984         rcu_read_unlock();
3985 }
3986
3987 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3988                             struct pt_regs *regs, u64 addr)
3989 {
3990         struct perf_sample_data data;
3991         int rctx;
3992
3993         rctx = perf_swevent_get_recursion_context();
3994         if (rctx < 0)
3995                 return;
3996
3997         data.addr = addr;
3998         data.raw  = NULL;
3999
4000         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4001
4002         perf_swevent_put_recursion_context(rctx);
4003 }
4004
4005 static void perf_swevent_read(struct perf_event *event)
4006 {
4007 }
4008
4009 static int perf_swevent_enable(struct perf_event *event)
4010 {
4011         struct hw_perf_event *hwc = &event->hw;
4012
4013         if (hwc->sample_period) {
4014                 hwc->last_period = hwc->sample_period;
4015                 perf_swevent_set_period(event);
4016         }
4017         return 0;
4018 }
4019
4020 static void perf_swevent_disable(struct perf_event *event)
4021 {
4022 }
4023
4024 static const struct pmu perf_ops_generic = {
4025         .enable         = perf_swevent_enable,
4026         .disable        = perf_swevent_disable,
4027         .read           = perf_swevent_read,
4028         .unthrottle     = perf_swevent_unthrottle,
4029 };
4030
4031 /*
4032  * hrtimer based swevent callback
4033  */
4034
4035 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4036 {
4037         enum hrtimer_restart ret = HRTIMER_RESTART;
4038         struct perf_sample_data data;
4039         struct pt_regs *regs;
4040         struct perf_event *event;
4041         u64 period;
4042
4043         event   = container_of(hrtimer, struct perf_event, hw.hrtimer);
4044         event->pmu->read(event);
4045
4046         data.addr = 0;
4047         data.raw = NULL;
4048         data.period = event->hw.last_period;
4049         regs = get_irq_regs();
4050         /*
4051          * In case we exclude kernel IPs or are somehow not in interrupt
4052          * context, provide the next best thing, the user IP.
4053          */
4054         if ((event->attr.exclude_kernel || !regs) &&
4055                         !event->attr.exclude_user)
4056                 regs = task_pt_regs(current);
4057
4058         if (regs) {
4059                 if (!(event->attr.exclude_idle && current->pid == 0))
4060                         if (perf_event_overflow(event, 0, &data, regs))
4061                                 ret = HRTIMER_NORESTART;
4062         }
4063
4064         period = max_t(u64, 10000, event->hw.sample_period);
4065         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4066
4067         return ret;
4068 }
4069
4070 static void perf_swevent_start_hrtimer(struct perf_event *event)
4071 {
4072         struct hw_perf_event *hwc = &event->hw;
4073
4074         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4075         hwc->hrtimer.function = perf_swevent_hrtimer;
4076         if (hwc->sample_period) {
4077                 u64 period;
4078
4079                 if (hwc->remaining) {
4080                         if (hwc->remaining < 0)
4081                                 period = 10000;
4082                         else
4083                                 period = hwc->remaining;
4084                         hwc->remaining = 0;
4085                 } else {
4086                         period = max_t(u64, 10000, hwc->sample_period);
4087                 }
4088                 __hrtimer_start_range_ns(&hwc->hrtimer,
4089                                 ns_to_ktime(period), 0,
4090                                 HRTIMER_MODE_REL, 0);
4091         }
4092 }
4093
4094 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4095 {
4096         struct hw_perf_event *hwc = &event->hw;
4097
4098         if (hwc->sample_period) {
4099                 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4100                 hwc->remaining = ktime_to_ns(remaining);
4101
4102                 hrtimer_cancel(&hwc->hrtimer);
4103         }
4104 }
4105
4106 /*
4107  * Software event: cpu wall time clock
4108  */
4109
4110 static void cpu_clock_perf_event_update(struct perf_event *event)
4111 {
4112         int cpu = raw_smp_processor_id();
4113         s64 prev;
4114         u64 now;
4115
4116         now = cpu_clock(cpu);
4117         prev = atomic64_xchg(&event->hw.prev_count, now);
4118         atomic64_add(now - prev, &event->count);
4119 }
4120
4121 static int cpu_clock_perf_event_enable(struct perf_event *event)
4122 {
4123         struct hw_perf_event *hwc = &event->hw;
4124         int cpu = raw_smp_processor_id();
4125
4126         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
4127         perf_swevent_start_hrtimer(event);
4128
4129         return 0;
4130 }
4131
4132 static void cpu_clock_perf_event_disable(struct perf_event *event)
4133 {
4134         perf_swevent_cancel_hrtimer(event);
4135         cpu_clock_perf_event_update(event);
4136 }
4137
4138 static void cpu_clock_perf_event_read(struct perf_event *event)
4139 {
4140         cpu_clock_perf_event_update(event);
4141 }
4142
4143 static const struct pmu perf_ops_cpu_clock = {
4144         .enable         = cpu_clock_perf_event_enable,
4145         .disable        = cpu_clock_perf_event_disable,
4146         .read           = cpu_clock_perf_event_read,
4147 };
4148
4149 /*
4150  * Software event: task time clock
4151  */
4152
4153 static void task_clock_perf_event_update(struct perf_event *event, u64 now)
4154 {
4155         u64 prev;
4156         s64 delta;
4157
4158         prev = atomic64_xchg(&event->hw.prev_count, now);
4159         delta = now - prev;
4160         atomic64_add(delta, &event->count);
4161 }
4162
4163 static int task_clock_perf_event_enable(struct perf_event *event)
4164 {
4165         struct hw_perf_event *hwc = &event->hw;
4166         u64 now;
4167
4168         now = event->ctx->time;
4169
4170         atomic64_set(&hwc->prev_count, now);
4171
4172         perf_swevent_start_hrtimer(event);
4173
4174         return 0;
4175 }
4176
4177 static void task_clock_perf_event_disable(struct perf_event *event)
4178 {
4179         perf_swevent_cancel_hrtimer(event);
4180         task_clock_perf_event_update(event, event->ctx->time);
4181
4182 }
4183
4184 static void task_clock_perf_event_read(struct perf_event *event)
4185 {
4186         u64 time;
4187
4188         if (!in_nmi()) {
4189                 update_context_time(event->ctx);
4190                 time = event->ctx->time;
4191         } else {
4192                 u64 now = perf_clock();
4193                 u64 delta = now - event->ctx->timestamp;
4194                 time = event->ctx->time + delta;
4195         }
4196
4197         task_clock_perf_event_update(event, time);
4198 }
4199
4200 static const struct pmu perf_ops_task_clock = {
4201         .enable         = task_clock_perf_event_enable,
4202         .disable        = task_clock_perf_event_disable,
4203         .read           = task_clock_perf_event_read,
4204 };
4205
4206 #ifdef CONFIG_EVENT_TRACING
4207
4208 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4209                           int entry_size)
4210 {
4211         struct perf_raw_record raw = {
4212                 .size = entry_size,
4213                 .data = record,
4214         };
4215
4216         struct perf_sample_data data = {
4217                 .addr = addr,
4218                 .raw = &raw,
4219         };
4220
4221         struct pt_regs *regs = get_irq_regs();
4222
4223         if (!regs)
4224                 regs = task_pt_regs(current);
4225
4226         /* Trace events already protected against recursion */
4227         do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
4228                                 &data, regs);
4229 }
4230 EXPORT_SYMBOL_GPL(perf_tp_event);
4231
4232 static int perf_tp_event_match(struct perf_event *event,
4233                                 struct perf_sample_data *data)
4234 {
4235         void *record = data->raw->data;
4236
4237         if (likely(!event->filter) || filter_match_preds(event->filter, record))
4238                 return 1;
4239         return 0;
4240 }
4241
4242 static void tp_perf_event_destroy(struct perf_event *event)
4243 {
4244         ftrace_profile_disable(event->attr.config);
4245 }
4246
4247 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4248 {
4249         /*
4250          * Raw tracepoint data is a severe data leak, only allow root to
4251          * have these.
4252          */
4253         if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4254                         perf_paranoid_tracepoint_raw() &&
4255                         !capable(CAP_SYS_ADMIN))
4256                 return ERR_PTR(-EPERM);
4257
4258         if (ftrace_profile_enable(event->attr.config))
4259                 return NULL;
4260
4261         event->destroy = tp_perf_event_destroy;
4262
4263         return &perf_ops_generic;
4264 }
4265
4266 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4267 {
4268         char *filter_str;
4269         int ret;
4270
4271         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4272                 return -EINVAL;
4273
4274         filter_str = strndup_user(arg, PAGE_SIZE);
4275         if (IS_ERR(filter_str))
4276                 return PTR_ERR(filter_str);
4277
4278         ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4279
4280         kfree(filter_str);
4281         return ret;
4282 }
4283
4284 static void perf_event_free_filter(struct perf_event *event)
4285 {
4286         ftrace_profile_free_filter(event);
4287 }
4288
4289 #else
4290
4291 static int perf_tp_event_match(struct perf_event *event,
4292                                 struct perf_sample_data *data)
4293 {
4294         return 1;
4295 }
4296
4297 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4298 {
4299         return NULL;
4300 }
4301
4302 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4303 {
4304         return -ENOENT;
4305 }
4306
4307 static void perf_event_free_filter(struct perf_event *event)
4308 {
4309 }
4310
4311 #endif /* CONFIG_EVENT_TRACING */
4312
4313 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4314 static void bp_perf_event_destroy(struct perf_event *event)
4315 {
4316         release_bp_slot(event);
4317 }
4318
4319 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4320 {
4321         int err;
4322
4323         err = register_perf_hw_breakpoint(bp);
4324         if (err)
4325                 return ERR_PTR(err);
4326
4327         bp->destroy = bp_perf_event_destroy;
4328
4329         return &perf_ops_bp;
4330 }
4331
4332 void perf_bp_event(struct perf_event *bp, void *data)
4333 {
4334         struct perf_sample_data sample;
4335         struct pt_regs *regs = data;
4336
4337         sample.raw = NULL;
4338         sample.addr = bp->attr.bp_addr;
4339
4340         if (!perf_exclude_event(bp, regs))
4341                 perf_swevent_add(bp, 1, 1, &sample, regs);
4342 }
4343 #else
4344 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4345 {
4346         return NULL;
4347 }
4348
4349 void perf_bp_event(struct perf_event *bp, void *regs)
4350 {
4351 }
4352 #endif
4353
4354 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4355
4356 static void sw_perf_event_destroy(struct perf_event *event)
4357 {
4358         u64 event_id = event->attr.config;
4359
4360         WARN_ON(event->parent);
4361
4362         atomic_dec(&perf_swevent_enabled[event_id]);
4363 }
4364
4365 static const struct pmu *sw_perf_event_init(struct perf_event *event)
4366 {
4367         const struct pmu *pmu = NULL;
4368         u64 event_id = event->attr.config;
4369
4370         /*
4371          * Software events (currently) can't in general distinguish
4372          * between user, kernel and hypervisor events.
4373          * However, context switches and cpu migrations are considered
4374          * to be kernel events, and page faults are never hypervisor
4375          * events.
4376          */
4377         switch (event_id) {
4378         case PERF_COUNT_SW_CPU_CLOCK:
4379                 pmu = &perf_ops_cpu_clock;
4380
4381                 break;
4382         case PERF_COUNT_SW_TASK_CLOCK:
4383                 /*
4384                  * If the user instantiates this as a per-cpu event,
4385                  * use the cpu_clock event instead.
4386                  */
4387                 if (event->ctx->task)
4388                         pmu = &perf_ops_task_clock;
4389                 else
4390                         pmu = &perf_ops_cpu_clock;
4391
4392                 break;
4393         case PERF_COUNT_SW_PAGE_FAULTS:
4394         case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4395         case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4396         case PERF_COUNT_SW_CONTEXT_SWITCHES:
4397         case PERF_COUNT_SW_CPU_MIGRATIONS:
4398         case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4399         case PERF_COUNT_SW_EMULATION_FAULTS:
4400                 if (!event->parent) {
4401                         atomic_inc(&perf_swevent_enabled[event_id]);
4402                         event->destroy = sw_perf_event_destroy;
4403                 }
4404                 pmu = &perf_ops_generic;
4405                 break;
4406         }
4407
4408         return pmu;
4409 }
4410
4411 /*
4412  * Allocate and initialize a event structure
4413  */
4414 static struct perf_event *
4415 perf_event_alloc(struct perf_event_attr *attr,
4416                    int cpu,
4417                    struct perf_event_context *ctx,
4418                    struct perf_event *group_leader,
4419                    struct perf_event *parent_event,
4420                    perf_overflow_handler_t overflow_handler,
4421                    gfp_t gfpflags)
4422 {
4423         const struct pmu *pmu;
4424         struct perf_event *event;
4425         struct hw_perf_event *hwc;
4426         long err;
4427
4428         event = kzalloc(sizeof(*event), gfpflags);
4429         if (!event)
4430                 return ERR_PTR(-ENOMEM);
4431
4432         /*
4433          * Single events are their own group leaders, with an
4434          * empty sibling list:
4435          */
4436         if (!group_leader)
4437                 group_leader = event;
4438
4439         mutex_init(&event->child_mutex);
4440         INIT_LIST_HEAD(&event->child_list);
4441
4442         INIT_LIST_HEAD(&event->group_entry);
4443         INIT_LIST_HEAD(&event->event_entry);
4444         INIT_LIST_HEAD(&event->sibling_list);
4445         init_waitqueue_head(&event->waitq);
4446
4447         mutex_init(&event->mmap_mutex);
4448
4449         event->cpu              = cpu;
4450         event->attr             = *attr;
4451         event->group_leader     = group_leader;
4452         event->pmu              = NULL;
4453         event->ctx              = ctx;
4454         event->oncpu            = -1;
4455
4456         event->parent           = parent_event;
4457
4458         event->ns               = get_pid_ns(current->nsproxy->pid_ns);
4459         event->id               = atomic64_inc_return(&perf_event_id);
4460
4461         event->state            = PERF_EVENT_STATE_INACTIVE;
4462
4463         if (!overflow_handler && parent_event)
4464                 overflow_handler = parent_event->overflow_handler;
4465         
4466         event->overflow_handler = overflow_handler;
4467
4468         if (attr->disabled)
4469                 event->state = PERF_EVENT_STATE_OFF;
4470
4471         pmu = NULL;
4472
4473         hwc = &event->hw;
4474         hwc->sample_period = attr->sample_period;
4475         if (attr->freq && attr->sample_freq)
4476                 hwc->sample_period = 1;
4477         hwc->last_period = hwc->sample_period;
4478
4479         atomic64_set(&hwc->period_left, hwc->sample_period);
4480
4481         /*
4482          * we currently do not support PERF_FORMAT_GROUP on inherited events
4483          */
4484         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4485                 goto done;
4486
4487         switch (attr->type) {
4488         case PERF_TYPE_RAW:
4489         case PERF_TYPE_HARDWARE:
4490         case PERF_TYPE_HW_CACHE:
4491                 pmu = hw_perf_event_init(event);
4492                 break;
4493
4494         case PERF_TYPE_SOFTWARE:
4495                 pmu = sw_perf_event_init(event);
4496                 break;
4497
4498         case PERF_TYPE_TRACEPOINT:
4499                 pmu = tp_perf_event_init(event);
4500                 break;
4501
4502         case PERF_TYPE_BREAKPOINT:
4503                 pmu = bp_perf_event_init(event);
4504                 break;
4505
4506
4507         default:
4508                 break;
4509         }
4510 done:
4511         err = 0;
4512         if (!pmu)
4513                 err = -EINVAL;
4514         else if (IS_ERR(pmu))
4515                 err = PTR_ERR(pmu);
4516
4517         if (err) {
4518                 if (event->ns)
4519                         put_pid_ns(event->ns);
4520                 kfree(event);
4521                 return ERR_PTR(err);
4522         }
4523
4524         event->pmu = pmu;
4525
4526         if (!event->parent) {
4527                 atomic_inc(&nr_events);
4528                 if (event->attr.mmap)
4529                         atomic_inc(&nr_mmap_events);
4530                 if (event->attr.comm)
4531                         atomic_inc(&nr_comm_events);
4532                 if (event->attr.task)
4533                         atomic_inc(&nr_task_events);
4534         }
4535
4536         return event;
4537 }
4538
4539 static int perf_copy_attr(struct perf_event_attr __user *uattr,
4540                           struct perf_event_attr *attr)
4541 {
4542         u32 size;
4543         int ret;
4544
4545         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4546                 return -EFAULT;
4547
4548         /*
4549          * zero the full structure, so that a short copy will be nice.
4550          */
4551         memset(attr, 0, sizeof(*attr));
4552
4553         ret = get_user(size, &uattr->size);
4554         if (ret)
4555                 return ret;
4556
4557         if (size > PAGE_SIZE)   /* silly large */
4558                 goto err_size;
4559
4560         if (!size)              /* abi compat */
4561                 size = PERF_ATTR_SIZE_VER0;
4562
4563         if (size < PERF_ATTR_SIZE_VER0)
4564                 goto err_size;
4565
4566         /*
4567          * If we're handed a bigger struct than we know of,
4568          * ensure all the unknown bits are 0 - i.e. new
4569          * user-space does not rely on any kernel feature
4570          * extensions we dont know about yet.
4571          */
4572         if (size > sizeof(*attr)) {
4573                 unsigned char __user *addr;
4574                 unsigned char __user *end;
4575                 unsigned char val;
4576
4577                 addr = (void __user *)uattr + sizeof(*attr);
4578                 end  = (void __user *)uattr + size;
4579
4580                 for (; addr < end; addr++) {
4581                         ret = get_user(val, addr);
4582                         if (ret)
4583                                 return ret;
4584                         if (val)
4585                                 goto err_size;
4586                 }
4587                 size = sizeof(*attr);
4588         }
4589
4590         ret = copy_from_user(attr, uattr, size);
4591         if (ret)
4592                 return -EFAULT;
4593
4594         /*
4595          * If the type exists, the corresponding creation will verify
4596          * the attr->config.
4597          */
4598         if (attr->type >= PERF_TYPE_MAX)
4599                 return -EINVAL;
4600
4601         if (attr->__reserved_1 || attr->__reserved_2)
4602                 return -EINVAL;
4603
4604         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4605                 return -EINVAL;
4606
4607         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4608                 return -EINVAL;
4609
4610 out:
4611         return ret;
4612
4613 err_size:
4614         put_user(sizeof(*attr), &uattr->size);
4615         ret = -E2BIG;
4616         goto out;
4617 }
4618
4619 static int perf_event_set_output(struct perf_event *event, int output_fd)
4620 {
4621         struct perf_event *output_event = NULL;
4622         struct file *output_file = NULL;
4623         struct perf_event *old_output;
4624         int fput_needed = 0;
4625         int ret = -EINVAL;
4626
4627         if (!output_fd)
4628                 goto set;
4629
4630         output_file = fget_light(output_fd, &fput_needed);
4631         if (!output_file)
4632                 return -EBADF;
4633
4634         if (output_file->f_op != &perf_fops)
4635                 goto out;
4636
4637         output_event = output_file->private_data;
4638
4639         /* Don't chain output fds */
4640         if (output_event->output)
4641                 goto out;
4642
4643         /* Don't set an output fd when we already have an output channel */
4644         if (event->data)
4645                 goto out;
4646
4647         atomic_long_inc(&output_file->f_count);
4648
4649 set:
4650         mutex_lock(&event->mmap_mutex);
4651         old_output = event->output;
4652         rcu_assign_pointer(event->output, output_event);
4653         mutex_unlock(&event->mmap_mutex);
4654
4655         if (old_output) {
4656                 /*
4657                  * we need to make sure no existing perf_output_*()
4658                  * is still referencing this event.
4659                  */
4660                 synchronize_rcu();
4661                 fput(old_output->filp);
4662         }
4663
4664         ret = 0;
4665 out:
4666         fput_light(output_file, fput_needed);
4667         return ret;
4668 }
4669
4670 /**
4671  * sys_perf_event_open - open a performance event, associate it to a task/cpu
4672  *
4673  * @attr_uptr:  event_id type attributes for monitoring/sampling
4674  * @pid:                target pid
4675  * @cpu:                target cpu
4676  * @group_fd:           group leader event fd
4677  */
4678 SYSCALL_DEFINE5(perf_event_open,
4679                 struct perf_event_attr __user *, attr_uptr,
4680                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4681 {
4682         struct perf_event *event, *group_leader;
4683         struct perf_event_attr attr;
4684         struct perf_event_context *ctx;
4685         struct file *event_file = NULL;
4686         struct file *group_file = NULL;
4687         int fput_needed = 0;
4688         int fput_needed2 = 0;
4689         int err;
4690
4691         /* for future expandability... */
4692         if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4693                 return -EINVAL;
4694
4695         err = perf_copy_attr(attr_uptr, &attr);
4696         if (err)
4697                 return err;
4698
4699         if (!attr.exclude_kernel) {
4700                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4701                         return -EACCES;
4702         }
4703
4704         if (attr.freq) {
4705                 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4706                         return -EINVAL;
4707         }
4708
4709         /*
4710          * Get the target context (task or percpu):
4711          */
4712         ctx = find_get_context(pid, cpu);
4713         if (IS_ERR(ctx))
4714                 return PTR_ERR(ctx);
4715
4716         /*
4717          * Look up the group leader (we will attach this event to it):
4718          */
4719         group_leader = NULL;
4720         if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
4721                 err = -EINVAL;
4722                 group_file = fget_light(group_fd, &fput_needed);
4723                 if (!group_file)
4724                         goto err_put_context;
4725                 if (group_file->f_op != &perf_fops)
4726                         goto err_put_context;
4727
4728                 group_leader = group_file->private_data;
4729                 /*
4730                  * Do not allow a recursive hierarchy (this new sibling
4731                  * becoming part of another group-sibling):
4732                  */
4733                 if (group_leader->group_leader != group_leader)
4734                         goto err_put_context;
4735                 /*
4736                  * Do not allow to attach to a group in a different
4737                  * task or CPU context:
4738                  */
4739                 if (group_leader->ctx != ctx)
4740                         goto err_put_context;
4741                 /*
4742                  * Only a group leader can be exclusive or pinned
4743                  */
4744                 if (attr.exclusive || attr.pinned)
4745                         goto err_put_context;
4746         }
4747
4748         event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4749                                      NULL, NULL, GFP_KERNEL);
4750         err = PTR_ERR(event);
4751         if (IS_ERR(event))
4752                 goto err_put_context;
4753
4754         err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
4755         if (err < 0)
4756                 goto err_free_put_context;
4757
4758         event_file = fget_light(err, &fput_needed2);
4759         if (!event_file)
4760                 goto err_free_put_context;
4761
4762         if (flags & PERF_FLAG_FD_OUTPUT) {
4763                 err = perf_event_set_output(event, group_fd);
4764                 if (err)
4765                         goto err_fput_free_put_context;
4766         }
4767
4768         event->filp = event_file;
4769         WARN_ON_ONCE(ctx->parent_ctx);
4770         mutex_lock(&ctx->mutex);
4771         perf_install_in_context(ctx, event, cpu);
4772         ++ctx->generation;
4773         mutex_unlock(&ctx->mutex);
4774
4775         event->owner = current;
4776         get_task_struct(current);
4777         mutex_lock(&current->perf_event_mutex);
4778         list_add_tail(&event->owner_entry, &current->perf_event_list);
4779         mutex_unlock(&current->perf_event_mutex);
4780
4781 err_fput_free_put_context:
4782         fput_light(event_file, fput_needed2);
4783
4784 err_free_put_context:
4785         if (err < 0)
4786                 kfree(event);
4787
4788 err_put_context:
4789         if (err < 0)
4790                 put_ctx(ctx);
4791
4792         fput_light(group_file, fput_needed);
4793
4794         return err;
4795 }
4796
4797 /**
4798  * perf_event_create_kernel_counter
4799  *
4800  * @attr: attributes of the counter to create
4801  * @cpu: cpu in which the counter is bound
4802  * @pid: task to profile
4803  */
4804 struct perf_event *
4805 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
4806                                  pid_t pid,
4807                                  perf_overflow_handler_t overflow_handler)
4808 {
4809         struct perf_event *event;
4810         struct perf_event_context *ctx;
4811         int err;
4812
4813         /*
4814          * Get the target context (task or percpu):
4815          */
4816
4817         ctx = find_get_context(pid, cpu);
4818         if (IS_ERR(ctx)) {
4819                 err = PTR_ERR(ctx);
4820                 goto err_exit;
4821         }
4822
4823         event = perf_event_alloc(attr, cpu, ctx, NULL,
4824                                  NULL, overflow_handler, GFP_KERNEL);
4825         if (IS_ERR(event)) {
4826                 err = PTR_ERR(event);
4827                 goto err_put_context;
4828         }
4829
4830         event->filp = NULL;
4831         WARN_ON_ONCE(ctx->parent_ctx);
4832         mutex_lock(&ctx->mutex);
4833         perf_install_in_context(ctx, event, cpu);
4834         ++ctx->generation;
4835         mutex_unlock(&ctx->mutex);
4836
4837         event->owner = current;
4838         get_task_struct(current);
4839         mutex_lock(&current->perf_event_mutex);
4840         list_add_tail(&event->owner_entry, &current->perf_event_list);
4841         mutex_unlock(&current->perf_event_mutex);
4842
4843         return event;
4844
4845  err_put_context:
4846         put_ctx(ctx);
4847  err_exit:
4848         return ERR_PTR(err);
4849 }
4850 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
4851
4852 /*
4853  * inherit a event from parent task to child task:
4854  */
4855 static struct perf_event *
4856 inherit_event(struct perf_event *parent_event,
4857               struct task_struct *parent,
4858               struct perf_event_context *parent_ctx,
4859               struct task_struct *child,
4860               struct perf_event *group_leader,
4861               struct perf_event_context *child_ctx)
4862 {
4863         struct perf_event *child_event;
4864
4865         /*
4866          * Instead of creating recursive hierarchies of events,
4867          * we link inherited events back to the original parent,
4868          * which has a filp for sure, which we use as the reference
4869          * count:
4870          */
4871         if (parent_event->parent)
4872                 parent_event = parent_event->parent;
4873
4874         child_event = perf_event_alloc(&parent_event->attr,
4875                                            parent_event->cpu, child_ctx,
4876                                            group_leader, parent_event,
4877                                            NULL, GFP_KERNEL);
4878         if (IS_ERR(child_event))
4879                 return child_event;
4880         get_ctx(child_ctx);
4881
4882         /*
4883          * Make the child state follow the state of the parent event,
4884          * not its attr.disabled bit.  We hold the parent's mutex,
4885          * so we won't race with perf_event_{en, dis}able_family.
4886          */
4887         if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
4888                 child_event->state = PERF_EVENT_STATE_INACTIVE;
4889         else
4890                 child_event->state = PERF_EVENT_STATE_OFF;
4891
4892         if (parent_event->attr.freq)
4893                 child_event->hw.sample_period = parent_event->hw.sample_period;
4894
4895         child_event->overflow_handler = parent_event->overflow_handler;
4896
4897         /*
4898          * Link it up in the child's context:
4899          */
4900         add_event_to_ctx(child_event, child_ctx);
4901
4902         /*
4903          * Get a reference to the parent filp - we will fput it
4904          * when the child event exits. This is safe to do because
4905          * we are in the parent and we know that the filp still
4906          * exists and has a nonzero count:
4907          */
4908         atomic_long_inc(&parent_event->filp->f_count);
4909
4910         /*
4911          * Link this into the parent event's child list
4912          */
4913         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4914         mutex_lock(&parent_event->child_mutex);
4915         list_add_tail(&child_event->child_list, &parent_event->child_list);
4916         mutex_unlock(&parent_event->child_mutex);
4917
4918         return child_event;
4919 }
4920
4921 static int inherit_group(struct perf_event *parent_event,
4922               struct task_struct *parent,
4923               struct perf_event_context *parent_ctx,
4924               struct task_struct *child,
4925               struct perf_event_context *child_ctx)
4926 {
4927         struct perf_event *leader;
4928         struct perf_event *sub;
4929         struct perf_event *child_ctr;
4930
4931         leader = inherit_event(parent_event, parent, parent_ctx,
4932                                  child, NULL, child_ctx);
4933         if (IS_ERR(leader))
4934                 return PTR_ERR(leader);
4935         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
4936                 child_ctr = inherit_event(sub, parent, parent_ctx,
4937                                             child, leader, child_ctx);
4938                 if (IS_ERR(child_ctr))
4939                         return PTR_ERR(child_ctr);
4940         }
4941         return 0;
4942 }
4943
4944 static void sync_child_event(struct perf_event *child_event,
4945                                struct task_struct *child)
4946 {
4947         struct perf_event *parent_event = child_event->parent;
4948         u64 child_val;
4949
4950         if (child_event->attr.inherit_stat)
4951                 perf_event_read_event(child_event, child);
4952
4953         child_val = atomic64_read(&child_event->count);
4954
4955         /*
4956          * Add back the child's count to the parent's count:
4957          */
4958         atomic64_add(child_val, &parent_event->count);
4959         atomic64_add(child_event->total_time_enabled,
4960                      &parent_event->child_total_time_enabled);
4961         atomic64_add(child_event->total_time_running,
4962                      &parent_event->child_total_time_running);
4963
4964         /*
4965          * Remove this event from the parent's list
4966          */
4967         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
4968         mutex_lock(&parent_event->child_mutex);
4969         list_del_init(&child_event->child_list);
4970         mutex_unlock(&parent_event->child_mutex);
4971
4972         /*
4973          * Release the parent event, if this was the last
4974          * reference to it.
4975          */
4976         fput(parent_event->filp);
4977 }
4978
4979 static void
4980 __perf_event_exit_task(struct perf_event *child_event,
4981                          struct perf_event_context *child_ctx,
4982                          struct task_struct *child)
4983 {
4984         struct perf_event *parent_event;
4985
4986         perf_event_remove_from_context(child_event);
4987
4988         parent_event = child_event->parent;
4989         /*
4990          * It can happen that parent exits first, and has events
4991          * that are still around due to the child reference. These
4992          * events need to be zapped - but otherwise linger.
4993          */
4994         if (parent_event) {
4995                 sync_child_event(child_event, child);
4996                 free_event(child_event);
4997         }
4998 }
4999
5000 /*
5001  * When a child task exits, feed back event values to parent events.
5002  */
5003 void perf_event_exit_task(struct task_struct *child)
5004 {
5005         struct perf_event *child_event, *tmp;
5006         struct perf_event_context *child_ctx;
5007         unsigned long flags;
5008
5009         if (likely(!child->perf_event_ctxp)) {
5010                 perf_event_task(child, NULL, 0);
5011                 return;
5012         }
5013
5014         local_irq_save(flags);
5015         /*
5016          * We can't reschedule here because interrupts are disabled,
5017          * and either child is current or it is a task that can't be
5018          * scheduled, so we are now safe from rescheduling changing
5019          * our context.
5020          */
5021         child_ctx = child->perf_event_ctxp;
5022         __perf_event_task_sched_out(child_ctx);
5023
5024         /*
5025          * Take the context lock here so that if find_get_context is
5026          * reading child->perf_event_ctxp, we wait until it has
5027          * incremented the context's refcount before we do put_ctx below.
5028          */
5029         raw_spin_lock(&child_ctx->lock);
5030         child->perf_event_ctxp = NULL;
5031         /*
5032          * If this context is a clone; unclone it so it can't get
5033          * swapped to another process while we're removing all
5034          * the events from it.
5035          */
5036         unclone_ctx(child_ctx);
5037         update_context_time(child_ctx);
5038         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5039
5040         /*
5041          * Report the task dead after unscheduling the events so that we
5042          * won't get any samples after PERF_RECORD_EXIT. We can however still
5043          * get a few PERF_RECORD_READ events.
5044          */
5045         perf_event_task(child, child_ctx, 0);
5046
5047         /*
5048          * We can recurse on the same lock type through:
5049          *
5050          *   __perf_event_exit_task()
5051          *     sync_child_event()
5052          *       fput(parent_event->filp)
5053          *         perf_release()
5054          *           mutex_lock(&ctx->mutex)
5055          *
5056          * But since its the parent context it won't be the same instance.
5057          */
5058         mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
5059
5060 again:
5061         list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5062                                  group_entry)
5063                 __perf_event_exit_task(child_event, child_ctx, child);
5064
5065         list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5066                                  group_entry)
5067                 __perf_event_exit_task(child_event, child_ctx, child);
5068
5069         /*
5070          * If the last event was a group event, it will have appended all
5071          * its siblings to the list, but we obtained 'tmp' before that which
5072          * will still point to the list head terminating the iteration.
5073          */
5074         if (!list_empty(&child_ctx->pinned_groups) ||
5075             !list_empty(&child_ctx->flexible_groups))
5076                 goto again;
5077
5078         mutex_unlock(&child_ctx->mutex);
5079
5080         put_ctx(child_ctx);
5081 }
5082
5083 static void perf_free_event(struct perf_event *event,
5084                             struct perf_event_context *ctx)
5085 {
5086         struct perf_event *parent = event->parent;
5087
5088         if (WARN_ON_ONCE(!parent))
5089                 return;
5090
5091         mutex_lock(&parent->child_mutex);
5092         list_del_init(&event->child_list);
5093         mutex_unlock(&parent->child_mutex);
5094
5095         fput(parent->filp);
5096
5097         list_del_event(event, ctx);
5098         free_event(event);
5099 }
5100
5101 /*
5102  * free an unexposed, unused context as created by inheritance by
5103  * init_task below, used by fork() in case of fail.
5104  */
5105 void perf_event_free_task(struct task_struct *task)
5106 {
5107         struct perf_event_context *ctx = task->perf_event_ctxp;
5108         struct perf_event *event, *tmp;
5109
5110         if (!ctx)
5111                 return;
5112
5113         mutex_lock(&ctx->mutex);
5114 again:
5115         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5116                 perf_free_event(event, ctx);
5117
5118         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5119                                  group_entry)
5120                 perf_free_event(event, ctx);
5121
5122         if (!list_empty(&ctx->pinned_groups) ||
5123             !list_empty(&ctx->flexible_groups))
5124                 goto again;
5125
5126         mutex_unlock(&ctx->mutex);
5127
5128         put_ctx(ctx);
5129 }
5130
5131 static int
5132 inherit_task_group(struct perf_event *event, struct task_struct *parent,
5133                    struct perf_event_context *parent_ctx,
5134                    struct task_struct *child,
5135                    int *inherited_all)
5136 {
5137         int ret;
5138         struct perf_event_context *child_ctx = child->perf_event_ctxp;
5139
5140         if (!event->attr.inherit) {
5141                 *inherited_all = 0;
5142                 return 0;
5143         }
5144
5145         if (!child_ctx) {
5146                 /*
5147                  * This is executed from the parent task context, so
5148                  * inherit events that have been marked for cloning.
5149                  * First allocate and initialize a context for the
5150                  * child.
5151                  */
5152
5153                 child_ctx = kzalloc(sizeof(struct perf_event_context),
5154                                     GFP_KERNEL);
5155                 if (!child_ctx)
5156                         return -ENOMEM;
5157
5158                 __perf_event_init_context(child_ctx, child);
5159                 child->perf_event_ctxp = child_ctx;
5160                 get_task_struct(child);
5161         }
5162
5163         ret = inherit_group(event, parent, parent_ctx,
5164                             child, child_ctx);
5165
5166         if (ret)
5167                 *inherited_all = 0;
5168
5169         return ret;
5170 }
5171
5172
5173 /*
5174  * Initialize the perf_event context in task_struct
5175  */
5176 int perf_event_init_task(struct task_struct *child)
5177 {
5178         struct perf_event_context *child_ctx, *parent_ctx;
5179         struct perf_event_context *cloned_ctx;
5180         struct perf_event *event;
5181         struct task_struct *parent = current;
5182         int inherited_all = 1;
5183         int ret = 0;
5184
5185         child->perf_event_ctxp = NULL;
5186
5187         mutex_init(&child->perf_event_mutex);
5188         INIT_LIST_HEAD(&child->perf_event_list);
5189
5190         if (likely(!parent->perf_event_ctxp))
5191                 return 0;
5192
5193         /*
5194          * If the parent's context is a clone, pin it so it won't get
5195          * swapped under us.
5196          */
5197         parent_ctx = perf_pin_task_context(parent);
5198
5199         /*
5200          * No need to check if parent_ctx != NULL here; since we saw
5201          * it non-NULL earlier, the only reason for it to become NULL
5202          * is if we exit, and since we're currently in the middle of
5203          * a fork we can't be exiting at the same time.
5204          */
5205
5206         /*
5207          * Lock the parent list. No need to lock the child - not PID
5208          * hashed yet and not running, so nobody can access it.
5209          */
5210         mutex_lock(&parent_ctx->mutex);
5211
5212         /*
5213          * We dont have to disable NMIs - we are only looking at
5214          * the list, not manipulating it:
5215          */
5216         list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
5217                 ret = inherit_task_group(event, parent, parent_ctx, child,
5218                                          &inherited_all);
5219                 if (ret)
5220                         break;
5221         }
5222
5223         list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
5224                 ret = inherit_task_group(event, parent, parent_ctx, child,
5225                                          &inherited_all);
5226                 if (ret)
5227                         break;
5228         }
5229
5230         child_ctx = child->perf_event_ctxp;
5231
5232         if (child_ctx && inherited_all) {
5233                 /*
5234                  * Mark the child context as a clone of the parent
5235                  * context, or of whatever the parent is a clone of.
5236                  * Note that if the parent is a clone, it could get
5237                  * uncloned at any point, but that doesn't matter
5238                  * because the list of events and the generation
5239                  * count can't have changed since we took the mutex.
5240                  */
5241                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
5242                 if (cloned_ctx) {
5243                         child_ctx->parent_ctx = cloned_ctx;
5244                         child_ctx->parent_gen = parent_ctx->parent_gen;
5245                 } else {
5246                         child_ctx->parent_ctx = parent_ctx;
5247                         child_ctx->parent_gen = parent_ctx->generation;
5248                 }
5249                 get_ctx(child_ctx->parent_ctx);
5250         }
5251
5252         mutex_unlock(&parent_ctx->mutex);
5253
5254         perf_unpin_context(parent_ctx);
5255
5256         return ret;
5257 }
5258
5259 static void __cpuinit perf_event_init_cpu(int cpu)
5260 {
5261         struct perf_cpu_context *cpuctx;
5262
5263         cpuctx = &per_cpu(perf_cpu_context, cpu);
5264         __perf_event_init_context(&cpuctx->ctx, NULL);
5265
5266         spin_lock(&perf_resource_lock);
5267         cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5268         spin_unlock(&perf_resource_lock);
5269
5270         hw_perf_event_setup(cpu);
5271 }
5272
5273 #ifdef CONFIG_HOTPLUG_CPU
5274 static void __perf_event_exit_cpu(void *info)
5275 {
5276         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
5277         struct perf_event_context *ctx = &cpuctx->ctx;
5278         struct perf_event *event, *tmp;
5279
5280         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5281                 __perf_event_remove_from_context(event);
5282         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
5283                 __perf_event_remove_from_context(event);
5284 }
5285 static void perf_event_exit_cpu(int cpu)
5286 {
5287         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
5288         struct perf_event_context *ctx = &cpuctx->ctx;
5289
5290         mutex_lock(&ctx->mutex);
5291         smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
5292         mutex_unlock(&ctx->mutex);
5293 }
5294 #else
5295 static inline void perf_event_exit_cpu(int cpu) { }
5296 #endif
5297
5298 static int __cpuinit
5299 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5300 {
5301         unsigned int cpu = (long)hcpu;
5302
5303         switch (action) {
5304
5305         case CPU_UP_PREPARE:
5306         case CPU_UP_PREPARE_FROZEN:
5307                 perf_event_init_cpu(cpu);
5308                 break;
5309
5310         case CPU_ONLINE:
5311         case CPU_ONLINE_FROZEN:
5312                 hw_perf_event_setup_online(cpu);
5313                 break;
5314
5315         case CPU_DOWN_PREPARE:
5316         case CPU_DOWN_PREPARE_FROZEN:
5317                 perf_event_exit_cpu(cpu);
5318                 break;
5319
5320         default:
5321                 break;
5322         }
5323
5324         return NOTIFY_OK;
5325 }
5326
5327 /*
5328  * This has to have a higher priority than migration_notifier in sched.c.
5329  */
5330 static struct notifier_block __cpuinitdata perf_cpu_nb = {
5331         .notifier_call          = perf_cpu_notify,
5332         .priority               = 20,
5333 };
5334
5335 void __init perf_event_init(void)
5336 {
5337         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
5338                         (void *)(long)smp_processor_id());
5339         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
5340                         (void *)(long)smp_processor_id());
5341         register_cpu_notifier(&perf_cpu_nb);
5342 }
5343
5344 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
5345 {
5346         return sprintf(buf, "%d\n", perf_reserved_percpu);
5347 }
5348
5349 static ssize_t
5350 perf_set_reserve_percpu(struct sysdev_class *class,
5351                         const char *buf,
5352                         size_t count)
5353 {
5354         struct perf_cpu_context *cpuctx;
5355         unsigned long val;
5356         int err, cpu, mpt;
5357
5358         err = strict_strtoul(buf, 10, &val);
5359         if (err)
5360                 return err;
5361         if (val > perf_max_events)
5362                 return -EINVAL;
5363
5364         spin_lock(&perf_resource_lock);
5365         perf_reserved_percpu = val;
5366         for_each_online_cpu(cpu) {
5367                 cpuctx = &per_cpu(perf_cpu_context, cpu);
5368                 raw_spin_lock_irq(&cpuctx->ctx.lock);
5369                 mpt = min(perf_max_events - cpuctx->ctx.nr_events,
5370                           perf_max_events - perf_reserved_percpu);
5371                 cpuctx->max_pertask = mpt;
5372                 raw_spin_unlock_irq(&cpuctx->ctx.lock);
5373         }
5374         spin_unlock(&perf_resource_lock);
5375
5376         return count;
5377 }
5378
5379 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
5380 {
5381         return sprintf(buf, "%d\n", perf_overcommit);
5382 }
5383
5384 static ssize_t
5385 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
5386 {
5387         unsigned long val;
5388         int err;
5389
5390         err = strict_strtoul(buf, 10, &val);
5391         if (err)
5392                 return err;
5393         if (val > 1)
5394                 return -EINVAL;
5395
5396         spin_lock(&perf_resource_lock);
5397         perf_overcommit = val;
5398         spin_unlock(&perf_resource_lock);
5399
5400         return count;
5401 }
5402
5403 static SYSDEV_CLASS_ATTR(
5404                                 reserve_percpu,
5405                                 0644,
5406                                 perf_show_reserve_percpu,
5407                                 perf_set_reserve_percpu
5408                         );
5409
5410 static SYSDEV_CLASS_ATTR(
5411                                 overcommit,
5412                                 0644,
5413                                 perf_show_overcommit,
5414                                 perf_set_overcommit
5415                         );
5416
5417 static struct attribute *perfclass_attrs[] = {
5418         &attr_reserve_percpu.attr,
5419         &attr_overcommit.attr,
5420         NULL
5421 };
5422
5423 static struct attribute_group perfclass_attr_group = {
5424         .attrs                  = perfclass_attrs,
5425         .name                   = "perf_events",
5426 };
5427
5428 static int __init perf_event_sysfs_init(void)
5429 {
5430         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
5431                                   &perfclass_attr_group);
5432 }
5433 device_initcall(perf_event_sysfs_init);