perf_counter: Fix COMM and MMAP events for cpu wide counters
kernel/perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  *  For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/ptrace.h>
20 #include <linux/percpu.h>
21 #include <linux/vmstat.h>
22 #include <linux/hardirq.h>
23 #include <linux/rculist.h>
24 #include <linux/uaccess.h>
25 #include <linux/syscalls.h>
26 #include <linux/anon_inodes.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/perf_counter.h>
29 #include <linux/dcache.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34  * Each CPU has a list of per CPU counters:
35  */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_tracking __read_mostly;
44 static atomic_t nr_munmap_tracking __read_mostly;
45 static atomic_t nr_comm_tracking __read_mostly;
46
47 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
48 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
49 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
50
51 /*
52  * Lock for (sysadmin-configurable) counter reservations:
53  */
54 static DEFINE_SPINLOCK(perf_resource_lock);
55
56 /*
57  * Architecture provided APIs - weak aliases:
58  */
59 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
60 {
61         return NULL;
62 }
63
64 void __weak hw_perf_disable(void)               { barrier(); }
65 void __weak hw_perf_enable(void)                { barrier(); }
66
67 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
68 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
69                struct perf_cpu_context *cpuctx,
70                struct perf_counter_context *ctx, int cpu)
71 {
72         return 0;
73 }
74
75 void __weak perf_counter_print_debug(void)      { }
76
77 static DEFINE_PER_CPU(int, disable_count);
78
79 void __perf_disable(void)
80 {
81         __get_cpu_var(disable_count)++;
82 }
83
84 bool __perf_enable(void)
85 {
86         return !--__get_cpu_var(disable_count);
87 }
88
89 void perf_disable(void)
90 {
91         __perf_disable();
92         hw_perf_disable();
93 }
94
95 void perf_enable(void)
96 {
97         if (__perf_enable())
98                 hw_perf_enable();
99 }
100
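/*
 * Clarifying note (added, not in the original source): perf_disable()
 * and perf_enable() nest per CPU via disable_count.  hw_perf_disable()
 * is invoked on every perf_disable(), but hw_perf_enable() only runs
 * when the outermost perf_enable() brings the count back to zero:
 *
 *	perf_disable();		// count 0 -> 1, hw_perf_disable()
 *	  perf_disable();	// count 1 -> 2
 *	  perf_enable();	// count 2 -> 1, PMU stays disabled
 *	perf_enable();		// count 1 -> 0, hw_perf_enable()
 */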
101 static void get_ctx(struct perf_counter_context *ctx)
102 {
103         atomic_inc(&ctx->refcount);
104 }
105
106 static void free_ctx(struct rcu_head *head)
107 {
108         struct perf_counter_context *ctx;
109
110         ctx = container_of(head, struct perf_counter_context, rcu_head);
111         kfree(ctx);
112 }
113
114 static void put_ctx(struct perf_counter_context *ctx)
115 {
116         if (atomic_dec_and_test(&ctx->refcount)) {
117                 if (ctx->parent_ctx)
118                         put_ctx(ctx->parent_ctx);
119                 if (ctx->task)
120                         put_task_struct(ctx->task);
121                 call_rcu(&ctx->rcu_head, free_ctx);
122         }
123 }
124
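/*
 * Note (added for clarity): dropping the last reference releases the
 * context's references on its parent context and owning task and frees
 * the context itself via RCU (free_ctx), so lockless readers that found
 * it under rcu_read_lock() can keep dereferencing it until they leave
 * the RCU read-side critical section.
 */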
125 /*
126  * Add a counter to the lists for its context.
127  * Must be called with ctx->mutex and ctx->lock held.
128  */
129 static void
130 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
131 {
132         struct perf_counter *group_leader = counter->group_leader;
133
134         /*
135          * Depending on whether it is a standalone or sibling counter,
136          * add it straight to the context's counter list, or to the group
137          * leader's sibling list:
138          */
139         if (group_leader == counter)
140                 list_add_tail(&counter->list_entry, &ctx->counter_list);
141         else {
142                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
143                 group_leader->nr_siblings++;
144         }
145
146         list_add_rcu(&counter->event_entry, &ctx->event_list);
147         ctx->nr_counters++;
148 }
149
150 /*
151  * Remove a counter from the lists for its context.
152  * Must be called with ctx->mutex and ctx->lock held.
153  */
154 static void
155 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
156 {
157         struct perf_counter *sibling, *tmp;
158
159         if (list_empty(&counter->list_entry))
160                 return;
161         ctx->nr_counters--;
162
163         list_del_init(&counter->list_entry);
164         list_del_rcu(&counter->event_entry);
165
166         if (counter->group_leader != counter)
167                 counter->group_leader->nr_siblings--;
168
169         /*
170          * If this was a group counter with sibling counters then
171          * upgrade the siblings to singleton counters by adding them
172          * to the context list directly:
173          */
174         list_for_each_entry_safe(sibling, tmp,
175                                  &counter->sibling_list, list_entry) {
176
177                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
178                 sibling->group_leader = sibling;
179         }
180 }
181
182 static void
183 counter_sched_out(struct perf_counter *counter,
184                   struct perf_cpu_context *cpuctx,
185                   struct perf_counter_context *ctx)
186 {
187         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
188                 return;
189
190         counter->state = PERF_COUNTER_STATE_INACTIVE;
191         counter->tstamp_stopped = ctx->time;
192         counter->pmu->disable(counter);
193         counter->oncpu = -1;
194
195         if (!is_software_counter(counter))
196                 cpuctx->active_oncpu--;
197         ctx->nr_active--;
198         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
199                 cpuctx->exclusive = 0;
200 }
201
202 static void
203 group_sched_out(struct perf_counter *group_counter,
204                 struct perf_cpu_context *cpuctx,
205                 struct perf_counter_context *ctx)
206 {
207         struct perf_counter *counter;
208
209         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
210                 return;
211
212         counter_sched_out(group_counter, cpuctx, ctx);
213
214         /*
215          * Schedule out siblings (if any):
216          */
217         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
218                 counter_sched_out(counter, cpuctx, ctx);
219
220         if (group_counter->hw_event.exclusive)
221                 cpuctx->exclusive = 0;
222 }
223
224 /*
225  * Cross CPU call to remove a performance counter
226  *
227  * We disable the counter on the hardware level first. After that we
228  * remove it from the context list.
229  */
230 static void __perf_counter_remove_from_context(void *info)
231 {
232         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
233         struct perf_counter *counter = info;
234         struct perf_counter_context *ctx = counter->ctx;
235         unsigned long flags;
236
237         local_irq_save(flags);
238         /*
239          * If this is a task context, we need to check whether it is
240          * the current task context of this cpu. If not it has been
241          * scheduled out before the smp call arrived.
242          */
243         if (ctx->task && cpuctx->task_ctx != ctx) {
244                 local_irq_restore(flags);
245                 return;
246         }
247
248         spin_lock(&ctx->lock);
249         /*
250          * Protect the list operation against NMI by disabling the
251          * counters on a global level.
252          */
253         perf_disable();
254
255         counter_sched_out(counter, cpuctx, ctx);
256
257         list_del_counter(counter, ctx);
258
259         if (!ctx->task) {
260                 /*
261                  * Allow more per task counters with respect to the
262                  * reservation:
263                  */
264                 cpuctx->max_pertask =
265                         min(perf_max_counters - ctx->nr_counters,
266                             perf_max_counters - perf_reserved_percpu);
267         }
268
269         perf_enable();
270         spin_unlock_irqrestore(&ctx->lock, flags);
271 }
272
273
274 /*
275  * Remove the counter from a task's (or a CPU's) list of counters.
276  *
277  * Must be called with ctx->mutex held.
278  *
279  * CPU counters are removed with a smp call. For task counters we only
280  * call when the task is on a CPU.
281  *
282  * If counter->ctx is a cloned context, callers must make sure that
283  * every task struct that counter->ctx->task could possibly point to
284  * remains valid.  This is OK when called from perf_release since
285  * that only calls us on the top-level context, which can't be a clone.
286  * When called from perf_counter_exit_task, it's OK because the
287  * context has been detached from its task.
288  */
289 static void perf_counter_remove_from_context(struct perf_counter *counter)
290 {
291         struct perf_counter_context *ctx = counter->ctx;
292         struct task_struct *task = ctx->task;
293
294         if (!task) {
295                 /*
296                  * Per cpu counters are removed via an smp call and
297          * the removal is always successful.
298                  */
299                 smp_call_function_single(counter->cpu,
300                                          __perf_counter_remove_from_context,
301                                          counter, 1);
302                 return;
303         }
304
305 retry:
306         task_oncpu_function_call(task, __perf_counter_remove_from_context,
307                                  counter);
308
309         spin_lock_irq(&ctx->lock);
310         /*
311          * If the context is active we need to retry the smp call.
312          */
313         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
314                 spin_unlock_irq(&ctx->lock);
315                 goto retry;
316         }
317
318         /*
319          * The lock prevents this context from being scheduled in, so
320          * we can remove the counter safely if the call above did not
321          * succeed.
322          */
323         if (!list_empty(&counter->list_entry)) {
324                 list_del_counter(counter, ctx);
325         }
326         spin_unlock_irq(&ctx->lock);
327 }
328
329 static inline u64 perf_clock(void)
330 {
331         return cpu_clock(smp_processor_id());
332 }
333
334 /*
335  * Update the record of the current time in a context.
336  */
337 static void update_context_time(struct perf_counter_context *ctx)
338 {
339         u64 now = perf_clock();
340
341         ctx->time += now - ctx->timestamp;
342         ctx->timestamp = now;
343 }
344
345 /*
346  * Update the total_time_enabled and total_time_running fields for a counter.
347  */
348 static void update_counter_times(struct perf_counter *counter)
349 {
350         struct perf_counter_context *ctx = counter->ctx;
351         u64 run_end;
352
353         if (counter->state < PERF_COUNTER_STATE_INACTIVE)
354                 return;
355
356         counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
357
358         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
359                 run_end = counter->tstamp_stopped;
360         else
361                 run_end = ctx->time;
362
363         counter->total_time_running = run_end - counter->tstamp_running;
364 }
365
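/*
 * Worked example (added for clarity, values are made up): a counter
 * added to its context at ctx->time 100 gets tstamp_enabled = 100.
 * If it is first scheduled in at 150 and scheduled out at 180,
 * counter_sched_in()/counter_sched_out() leave tstamp_running = 150
 * and tstamp_stopped = 180.  With ctx->time now at 200 and the counter
 * INACTIVE:
 *
 *	total_time_enabled = 200 - 100 = 100
 *	total_time_running = 180 - 150 =  30
 *
 * For an ACTIVE counter, run_end is ctx->time instead of tstamp_stopped.
 */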
366 /*
367  * Update total_time_enabled and total_time_running for all counters in a group.
368  */
369 static void update_group_times(struct perf_counter *leader)
370 {
371         struct perf_counter *counter;
372
373         update_counter_times(leader);
374         list_for_each_entry(counter, &leader->sibling_list, list_entry)
375                 update_counter_times(counter);
376 }
377
378 /*
379  * Cross CPU call to disable a performance counter
380  */
381 static void __perf_counter_disable(void *info)
382 {
383         struct perf_counter *counter = info;
384         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
385         struct perf_counter_context *ctx = counter->ctx;
386         unsigned long flags;
387
388         local_irq_save(flags);
389         /*
390          * If this is a per-task counter, need to check whether this
391          * counter's task is the current task on this cpu.
392          */
393         if (ctx->task && cpuctx->task_ctx != ctx) {
394                 local_irq_restore(flags);
395                 return;
396         }
397
398         spin_lock(&ctx->lock);
399
400         /*
401          * If the counter is on, turn it off.
402          * If it is in error state, leave it in error state.
403          */
404         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
405                 update_context_time(ctx);
406                 update_counter_times(counter);
407                 if (counter == counter->group_leader)
408                         group_sched_out(counter, cpuctx, ctx);
409                 else
410                         counter_sched_out(counter, cpuctx, ctx);
411                 counter->state = PERF_COUNTER_STATE_OFF;
412         }
413
414         spin_unlock_irqrestore(&ctx->lock, flags);
415 }
416
417 /*
418  * Disable a counter.
419  *
420  * If counter->ctx is a cloned context, callers must make sure that
421  * every task struct that counter->ctx->task could possibly point to
422  * remains valid.  This condition is satisfied when called through
423  * perf_counter_for_each_child or perf_counter_for_each because they
424  * hold the top-level counter's child_mutex, so any descendant that
425  * goes to exit will block in sync_child_counter.
426  * When called from perf_pending_counter it's OK because counter->ctx
427  * is the current context on this CPU and preemption is disabled,
428  * hence we can't get into perf_counter_task_sched_out for this context.
429  */
430 static void perf_counter_disable(struct perf_counter *counter)
431 {
432         struct perf_counter_context *ctx = counter->ctx;
433         struct task_struct *task = ctx->task;
434
435         if (!task) {
436                 /*
437                  * Disable the counter on the cpu that it's on
438                  */
439                 smp_call_function_single(counter->cpu, __perf_counter_disable,
440                                          counter, 1);
441                 return;
442         }
443
444  retry:
445         task_oncpu_function_call(task, __perf_counter_disable, counter);
446
447         spin_lock_irq(&ctx->lock);
448         /*
449          * If the counter is still active, we need to retry the cross-call.
450          */
451         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
452                 spin_unlock_irq(&ctx->lock);
453                 goto retry;
454         }
455
456         /*
457          * Since we have the lock this context can't be scheduled
458          * in, so we can change the state safely.
459          */
460         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
461                 update_counter_times(counter);
462                 counter->state = PERF_COUNTER_STATE_OFF;
463         }
464
465         spin_unlock_irq(&ctx->lock);
466 }
467
468 static int
469 counter_sched_in(struct perf_counter *counter,
470                  struct perf_cpu_context *cpuctx,
471                  struct perf_counter_context *ctx,
472                  int cpu)
473 {
474         if (counter->state <= PERF_COUNTER_STATE_OFF)
475                 return 0;
476
477         counter->state = PERF_COUNTER_STATE_ACTIVE;
478         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
479         /*
480          * The new state must be visible before we turn it on in the hardware:
481          */
482         smp_wmb();
483
484         if (counter->pmu->enable(counter)) {
485                 counter->state = PERF_COUNTER_STATE_INACTIVE;
486                 counter->oncpu = -1;
487                 return -EAGAIN;
488         }
489
490         counter->tstamp_running += ctx->time - counter->tstamp_stopped;
491
492         if (!is_software_counter(counter))
493                 cpuctx->active_oncpu++;
494         ctx->nr_active++;
495
496         if (counter->hw_event.exclusive)
497                 cpuctx->exclusive = 1;
498
499         return 0;
500 }
501
502 static int
503 group_sched_in(struct perf_counter *group_counter,
504                struct perf_cpu_context *cpuctx,
505                struct perf_counter_context *ctx,
506                int cpu)
507 {
508         struct perf_counter *counter, *partial_group;
509         int ret;
510
511         if (group_counter->state == PERF_COUNTER_STATE_OFF)
512                 return 0;
513
514         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
515         if (ret)
516                 return ret < 0 ? ret : 0;
517
518         group_counter->prev_state = group_counter->state;
519         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
520                 return -EAGAIN;
521
522         /*
523          * Schedule in siblings as one group (if any):
524          */
525         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
526                 counter->prev_state = counter->state;
527                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
528                         partial_group = counter;
529                         goto group_error;
530                 }
531         }
532
533         return 0;
534
535 group_error:
536         /*
537          * Groups can be scheduled in as one unit only, so undo any
538          * partial group before returning:
539          */
540         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
541                 if (counter == partial_group)
542                         break;
543                 counter_sched_out(counter, cpuctx, ctx);
544         }
545         counter_sched_out(group_counter, cpuctx, ctx);
546
547         return -EAGAIN;
548 }
549
550 /*
551  * Return 1 for a group consisting entirely of software counters,
552  * 0 if the group contains any hardware counters.
553  */
554 static int is_software_only_group(struct perf_counter *leader)
555 {
556         struct perf_counter *counter;
557
558         if (!is_software_counter(leader))
559                 return 0;
560
561         list_for_each_entry(counter, &leader->sibling_list, list_entry)
562                 if (!is_software_counter(counter))
563                         return 0;
564
565         return 1;
566 }
567
568 /*
569  * Work out whether we can put this counter group on the CPU now.
570  */
571 static int group_can_go_on(struct perf_counter *counter,
572                            struct perf_cpu_context *cpuctx,
573                            int can_add_hw)
574 {
575         /*
576          * Groups consisting entirely of software counters can always go on.
577          */
578         if (is_software_only_group(counter))
579                 return 1;
580         /*
581          * If an exclusive group is already on, no other hardware
582          * counters can go on.
583          */
584         if (cpuctx->exclusive)
585                 return 0;
586         /*
587          * If this group is exclusive and there are already
588          * counters on the CPU, it can't go on.
589          */
590         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
591                 return 0;
592         /*
593          * Otherwise, try to add it if all previous groups were able
594          * to go on.
595          */
596         return can_add_hw;
597 }
598
599 static void add_counter_to_ctx(struct perf_counter *counter,
600                                struct perf_counter_context *ctx)
601 {
602         list_add_counter(counter, ctx);
603         counter->prev_state = PERF_COUNTER_STATE_OFF;
604         counter->tstamp_enabled = ctx->time;
605         counter->tstamp_running = ctx->time;
606         counter->tstamp_stopped = ctx->time;
607 }
608
609 /*
610  * Cross CPU call to install and enable a performance counter
611  *
612  * Must be called with ctx->mutex held
613  */
614 static void __perf_install_in_context(void *info)
615 {
616         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
617         struct perf_counter *counter = info;
618         struct perf_counter_context *ctx = counter->ctx;
619         struct perf_counter *leader = counter->group_leader;
620         int cpu = smp_processor_id();
621         unsigned long flags;
622         int err;
623
624         local_irq_save(flags);
625         /*
626          * If this is a task context, we need to check whether it is
627          * the current task context of this cpu. If not it has been
628          * scheduled out before the smp call arrived.
629          * Or possibly this is the right context but it isn't
630          * on this cpu because it had no counters.
631          */
632         if (ctx->task && cpuctx->task_ctx != ctx) {
633                 if (cpuctx->task_ctx || ctx->task != current) {
634                         local_irq_restore(flags);
635                         return;
636                 }
637                 cpuctx->task_ctx = ctx;
638         }
639
640         spin_lock(&ctx->lock);
641         ctx->is_active = 1;
642         update_context_time(ctx);
643
644         /*
645          * Protect the list operation against NMI by disabling the
646          * counters on a global level. NOP for non NMI based counters.
647          */
648         perf_disable();
649
650         add_counter_to_ctx(counter, ctx);
651
652         /*
653          * Don't put the counter on if it is disabled or if
654          * it is in a group and the group isn't on.
655          */
656         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
657             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
658                 goto unlock;
659
660         /*
661          * An exclusive counter can't go on if there are already active
662          * hardware counters, and no hardware counter can go on if there
663          * is already an exclusive counter on.
664          */
665         if (!group_can_go_on(counter, cpuctx, 1))
666                 err = -EEXIST;
667         else
668                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
669
670         if (err) {
671                 /*
672                  * This counter couldn't go on.  If it is in a group
673                  * then we have to pull the whole group off.
674                  * If the counter group is pinned then put it in error state.
675                  */
676                 if (leader != counter)
677                         group_sched_out(leader, cpuctx, ctx);
678                 if (leader->hw_event.pinned) {
679                         update_group_times(leader);
680                         leader->state = PERF_COUNTER_STATE_ERROR;
681                 }
682         }
683
684         if (!err && !ctx->task && cpuctx->max_pertask)
685                 cpuctx->max_pertask--;
686
687  unlock:
688         perf_enable();
689
690         spin_unlock_irqrestore(&ctx->lock, flags);
691 }
692
693 /*
694  * Attach a performance counter to a context
695  *
696  * First we add the counter to the list with the hardware enable bit
697  * in counter->hw_config cleared.
698  *
699  * If the counter is attached to a task which is on a CPU we use a smp
700  * call to enable it in the task context. The task might have been
701  * scheduled away, but we check this in the smp call again.
702  *
703  * Must be called with ctx->mutex held.
704  */
705 static void
706 perf_install_in_context(struct perf_counter_context *ctx,
707                         struct perf_counter *counter,
708                         int cpu)
709 {
710         struct task_struct *task = ctx->task;
711
712         if (!task) {
713                 /*
714                  * Per cpu counters are installed via an smp call and
715                  * the install is always successful.
716                  */
717                 smp_call_function_single(cpu, __perf_install_in_context,
718                                          counter, 1);
719                 return;
720         }
721
722 retry:
723         task_oncpu_function_call(task, __perf_install_in_context,
724                                  counter);
725
726         spin_lock_irq(&ctx->lock);
727         /*
728          * If the context is active we need to retry the smp call.
729          */
730         if (ctx->is_active && list_empty(&counter->list_entry)) {
731                 spin_unlock_irq(&ctx->lock);
732                 goto retry;
733         }
734
735         /*
736          * The lock prevents this context from being scheduled in, so
737          * we can add the counter safely if the call above did not
738          * succeed.
739          */
740         if (list_empty(&counter->list_entry))
741                 add_counter_to_ctx(counter, ctx);
742         spin_unlock_irq(&ctx->lock);
743 }
744
745 /*
746  * Cross CPU call to enable a performance counter
747  */
748 static void __perf_counter_enable(void *info)
749 {
750         struct perf_counter *counter = info;
751         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
752         struct perf_counter_context *ctx = counter->ctx;
753         struct perf_counter *leader = counter->group_leader;
754         unsigned long flags;
755         int err;
756
757         local_irq_save(flags);
758         /*
759          * If this is a per-task counter, need to check whether this
760          * counter's task is the current task on this cpu.
761          */
762         if (ctx->task && cpuctx->task_ctx != ctx) {
763                 if (cpuctx->task_ctx || ctx->task != current) {
764                         local_irq_restore(flags);
765                         return;
766                 }
767                 cpuctx->task_ctx = ctx;
768         }
769
770         spin_lock(&ctx->lock);
771         ctx->is_active = 1;
772         update_context_time(ctx);
773
774         counter->prev_state = counter->state;
775         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
776                 goto unlock;
777         counter->state = PERF_COUNTER_STATE_INACTIVE;
778         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
779
780         /*
781          * If the counter is in a group and isn't the group leader,
782          * then don't put it on unless the group is on.
783          */
784         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
785                 goto unlock;
786
787         if (!group_can_go_on(counter, cpuctx, 1)) {
788                 err = -EEXIST;
789         } else {
790                 perf_disable();
791                 if (counter == leader)
792                         err = group_sched_in(counter, cpuctx, ctx,
793                                              smp_processor_id());
794                 else
795                         err = counter_sched_in(counter, cpuctx, ctx,
796                                                smp_processor_id());
797                 perf_enable();
798         }
799
800         if (err) {
801                 /*
802                  * If this counter can't go on and it's part of a
803                  * group, then the whole group has to come off.
804                  */
805                 if (leader != counter)
806                         group_sched_out(leader, cpuctx, ctx);
807                 if (leader->hw_event.pinned) {
808                         update_group_times(leader);
809                         leader->state = PERF_COUNTER_STATE_ERROR;
810                 }
811         }
812
813  unlock:
814         spin_unlock_irqrestore(&ctx->lock, flags);
815 }
816
817 /*
818  * Enable a counter.
819  *
820  * If counter->ctx is a cloned context, callers must make sure that
821  * every task struct that counter->ctx->task could possibly point to
822  * remains valid.  This condition is satisfied when called through
823  * perf_counter_for_each_child or perf_counter_for_each as described
824  * for perf_counter_disable.
825  */
826 static void perf_counter_enable(struct perf_counter *counter)
827 {
828         struct perf_counter_context *ctx = counter->ctx;
829         struct task_struct *task = ctx->task;
830
831         if (!task) {
832                 /*
833                  * Enable the counter on the cpu that it's on
834                  */
835                 smp_call_function_single(counter->cpu, __perf_counter_enable,
836                                          counter, 1);
837                 return;
838         }
839
840         spin_lock_irq(&ctx->lock);
841         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
842                 goto out;
843
844         /*
845          * If the counter is in error state, clear that first.
846          * That way, if we see the counter in error state below, we
847          * know that it has gone back into error state, as distinct
848          * from the task having been scheduled away before the
849          * cross-call arrived.
850          */
851         if (counter->state == PERF_COUNTER_STATE_ERROR)
852                 counter->state = PERF_COUNTER_STATE_OFF;
853
854  retry:
855         spin_unlock_irq(&ctx->lock);
856         task_oncpu_function_call(task, __perf_counter_enable, counter);
857
858         spin_lock_irq(&ctx->lock);
859
860         /*
861          * If the context is active and the counter is still off,
862          * we need to retry the cross-call.
863          */
864         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
865                 goto retry;
866
867         /*
868          * Since we have the lock this context can't be scheduled
869          * in, so we can change the state safely.
870          */
871         if (counter->state == PERF_COUNTER_STATE_OFF) {
872                 counter->state = PERF_COUNTER_STATE_INACTIVE;
873                 counter->tstamp_enabled =
874                         ctx->time - counter->total_time_enabled;
875         }
876  out:
877         spin_unlock_irq(&ctx->lock);
878 }
879
880 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
881 {
882         /*
883          * not supported on inherited counters
884          */
885         if (counter->hw_event.inherit)
886                 return -EINVAL;
887
888         atomic_add(refresh, &counter->event_limit);
889         perf_counter_enable(counter);
890
891         return 0;
892 }
893
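/*
 * Illustrative user-space usage (added, not part of this file): once
 * the remaining event_limit reaches zero the counter disables itself,
 * so a monitor that has handled the pending overflows can re-arm it
 * with something like:
 *
 *	ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, 1);
 *
 * which adds 1 to the event limit and re-enables the counter, as
 * perf_counter_refresh() above implements.
 */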
894 void __perf_counter_sched_out(struct perf_counter_context *ctx,
895                               struct perf_cpu_context *cpuctx)
896 {
897         struct perf_counter *counter;
898
899         spin_lock(&ctx->lock);
900         ctx->is_active = 0;
901         if (likely(!ctx->nr_counters))
902                 goto out;
903         update_context_time(ctx);
904
905         perf_disable();
906         if (ctx->nr_active) {
907                 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
908                         if (counter != counter->group_leader)
909                                 counter_sched_out(counter, cpuctx, ctx);
910                         else
911                                 group_sched_out(counter, cpuctx, ctx);
912                 }
913         }
914         perf_enable();
915  out:
916         spin_unlock(&ctx->lock);
917 }
918
919 /*
920  * Test whether two contexts are equivalent, i.e. whether they
921  * have both been cloned from the same version of the same context
922  * and they both have the same number of enabled counters.
923  * If the number of enabled counters is the same, then the set
924  * of enabled counters should be the same, because these are both
925  * inherited contexts, therefore we can't access individual counters
926  * in them directly with an fd; we can only enable/disable all
927  * counters via prctl, or enable/disable all counters in a family
928  * via ioctl, which will have the same effect on both contexts.
929  */
930 static int context_equiv(struct perf_counter_context *ctx1,
931                          struct perf_counter_context *ctx2)
932 {
933         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
934                 && ctx1->parent_gen == ctx2->parent_gen
935                 && ctx1->parent_gen != ~0ull;
936 }
937
938 /*
939  * Called from scheduler to remove the counters of the current task,
940  * with interrupts disabled.
941  *
942  * We stop each counter and update the counter value in counter->count.
943  *
944  * This does not protect us against NMI, but disable()
945  * sets the disabled bit in the control field of counter _before_
946  * accessing the counter control register. If an NMI hits, then it will
947  * not restart the counter.
948  */
949 void perf_counter_task_sched_out(struct task_struct *task,
950                                  struct task_struct *next, int cpu)
951 {
952         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
953         struct perf_counter_context *ctx = task->perf_counter_ctxp;
954         struct perf_counter_context *next_ctx;
955         struct perf_counter_context *parent;
956         struct pt_regs *regs;
957         int do_switch = 1;
958
959         regs = task_pt_regs(task);
960         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
961
962         if (likely(!ctx || !cpuctx->task_ctx))
963                 return;
964
965         update_context_time(ctx);
966
967         rcu_read_lock();
968         parent = rcu_dereference(ctx->parent_ctx);
969         next_ctx = next->perf_counter_ctxp;
970         if (parent && next_ctx &&
971             rcu_dereference(next_ctx->parent_ctx) == parent) {
972                 /*
973                  * Looks like the two contexts are clones, so we might be
974                  * able to optimize the context switch.  We lock both
975                  * contexts and check that they are clones under the
976                  * lock (including re-checking that neither has been
977                  * uncloned in the meantime).  It doesn't matter which
978                  * order we take the locks because no other cpu could
979                  * be trying to lock both of these tasks.
980                  */
981                 spin_lock(&ctx->lock);
982                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
983                 if (context_equiv(ctx, next_ctx)) {
984                         task->perf_counter_ctxp = next_ctx;
985                         next->perf_counter_ctxp = ctx;
986                         ctx->task = next;
987                         next_ctx->task = task;
988                         do_switch = 0;
989                 }
990                 spin_unlock(&next_ctx->lock);
991                 spin_unlock(&ctx->lock);
992         }
993         rcu_read_unlock();
994
995         if (do_switch) {
996                 __perf_counter_sched_out(ctx, cpuctx);
997                 cpuctx->task_ctx = NULL;
998         }
999 }
1000
1001 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1002 {
1003         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1004
1005         if (!cpuctx->task_ctx)
1006                 return;
1007
1008         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1009                 return;
1010
1011         __perf_counter_sched_out(ctx, cpuctx);
1012         cpuctx->task_ctx = NULL;
1013 }
1014
1015 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1016 {
1017         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1018 }
1019
1020 static void
1021 __perf_counter_sched_in(struct perf_counter_context *ctx,
1022                         struct perf_cpu_context *cpuctx, int cpu)
1023 {
1024         struct perf_counter *counter;
1025         int can_add_hw = 1;
1026
1027         spin_lock(&ctx->lock);
1028         ctx->is_active = 1;
1029         if (likely(!ctx->nr_counters))
1030                 goto out;
1031
1032         ctx->timestamp = perf_clock();
1033
1034         perf_disable();
1035
1036         /*
1037          * First go through the list and put on any pinned groups
1038          * in order to give them the best chance of going on.
1039          */
1040         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1041                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1042                     !counter->hw_event.pinned)
1043                         continue;
1044                 if (counter->cpu != -1 && counter->cpu != cpu)
1045                         continue;
1046
1047                 if (counter != counter->group_leader)
1048                         counter_sched_in(counter, cpuctx, ctx, cpu);
1049                 else {
1050                         if (group_can_go_on(counter, cpuctx, 1))
1051                                 group_sched_in(counter, cpuctx, ctx, cpu);
1052                 }
1053
1054                 /*
1055                  * If this pinned group hasn't been scheduled,
1056                  * put it in error state.
1057                  */
1058                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1059                         update_group_times(counter);
1060                         counter->state = PERF_COUNTER_STATE_ERROR;
1061                 }
1062         }
1063
1064         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1065                 /*
1066                  * Ignore counters in OFF or ERROR state, and
1067                  * ignore pinned counters since we did them already.
1068                  */
1069                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1070                     counter->hw_event.pinned)
1071                         continue;
1072
1073                 /*
1074                  * Listen to the 'cpu' scheduling filter constraint
1075                  * of counters:
1076                  */
1077                 if (counter->cpu != -1 && counter->cpu != cpu)
1078                         continue;
1079
1080                 if (counter != counter->group_leader) {
1081                         if (counter_sched_in(counter, cpuctx, ctx, cpu))
1082                                 can_add_hw = 0;
1083                 } else {
1084                         if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1085                                 if (group_sched_in(counter, cpuctx, ctx, cpu))
1086                                         can_add_hw = 0;
1087                         }
1088                 }
1089         }
1090         perf_enable();
1091  out:
1092         spin_unlock(&ctx->lock);
1093 }
1094
1095 /*
1096  * Called from scheduler to add the counters of the current task
1097  * with interrupts disabled.
1098  *
1099  * We restore the counter value and then enable it.
1100  *
1101  * This does not protect us against NMI, but enable()
1102  * sets the enabled bit in the control field of counter _before_
1103  * accessing the counter control register. If an NMI hits, then it will
1104  * keep the counter running.
1105  */
1106 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1107 {
1108         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1109         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1110
1111         if (likely(!ctx))
1112                 return;
1113         if (cpuctx->task_ctx == ctx)
1114                 return;
1115         __perf_counter_sched_in(ctx, cpuctx, cpu);
1116         cpuctx->task_ctx = ctx;
1117 }
1118
1119 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1120 {
1121         struct perf_counter_context *ctx = &cpuctx->ctx;
1122
1123         __perf_counter_sched_in(ctx, cpuctx, cpu);
1124 }
1125
1126 #define MAX_INTERRUPTS (~0ULL)
1127
1128 static void perf_log_throttle(struct perf_counter *counter, int enable);
1129 static void perf_log_period(struct perf_counter *counter, u64 period);
1130
1131 static void perf_adjust_freq(struct perf_counter_context *ctx)
1132 {
1133         struct perf_counter *counter;
1134         u64 interrupts, irq_period;
1135         u64 events, period;
1136         s64 delta;
1137
1138         spin_lock(&ctx->lock);
1139         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1140                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1141                         continue;
1142
1143                 interrupts = counter->hw.interrupts;
1144                 counter->hw.interrupts = 0;
1145
1146                 if (interrupts == MAX_INTERRUPTS) {
1147                         perf_log_throttle(counter, 1);
1148                         counter->pmu->unthrottle(counter);
1149                         interrupts = 2*sysctl_perf_counter_limit/HZ;
1150                 }
1151
1152                 if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
1153                         continue;
1154
1155                 events = HZ * interrupts * counter->hw.irq_period;
1156                 period = div64_u64(events, counter->hw_event.irq_freq);
1157
1158                 delta = (s64)(1 + period - counter->hw.irq_period);
1159                 delta >>= 1;
1160
1161                 irq_period = counter->hw.irq_period + delta;
1162
1163                 if (!irq_period)
1164                         irq_period = 1;
1165
1166                 perf_log_period(counter, irq_period);
1167
1168                 counter->hw.irq_period = irq_period;
1169         }
1170         spin_unlock(&ctx->lock);
1171 }
1172
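/*
 * Worked example of the adjustment above (added for clarity, numbers
 * are made up, assuming HZ == 1000): with irq_freq = 1000 samples/sec,
 * a current irq_period of 100000 events/sample and interrupts == 2 in
 * the last tick:
 *
 *	events = 1000 * 2 * 100000	= 200000000 events/sec (estimate)
 *	period = 200000000 / 1000	= 200000 (period matching irq_freq)
 *	delta  = (1 + 200000 - 100000) >> 1 = 50000
 *	irq_period = 100000 + 50000	= 150000
 *
 * i.e. the period moves roughly half way towards the target each tick,
 * which damps oscillation of the sampling frequency.
 */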
1173 /*
1174  * Round-robin a context's counters:
1175  */
1176 static void rotate_ctx(struct perf_counter_context *ctx)
1177 {
1178         struct perf_counter *counter;
1179
1180         if (!ctx->nr_counters)
1181                 return;
1182
1183         spin_lock(&ctx->lock);
1184         /*
1185          * Rotate the first entry last (works just fine for group counters too):
1186          */
1187         perf_disable();
1188         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1189                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1190                 break;
1191         }
1192         perf_enable();
1193
1194         spin_unlock(&ctx->lock);
1195 }
1196
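/*
 * Note (added for clarity): the list_for_each_entry()/break pair above
 * moves only the first counter (or group leader) to the tail, so each
 * tick rotates which group gets first claim on the hardware counters.
 */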
1197 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1198 {
1199         struct perf_cpu_context *cpuctx;
1200         struct perf_counter_context *ctx;
1201
1202         if (!atomic_read(&nr_counters))
1203                 return;
1204
1205         cpuctx = &per_cpu(perf_cpu_context, cpu);
1206         ctx = curr->perf_counter_ctxp;
1207
1208         perf_adjust_freq(&cpuctx->ctx);
1209         if (ctx)
1210                 perf_adjust_freq(ctx);
1211
1212         perf_counter_cpu_sched_out(cpuctx);
1213         if (ctx)
1214                 __perf_counter_task_sched_out(ctx);
1215
1216         rotate_ctx(&cpuctx->ctx);
1217         if (ctx)
1218                 rotate_ctx(ctx);
1219
1220         perf_counter_cpu_sched_in(cpuctx, cpu);
1221         if (ctx)
1222                 perf_counter_task_sched_in(curr, cpu);
1223 }
1224
1225 /*
1226  * Cross CPU call to read the hardware counter
1227  */
1228 static void __read(void *info)
1229 {
1230         struct perf_counter *counter = info;
1231         struct perf_counter_context *ctx = counter->ctx;
1232         unsigned long flags;
1233
1234         local_irq_save(flags);
1235         if (ctx->is_active)
1236                 update_context_time(ctx);
1237         counter->pmu->read(counter);
1238         update_counter_times(counter);
1239         local_irq_restore(flags);
1240 }
1241
1242 static u64 perf_counter_read(struct perf_counter *counter)
1243 {
1244         /*
1245          * If counter is enabled and currently active on a CPU, update the
1246          * value in the counter structure:
1247          */
1248         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1249                 smp_call_function_single(counter->oncpu,
1250                                          __read, counter, 1);
1251         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1252                 update_counter_times(counter);
1253         }
1254
1255         return atomic64_read(&counter->count);
1256 }
1257
1258 /*
1259  * Initialize the perf_counter context in a task_struct:
1260  */
1261 static void
1262 __perf_counter_init_context(struct perf_counter_context *ctx,
1263                             struct task_struct *task)
1264 {
1265         memset(ctx, 0, sizeof(*ctx));
1266         spin_lock_init(&ctx->lock);
1267         mutex_init(&ctx->mutex);
1268         INIT_LIST_HEAD(&ctx->counter_list);
1269         INIT_LIST_HEAD(&ctx->event_list);
1270         atomic_set(&ctx->refcount, 1);
1271         ctx->task = task;
1272 }
1273
1274 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1275 {
1276         struct perf_cpu_context *cpuctx;
1277         struct perf_counter_context *ctx;
1278         struct perf_counter_context *parent_ctx;
1279         struct task_struct *task;
1280         int err;
1281
1282         /*
1283          * If cpu is not a wildcard then this is a percpu counter:
1284          */
1285         if (cpu != -1) {
1286                 /* Must be root to operate on a CPU counter: */
1287                 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1288                         return ERR_PTR(-EACCES);
1289
1290                 if (cpu < 0 || cpu > num_possible_cpus())
1291                         return ERR_PTR(-EINVAL);
1292
1293                 /*
1294          * We could be clever and allow attaching a counter to an
1295                  * offline CPU and activate it when the CPU comes up, but
1296                  * that's for later.
1297                  */
1298                 if (!cpu_isset(cpu, cpu_online_map))
1299                         return ERR_PTR(-ENODEV);
1300
1301                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1302                 ctx = &cpuctx->ctx;
1303                 get_ctx(ctx);
1304
1305                 return ctx;
1306         }
1307
1308         rcu_read_lock();
1309         if (!pid)
1310                 task = current;
1311         else
1312                 task = find_task_by_vpid(pid);
1313         if (task)
1314                 get_task_struct(task);
1315         rcu_read_unlock();
1316
1317         if (!task)
1318                 return ERR_PTR(-ESRCH);
1319
1320         /*
1321          * Can't attach counters to a dying task.
1322          */
1323         err = -ESRCH;
1324         if (task->flags & PF_EXITING)
1325                 goto errout;
1326
1327         /* Reuse ptrace permission checks for now. */
1328         err = -EACCES;
1329         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1330                 goto errout;
1331
1332  retry_lock:
1333         rcu_read_lock();
1334  retry:
1335         ctx = rcu_dereference(task->perf_counter_ctxp);
1336         if (ctx) {
1337                 /*
1338                  * If this context is a clone of another, it might
1339                  * get swapped for another underneath us by
1340                  * perf_counter_task_sched_out, though the
1341                  * rcu_read_lock() protects us from any context
1342                  * getting freed.  Lock the context and check if it
1343                  * got swapped before we could get the lock, and retry
1344                  * if so.  If we locked the right context, then it
1345                  * can't get swapped on us any more and we can
1346                  * unclone it if necessary.
1347                  * Once it's not a clone things will be stable.
1348                  */
1349                 spin_lock_irq(&ctx->lock);
1350                 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
1351                         spin_unlock_irq(&ctx->lock);
1352                         goto retry;
1353                 }
1354                 parent_ctx = ctx->parent_ctx;
1355                 if (parent_ctx) {
1356                         put_ctx(parent_ctx);
1357                         ctx->parent_ctx = NULL;         /* no longer a clone */
1358                 }
1359                 /*
1360                  * Get an extra reference before dropping the lock so that
1361                  * this context won't get freed if the task exits.
1362                  */
1363                 get_ctx(ctx);
1364                 spin_unlock_irq(&ctx->lock);
1365         }
1366         rcu_read_unlock();
1367
1368         if (!ctx) {
1369                 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1370                 err = -ENOMEM;
1371                 if (!ctx)
1372                         goto errout;
1373                 __perf_counter_init_context(ctx, task);
1374                 get_ctx(ctx);
1375                 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1376                         /*
1377                          * We raced with some other task; use
1378                          * the context they set.
1379                          */
1380                         kfree(ctx);
1381                         goto retry_lock;
1382                 }
1383                 get_task_struct(task);
1384         }
1385
1386         put_task_struct(task);
1387         return ctx;
1388
1389  errout:
1390         put_task_struct(task);
1391         return ERR_PTR(err);
1392 }
1393
1394 static void free_counter_rcu(struct rcu_head *head)
1395 {
1396         struct perf_counter *counter;
1397
1398         counter = container_of(head, struct perf_counter, rcu_head);
1399         kfree(counter);
1400 }
1401
1402 static void perf_pending_sync(struct perf_counter *counter);
1403
1404 static void free_counter(struct perf_counter *counter)
1405 {
1406         perf_pending_sync(counter);
1407
1408         atomic_dec(&nr_counters);
1409         if (counter->hw_event.mmap)
1410                 atomic_dec(&nr_mmap_tracking);
1411         if (counter->hw_event.munmap)
1412                 atomic_dec(&nr_munmap_tracking);
1413         if (counter->hw_event.comm)
1414                 atomic_dec(&nr_comm_tracking);
1415
1416         if (counter->destroy)
1417                 counter->destroy(counter);
1418
1419         put_ctx(counter->ctx);
1420         call_rcu(&counter->rcu_head, free_counter_rcu);
1421 }
1422
1423 /*
1424  * Called when the last reference to the file is gone.
1425  */
1426 static int perf_release(struct inode *inode, struct file *file)
1427 {
1428         struct perf_counter *counter = file->private_data;
1429         struct perf_counter_context *ctx = counter->ctx;
1430
1431         file->private_data = NULL;
1432
1433         WARN_ON_ONCE(ctx->parent_ctx);
1434         mutex_lock(&ctx->mutex);
1435         perf_counter_remove_from_context(counter);
1436         mutex_unlock(&ctx->mutex);
1437
1438         mutex_lock(&counter->owner->perf_counter_mutex);
1439         list_del_init(&counter->owner_entry);
1440         mutex_unlock(&counter->owner->perf_counter_mutex);
1441         put_task_struct(counter->owner);
1442
1443         free_counter(counter);
1444
1445         return 0;
1446 }
1447
1448 /*
1449  * Read the performance counter - simple non blocking version for now
1450  */
1451 static ssize_t
1452 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1453 {
1454         u64 values[3];
1455         int n;
1456
1457         /*
1458          * Return end-of-file for a read on a counter that is in
1459          * error state (i.e. because it was pinned but it couldn't be
1460          * scheduled on to the CPU at some point).
1461          */
1462         if (counter->state == PERF_COUNTER_STATE_ERROR)
1463                 return 0;
1464
1465         WARN_ON_ONCE(counter->ctx->parent_ctx);
1466         mutex_lock(&counter->child_mutex);
1467         values[0] = perf_counter_read(counter);
1468         n = 1;
1469         if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1470                 values[n++] = counter->total_time_enabled +
1471                         atomic64_read(&counter->child_total_time_enabled);
1472         if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1473                 values[n++] = counter->total_time_running +
1474                         atomic64_read(&counter->child_total_time_running);
1475         mutex_unlock(&counter->child_mutex);
1476
1477         if (count < n * sizeof(u64))
1478                 return -EINVAL;
1479         count = n * sizeof(u64);
1480
1481         if (copy_to_user(buf, values, count))
1482                 return -EFAULT;
1483
1484         return count;
1485 }
1486
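/*
 * Illustrative note (added): with both PERF_FORMAT_TOTAL_TIME_ENABLED
 * and PERF_FORMAT_TOTAL_TIME_RUNNING set in hw_event.read_format, a
 * read() on the counter fd returns three u64 values:
 *
 *	values[0]	counter value
 *	values[1]	total time enabled (including children)
 *	values[2]	total time running (including children)
 *
 * so user space must supply a buffer of at least 3 * sizeof(u64).
 */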
1487 static ssize_t
1488 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1489 {
1490         struct perf_counter *counter = file->private_data;
1491
1492         return perf_read_hw(counter, buf, count);
1493 }
1494
1495 static unsigned int perf_poll(struct file *file, poll_table *wait)
1496 {
1497         struct perf_counter *counter = file->private_data;
1498         struct perf_mmap_data *data;
1499         unsigned int events = POLLHUP;
1500
1501         rcu_read_lock();
1502         data = rcu_dereference(counter->data);
1503         if (data)
1504                 events = atomic_xchg(&data->poll, 0);
1505         rcu_read_unlock();
1506
1507         poll_wait(file, &counter->waitq, wait);
1508
1509         return events;
1510 }
1511
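/*
 * Illustrative user-space usage (added, not part of this file):
 * waiting for new data in the mmap()ed buffer via poll(2):
 *
 *	struct pollfd pfd = { .fd = counter_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * perf_poll() simply hands back whatever bits the output path has
 * accumulated in data->poll since the last poll.
 */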
1512 static void perf_counter_reset(struct perf_counter *counter)
1513 {
1514         (void)perf_counter_read(counter);
1515         atomic64_set(&counter->count, 0);
1516         perf_counter_update_userpage(counter);
1517 }
1518
1519 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1520                                           void (*func)(struct perf_counter *))
1521 {
1522         struct perf_counter_context *ctx = counter->ctx;
1523         struct perf_counter *sibling;
1524
1525         WARN_ON_ONCE(ctx->parent_ctx);
1526         mutex_lock(&ctx->mutex);
1527         counter = counter->group_leader;
1528
1529         func(counter);
1530         list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1531                 func(sibling);
1532         mutex_unlock(&ctx->mutex);
1533 }
1534
1535 /*
1536  * Holding the top-level counter's child_mutex means that any
1537  * descendant process that has inherited this counter will block
1538  * in sync_child_counter if it goes to exit, thus satisfying the
1539  * task existence requirements of perf_counter_enable/disable.
1540  */
1541 static void perf_counter_for_each_child(struct perf_counter *counter,
1542                                         void (*func)(struct perf_counter *))
1543 {
1544         struct perf_counter *child;
1545
1546         WARN_ON_ONCE(counter->ctx->parent_ctx);
1547         mutex_lock(&counter->child_mutex);
1548         func(counter);
1549         list_for_each_entry(child, &counter->child_list, child_list)
1550                 func(child);
1551         mutex_unlock(&counter->child_mutex);
1552 }
1553
1554 static void perf_counter_for_each(struct perf_counter *counter,
1555                                   void (*func)(struct perf_counter *))
1556 {
1557         struct perf_counter *child;
1558
1559         WARN_ON_ONCE(counter->ctx->parent_ctx);
1560         mutex_lock(&counter->child_mutex);
1561         perf_counter_for_each_sibling(counter, func);
1562         list_for_each_entry(child, &counter->child_list, child_list)
1563                 perf_counter_for_each_sibling(child, func);
1564         mutex_unlock(&counter->child_mutex);
1565 }
1566
1567 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1568 {
1569         struct perf_counter *counter = file->private_data;
1570         void (*func)(struct perf_counter *);
1571         u32 flags = arg;
1572
1573         switch (cmd) {
1574         case PERF_COUNTER_IOC_ENABLE:
1575                 func = perf_counter_enable;
1576                 break;
1577         case PERF_COUNTER_IOC_DISABLE:
1578                 func = perf_counter_disable;
1579                 break;
1580         case PERF_COUNTER_IOC_RESET:
1581                 func = perf_counter_reset;
1582                 break;
1583
1584         case PERF_COUNTER_IOC_REFRESH:
1585                 return perf_counter_refresh(counter, arg);
1586         default:
1587                 return -ENOTTY;
1588         }
1589
1590         if (flags & PERF_IOC_FLAG_GROUP)
1591                 perf_counter_for_each(counter, func);
1592         else
1593                 perf_counter_for_each_child(counter, func);
1594
1595         return 0;
1596 }
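
/*
 * Illustrative user-space use of the ioctls above (a sketch, not part of
 * this file), with 'fd' a counter file descriptor returned by
 * sys_perf_counter_open(). Without PERF_IOC_FLAG_GROUP the operation
 * applies to this counter (and its inherited children); with it, to the
 * whole sibling group of its group leader:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
 *	...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */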
1597
1598 int perf_counter_task_enable(void)
1599 {
1600         struct perf_counter *counter;
1601
1602         mutex_lock(&current->perf_counter_mutex);
1603         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1604                 perf_counter_for_each_child(counter, perf_counter_enable);
1605         mutex_unlock(&current->perf_counter_mutex);
1606
1607         return 0;
1608 }
1609
1610 int perf_counter_task_disable(void)
1611 {
1612         struct perf_counter *counter;
1613
1614         mutex_lock(&current->perf_counter_mutex);
1615         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1616                 perf_counter_for_each_child(counter, perf_counter_disable);
1617         mutex_unlock(&current->perf_counter_mutex);
1618
1619         return 0;
1620 }
1621
1622 /*
1623  * Callers need to ensure there can be no nesting of this function, otherwise
1624  * the seqlock logic breaks. We cannot serialize this because the arch
1625  * code calls this from NMI context; see the read-side sketch below.
1626  */
1627 void perf_counter_update_userpage(struct perf_counter *counter)
1628 {
1629         struct perf_mmap_data *data;
1630         struct perf_counter_mmap_page *userpg;
1631
1632         rcu_read_lock();
1633         data = rcu_dereference(counter->data);
1634         if (!data)
1635                 goto unlock;
1636
1637         userpg = data->user_page;
1638
1639         /*
1640          * Disable preemption so as not to let the corresponding user-space
1641          * spin too long if we get preempted.
1642          */
1643         preempt_disable();
1644         ++userpg->lock;
1645         barrier();
1646         userpg->index = counter->hw.idx;
1647         userpg->offset = atomic64_read(&counter->count);
1648         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1649                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1650
1651         barrier();
1652         ++userpg->lock;
1653         preempt_enable();
1654 unlock:
1655         rcu_read_unlock();
1656 }
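
/*
 * Illustrative user-space read side of the ->lock sequence count above
 * (a sketch, not part of this file). 'pc' is assumed to point at the
 * mmap()ed struct perf_counter_mmap_page, and read_hw_counter() stands in
 * for whatever arch-specific way exists to read the hardware counter that
 * ->index refers to (the exact index encoding is arch/ABI defined):
 *
 *	u32 seq;
 *	s64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *
 *		count = pc->offset;
 *		if (pc->index)
 *			count += read_hw_counter(pc->index);
 *
 *		barrier();
 *	} while (pc->lock != seq || (seq & 1));
 */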
1657
1658 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1659 {
1660         struct perf_counter *counter = vma->vm_file->private_data;
1661         struct perf_mmap_data *data;
1662         int ret = VM_FAULT_SIGBUS;
1663
1664         rcu_read_lock();
1665         data = rcu_dereference(counter->data);
1666         if (!data)
1667                 goto unlock;
1668
1669         if (vmf->pgoff == 0) {
1670                 vmf->page = virt_to_page(data->user_page);
1671         } else {
1672                 int nr = vmf->pgoff - 1;
1673
1674                 if ((unsigned)nr > data->nr_pages)
1675                         goto unlock;
1676
1677                 vmf->page = virt_to_page(data->data_pages[nr]);
1678         }
1679         get_page(vmf->page);
1680         ret = 0;
1681 unlock:
1682         rcu_read_unlock();
1683
1684         return ret;
1685 }
1686
1687 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1688 {
1689         struct perf_mmap_data *data;
1690         unsigned long size;
1691         int i;
1692
1693         WARN_ON(atomic_read(&counter->mmap_count));
1694
1695         size = sizeof(struct perf_mmap_data);
1696         size += nr_pages * sizeof(void *);
1697
1698         data = kzalloc(size, GFP_KERNEL);
1699         if (!data)
1700                 goto fail;
1701
1702         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1703         if (!data->user_page)
1704                 goto fail_user_page;
1705
1706         for (i = 0; i < nr_pages; i++) {
1707                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1708                 if (!data->data_pages[i])
1709                         goto fail_data_pages;
1710         }
1711
1712         data->nr_pages = nr_pages;
1713         atomic_set(&data->lock, -1);
1714
1715         rcu_assign_pointer(counter->data, data);
1716
1717         return 0;
1718
1719 fail_data_pages:
1720         for (i--; i >= 0; i--)
1721                 free_page((unsigned long)data->data_pages[i]);
1722
1723         free_page((unsigned long)data->user_page);
1724
1725 fail_user_page:
1726         kfree(data);
1727
1728 fail:
1729         return -ENOMEM;
1730 }
1731
1732 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1733 {
1734         struct perf_mmap_data *data = container_of(rcu_head,
1735                         struct perf_mmap_data, rcu_head);
1736         int i;
1737
1738         free_page((unsigned long)data->user_page);
1739         for (i = 0; i < data->nr_pages; i++)
1740                 free_page((unsigned long)data->data_pages[i]);
1741         kfree(data);
1742 }
1743
1744 static void perf_mmap_data_free(struct perf_counter *counter)
1745 {
1746         struct perf_mmap_data *data = counter->data;
1747
1748         WARN_ON(atomic_read(&counter->mmap_count));
1749
1750         rcu_assign_pointer(counter->data, NULL);
1751         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1752 }
1753
1754 static void perf_mmap_open(struct vm_area_struct *vma)
1755 {
1756         struct perf_counter *counter = vma->vm_file->private_data;
1757
1758         atomic_inc(&counter->mmap_count);
1759 }
1760
1761 static void perf_mmap_close(struct vm_area_struct *vma)
1762 {
1763         struct perf_counter *counter = vma->vm_file->private_data;
1764
1765         WARN_ON_ONCE(counter->ctx->parent_ctx);
1766         if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1767                                       &counter->mmap_mutex)) {
1768                 struct user_struct *user = current_user();
1769
1770                 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1771                 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1772                 perf_mmap_data_free(counter);
1773                 mutex_unlock(&counter->mmap_mutex);
1774         }
1775 }
1776
1777 static struct vm_operations_struct perf_mmap_vmops = {
1778         .open  = perf_mmap_open,
1779         .close = perf_mmap_close,
1780         .fault = perf_mmap_fault,
1781 };
1782
1783 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1784 {
1785         struct perf_counter *counter = file->private_data;
1786         struct user_struct *user = current_user();
1787         unsigned long vma_size;
1788         unsigned long nr_pages;
1789         unsigned long user_locked, user_lock_limit;
1790         unsigned long locked, lock_limit;
1791         long user_extra, extra;
1792         int ret = 0;
1793
1794         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1795                 return -EINVAL;
1796
1797         vma_size = vma->vm_end - vma->vm_start;
1798         nr_pages = (vma_size / PAGE_SIZE) - 1;
1799
1800         /*
1801          * If we have data pages ensure they're a power-of-two number, so we
1802          * can do bitmasks instead of modulo.
1803          */
1804         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1805                 return -EINVAL;
1806
1807         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1808                 return -EINVAL;
1809
1810         if (vma->vm_pgoff != 0)
1811                 return -EINVAL;
1812
1813         WARN_ON_ONCE(counter->ctx->parent_ctx);
1814         mutex_lock(&counter->mmap_mutex);
1815         if (atomic_inc_not_zero(&counter->mmap_count)) {
1816                 if (nr_pages != counter->data->nr_pages)
1817                         ret = -EINVAL;
1818                 goto unlock;
1819         }
1820
1821         user_extra = nr_pages + 1;
1822         user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1823
1824         /*
1825          * Increase the limit linearly with more CPUs:
1826          */
1827         user_lock_limit *= num_online_cpus();
1828
1829         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1830
1831         extra = 0;
1832         if (user_locked > user_lock_limit)
1833                 extra = user_locked - user_lock_limit;
1834
1835         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1836         lock_limit >>= PAGE_SHIFT;
1837         locked = vma->vm_mm->locked_vm + extra;
1838
1839         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1840                 ret = -EPERM;
1841                 goto unlock;
1842         }
1843
1844         WARN_ON(counter->data);
1845         ret = perf_mmap_data_alloc(counter, nr_pages);
1846         if (ret)
1847                 goto unlock;
1848
1849         atomic_set(&counter->mmap_count, 1);
1850         atomic_long_add(user_extra, &user->locked_vm);
1851         vma->vm_mm->locked_vm += extra;
1852         counter->data->nr_locked = extra;
1853 unlock:
1854         mutex_unlock(&counter->mmap_mutex);
1855
1856         vma->vm_flags &= ~VM_MAYWRITE;
1857         vma->vm_flags |= VM_RESERVED;
1858         vma->vm_ops = &perf_mmap_vmops;
1859
1860         return ret;
1861 }
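
/*
 * Illustrative user-space mapping (a sketch, not part of this file): per
 * the checks in perf_mmap() above, the mapping must be MAP_SHARED and
 * read-only, start at file offset 0, and cover the control page plus a
 * power-of-two number of data pages. E.g. for 8 data pages:
 *
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	struct perf_counter_mmap_page *pc = base;
 */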
1862
1863 static int perf_fasync(int fd, struct file *filp, int on)
1864 {
1865         struct perf_counter *counter = filp->private_data;
1866         struct inode *inode = filp->f_path.dentry->d_inode;
1867         int retval;
1868
1869         mutex_lock(&inode->i_mutex);
1870         retval = fasync_helper(fd, filp, on, &counter->fasync);
1871         mutex_unlock(&inode->i_mutex);
1872
1873         if (retval < 0)
1874                 return retval;
1875
1876         return 0;
1877 }
1878
1879 static const struct file_operations perf_fops = {
1880         .release                = perf_release,
1881         .read                   = perf_read,
1882         .poll                   = perf_poll,
1883         .unlocked_ioctl         = perf_ioctl,
1884         .compat_ioctl           = perf_ioctl,
1885         .mmap                   = perf_mmap,
1886         .fasync                 = perf_fasync,
1887 };
1888
1889 /*
1890  * Perf counter wakeup
1891  *
1892  * If there's data, ensure we set the poll() state and publish everything
1893  * to user-space before waking everybody up.
1894  */
1895
1896 void perf_counter_wakeup(struct perf_counter *counter)
1897 {
1898         wake_up_all(&counter->waitq);
1899
1900         if (counter->pending_kill) {
1901                 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1902                 counter->pending_kill = 0;
1903         }
1904 }
1905
1906 /*
1907  * Pending wakeups
1908  *
1909  * Handle the case where we need to wake up from NMI (or rq->lock) context.
1910  *
1911  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1912  * singly-linked list and use cmpxchg() to add entries locklessly.
1913  */
1914
1915 static void perf_pending_counter(struct perf_pending_entry *entry)
1916 {
1917         struct perf_counter *counter = container_of(entry,
1918                         struct perf_counter, pending);
1919
1920         if (counter->pending_disable) {
1921                 counter->pending_disable = 0;
1922                 perf_counter_disable(counter);
1923         }
1924
1925         if (counter->pending_wakeup) {
1926                 counter->pending_wakeup = 0;
1927                 perf_counter_wakeup(counter);
1928         }
1929 }
1930
1931 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
1932
1933 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1934         PENDING_TAIL,
1935 };
1936
1937 static void perf_pending_queue(struct perf_pending_entry *entry,
1938                                void (*func)(struct perf_pending_entry *))
1939 {
1940         struct perf_pending_entry **head;
1941
1942         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1943                 return;
1944
1945         entry->func = func;
1946
1947         head = &get_cpu_var(perf_pending_head);
1948
1949         do {
1950                 entry->next = *head;
1951         } while (cmpxchg(head, entry->next, entry) != entry->next);
1952
1953         set_perf_counter_pending();
1954
1955         put_cpu_var(perf_pending_head);
1956 }
1957
1958 static int __perf_pending_run(void)
1959 {
1960         struct perf_pending_entry *list;
1961         int nr = 0;
1962
1963         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1964         while (list != PENDING_TAIL) {
1965                 void (*func)(struct perf_pending_entry *);
1966                 struct perf_pending_entry *entry = list;
1967
1968                 list = list->next;
1969
1970                 func = entry->func;
1971                 entry->next = NULL;
1972                 /*
1973                  * Ensure we observe the unqueue before we issue the wakeup,
1974                  * so that we won't be waiting forever.
1975                  * -- see perf_not_pending().
1976                  */
1977                 smp_wmb();
1978
1979                 func(entry);
1980                 nr++;
1981         }
1982
1983         return nr;
1984 }
1985
1986 static inline int perf_not_pending(struct perf_counter *counter)
1987 {
1988         /*
1989          * If we flush on whatever cpu we run, there is a chance we don't
1990          * need to wait.
1991          */
1992         get_cpu();
1993         __perf_pending_run();
1994         put_cpu();
1995
1996         /*
1997          * Ensure we see the proper queue state before going to sleep
1998          * so that we do not miss the wakeup. -- see __perf_pending_run().
1999          */
2000         smp_rmb();
2001         return counter->pending.next == NULL;
2002 }
2003
2004 static void perf_pending_sync(struct perf_counter *counter)
2005 {
2006         wait_event(counter->waitq, perf_not_pending(counter));
2007 }
2008
2009 void perf_counter_do_pending(void)
2010 {
2011         __perf_pending_run();
2012 }
2013
2014 /*
2015  * Callchain support -- arch specific
2016  */
2017
2018 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2019 {
2020         return NULL;
2021 }
2022
2023 /*
2024  * Output
2025  */
2026
2027 struct perf_output_handle {
2028         struct perf_counter     *counter;
2029         struct perf_mmap_data   *data;
2030         unsigned int            offset;
2031         unsigned int            head;
2032         int                     nmi;
2033         int                     overflow;
2034         int                     locked;
2035         unsigned long           flags;
2036 };
2037
2038 static void perf_output_wakeup(struct perf_output_handle *handle)
2039 {
2040         atomic_set(&handle->data->poll, POLL_IN);
2041
2042         if (handle->nmi) {
2043                 handle->counter->pending_wakeup = 1;
2044                 perf_pending_queue(&handle->counter->pending,
2045                                    perf_pending_counter);
2046         } else
2047                 perf_counter_wakeup(handle->counter);
2048 }
2049
2050 /*
2051  * Curious locking construct.
2052  *
2053  * We need to ensure a later event doesn't publish a head when a former
2054  * event isn't done writing. However, since we need to deal with NMIs, we
2055  * cannot fully serialize things.
2056  *
2057  * What we do is serialize between CPUs so we only have to deal with NMI
2058  * nesting on a single CPU.
2059  *
2060  * We only publish the head (and generate a wakeup) when the outer-most
2061  * event completes.
2062  */
2063 static void perf_output_lock(struct perf_output_handle *handle)
2064 {
2065         struct perf_mmap_data *data = handle->data;
2066         int cpu;
2067
2068         handle->locked = 0;
2069
2070         local_irq_save(handle->flags);
2071         cpu = smp_processor_id();
2072
2073         if (in_nmi() && atomic_read(&data->lock) == cpu)
2074                 return;
2075
2076         while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2077                 cpu_relax();
2078
2079         handle->locked = 1;
2080 }
2081
2082 static void perf_output_unlock(struct perf_output_handle *handle)
2083 {
2084         struct perf_mmap_data *data = handle->data;
2085         int head, cpu;
2086
2087         data->done_head = data->head;
2088
2089         if (!handle->locked)
2090                 goto out;
2091
2092 again:
2093         /*
2094          * The xchg implies a full barrier that ensures all writes are done
2095          * before we publish the new head, matched by an rmb() in userspace when
2096          * reading this position (see the consumer sketch after this function).
2097          */
2098         while ((head = atomic_xchg(&data->done_head, 0)))
2099                 data->user_page->data_head = head;
2100
2101         /*
2102          * NMI can happen here, which means we can miss a done_head update.
2103          */
2104
2105         cpu = atomic_xchg(&data->lock, -1);
2106         WARN_ON_ONCE(cpu != smp_processor_id());
2107
2108         /*
2109          * Therefore we have to check that we did not miss one.
2110          */
2111         if (unlikely(atomic_read(&data->done_head))) {
2112                 /*
2113                  * Since we had it locked, we can lock it again.
2114                  */
2115                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2116                         cpu_relax();
2117
2118                 goto again;
2119         }
2120
2121         if (atomic_xchg(&data->wakeup, 0))
2122                 perf_output_wakeup(handle);
2123 out:
2124         local_irq_restore(handle->flags);
2125 }
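
/*
 * Illustrative user-space consumer side (a sketch, not part of this file):
 * the xchg() above provides the write barrier that publishes ->data_head;
 * a reader pairs it with a read barrier before touching the freshly
 * written data:
 *
 *	u64 head = pc->data_head;
 *	rmb();
 *	... consume ring-buffer bytes up to 'head' ...
 */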
2126
2127 static int perf_output_begin(struct perf_output_handle *handle,
2128                              struct perf_counter *counter, unsigned int size,
2129                              int nmi, int overflow)
2130 {
2131         struct perf_mmap_data *data;
2132         unsigned int offset, head;
2133
2134         /*
2135          * For inherited counters we send all the output towards the parent.
2136          */
2137         if (counter->parent)
2138                 counter = counter->parent;
2139
2140         rcu_read_lock();
2141         data = rcu_dereference(counter->data);
2142         if (!data)
2143                 goto out;
2144
2145         handle->data     = data;
2146         handle->counter  = counter;
2147         handle->nmi      = nmi;
2148         handle->overflow = overflow;
2149
2150         if (!data->nr_pages)
2151                 goto fail;
2152
2153         perf_output_lock(handle);
2154
2155         do {
2156                 offset = head = atomic_read(&data->head);
2157                 head += size;
2158         } while (atomic_cmpxchg(&data->head, offset, head) != offset);
2159
2160         handle->offset  = offset;
2161         handle->head    = head;
2162
2163         if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2164                 atomic_set(&data->wakeup, 1);
2165
2166         return 0;
2167
2168 fail:
2169         perf_output_wakeup(handle);
2170 out:
2171         rcu_read_unlock();
2172
2173         return -ENOSPC;
2174 }
2175
2176 static void perf_output_copy(struct perf_output_handle *handle,
2177                              void *buf, unsigned int len)
2178 {
2179         unsigned int pages_mask;
2180         unsigned int offset;
2181         unsigned int size;
2182         void **pages;
2183
2184         offset          = handle->offset;
2185         pages_mask      = handle->data->nr_pages - 1;
2186         pages           = handle->data->data_pages;
2187
2188         do {
2189                 unsigned int page_offset;
2190                 int nr;
2191
2192                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2193                 page_offset = offset & (PAGE_SIZE - 1);
2194                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2195
2196                 memcpy(pages[nr] + page_offset, buf, size);
2197
2198                 len         -= size;
2199                 buf         += size;
2200                 offset      += size;
2201         } while (len);
2202
2203         handle->offset = offset;
2204
2205         /*
2206          * Check we didn't copy past our reservation window, taking the
2207          * possible unsigned int wrap into account.
2208          */
2209         WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
2210 }
2211
2212 #define perf_output_put(handle, x) \
2213         perf_output_copy((handle), &(x), sizeof(x))
2214
2215 static void perf_output_end(struct perf_output_handle *handle)
2216 {
2217         struct perf_counter *counter = handle->counter;
2218         struct perf_mmap_data *data = handle->data;
2219
2220         int wakeup_events = counter->hw_event.wakeup_events;
2221
2222         if (handle->overflow && wakeup_events) {
2223                 int events = atomic_inc_return(&data->events);
2224                 if (events >= wakeup_events) {
2225                         atomic_sub(wakeup_events, &data->events);
2226                         atomic_set(&data->wakeup, 1);
2227                 }
2228         }
2229
2230         perf_output_unlock(handle);
2231         rcu_read_unlock();
2232 }
2233
2234 static void perf_counter_output(struct perf_counter *counter,
2235                                 int nmi, struct pt_regs *regs, u64 addr)
2236 {
2237         int ret;
2238         u64 record_type = counter->hw_event.record_type;
2239         struct perf_output_handle handle;
2240         struct perf_event_header header;
2241         u64 ip;
2242         struct {
2243                 u32 pid, tid;
2244         } tid_entry;
2245         struct {
2246                 u64 event;
2247                 u64 counter;
2248         } group_entry;
2249         struct perf_callchain_entry *callchain = NULL;
2250         int callchain_size = 0;
2251         u64 time;
2252         struct {
2253                 u32 cpu, reserved;
2254         } cpu_entry;
2255
2256         header.type = 0;
2257         header.size = sizeof(header);
2258
2259         header.misc = PERF_EVENT_MISC_OVERFLOW;
2260         header.misc |= perf_misc_flags(regs);
2261
2262         if (record_type & PERF_RECORD_IP) {
2263                 ip = perf_instruction_pointer(regs);
2264                 header.type |= PERF_RECORD_IP;
2265                 header.size += sizeof(ip);
2266         }
2267
2268         if (record_type & PERF_RECORD_TID) {
2269                 /* namespace issues */
2270                 tid_entry.pid = current->group_leader->pid;
2271                 tid_entry.tid = current->pid;
2272
2273                 header.type |= PERF_RECORD_TID;
2274                 header.size += sizeof(tid_entry);
2275         }
2276
2277         if (record_type & PERF_RECORD_TIME) {
2278                 /*
2279                  * Maybe do better on x86 and provide cpu_clock_nmi()
2280                  */
2281                 time = sched_clock();
2282
2283                 header.type |= PERF_RECORD_TIME;
2284                 header.size += sizeof(u64);
2285         }
2286
2287         if (record_type & PERF_RECORD_ADDR) {
2288                 header.type |= PERF_RECORD_ADDR;
2289                 header.size += sizeof(u64);
2290         }
2291
2292         if (record_type & PERF_RECORD_CONFIG) {
2293                 header.type |= PERF_RECORD_CONFIG;
2294                 header.size += sizeof(u64);
2295         }
2296
2297         if (record_type & PERF_RECORD_CPU) {
2298                 header.type |= PERF_RECORD_CPU;
2299                 header.size += sizeof(cpu_entry);
2300
2301                 cpu_entry.cpu = raw_smp_processor_id();
2302         }
2303
2304         if (record_type & PERF_RECORD_GROUP) {
2305                 header.type |= PERF_RECORD_GROUP;
2306                 header.size += sizeof(u64) +
2307                         counter->nr_siblings * sizeof(group_entry);
2308         }
2309
2310         if (record_type & PERF_RECORD_CALLCHAIN) {
2311                 callchain = perf_callchain(regs);
2312
2313                 if (callchain) {
2314                         callchain_size = (1 + callchain->nr) * sizeof(u64);
2315
2316                         header.type |= PERF_RECORD_CALLCHAIN;
2317                         header.size += callchain_size;
2318                 }
2319         }
2320
2321         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2322         if (ret)
2323                 return;
2324
2325         perf_output_put(&handle, header);
2326
2327         if (record_type & PERF_RECORD_IP)
2328                 perf_output_put(&handle, ip);
2329
2330         if (record_type & PERF_RECORD_TID)
2331                 perf_output_put(&handle, tid_entry);
2332
2333         if (record_type & PERF_RECORD_TIME)
2334                 perf_output_put(&handle, time);
2335
2336         if (record_type & PERF_RECORD_ADDR)
2337                 perf_output_put(&handle, addr);
2338
2339         if (record_type & PERF_RECORD_CONFIG)
2340                 perf_output_put(&handle, counter->hw_event.config);
2341
2342         if (record_type & PERF_RECORD_CPU)
2343                 perf_output_put(&handle, cpu_entry);
2344
2345         /*
2346          * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
2347          */
2348         if (record_type & PERF_RECORD_GROUP) {
2349                 struct perf_counter *leader, *sub;
2350                 u64 nr = counter->nr_siblings;
2351
2352                 perf_output_put(&handle, nr);
2353
2354                 leader = counter->group_leader;
2355                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2356                         if (sub != counter)
2357                                 sub->pmu->read(sub);
2358
2359                         group_entry.event = sub->hw_event.config;
2360                         group_entry.counter = atomic64_read(&sub->count);
2361
2362                         perf_output_put(&handle, group_entry);
2363                 }
2364         }
2365
2366         if (callchain)
2367                 perf_output_copy(&handle, callchain, callchain_size);
2368
2369         perf_output_end(&handle);
2370 }
2371
2372 /*
2373  * comm tracking
2374  */
2375
2376 struct perf_comm_event {
2377         struct task_struct      *task;
2378         char                    *comm;
2379         int                     comm_size;
2380
2381         struct {
2382                 struct perf_event_header        header;
2383
2384                 u32                             pid;
2385                 u32                             tid;
2386         } event;
2387 };
2388
2389 static void perf_counter_comm_output(struct perf_counter *counter,
2390                                      struct perf_comm_event *comm_event)
2391 {
2392         struct perf_output_handle handle;
2393         int size = comm_event->event.header.size;
2394         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2395
2396         if (ret)
2397                 return;
2398
2399         perf_output_put(&handle, comm_event->event);
2400         perf_output_copy(&handle, comm_event->comm,
2401                                    comm_event->comm_size);
2402         perf_output_end(&handle);
2403 }
2404
2405 static int perf_counter_comm_match(struct perf_counter *counter,
2406                                    struct perf_comm_event *comm_event)
2407 {
2408         if (counter->hw_event.comm &&
2409             comm_event->event.header.type == PERF_EVENT_COMM)
2410                 return 1;
2411
2412         return 0;
2413 }
2414
2415 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2416                                   struct perf_comm_event *comm_event)
2417 {
2418         struct perf_counter *counter;
2419
2420         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2421                 return;
2422
2423         rcu_read_lock();
2424         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2425                 if (perf_counter_comm_match(counter, comm_event))
2426                         perf_counter_comm_output(counter, comm_event);
2427         }
2428         rcu_read_unlock();
2429 }
2430
2431 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2432 {
2433         struct perf_cpu_context *cpuctx;
2434         unsigned int size;
2435         char *comm = comm_event->task->comm;
2436
2437         size = ALIGN(strlen(comm)+1, sizeof(u64));
2438
2439         comm_event->comm = comm;
2440         comm_event->comm_size = size;
2441
2442         comm_event->event.header.size = sizeof(comm_event->event) + size;
2443
2444         cpuctx = &get_cpu_var(perf_cpu_context);
2445         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2446         if (cpuctx->task_ctx)
2447                 perf_counter_comm_ctx(cpuctx->task_ctx, comm_event);
2448         put_cpu_var(perf_cpu_context);
2449 }
2450
2451 void perf_counter_comm(struct task_struct *task)
2452 {
2453         struct perf_comm_event comm_event;
2454
2455         if (!atomic_read(&nr_comm_tracking))
2456                 return;
2457
2458         comm_event = (struct perf_comm_event){
2459                 .task   = task,
2460                 .event  = {
2461                         .header = { .type = PERF_EVENT_COMM, },
2462                         .pid    = task->group_leader->pid,
2463                         .tid    = task->pid,
2464                 },
2465         };
2466
2467         perf_counter_comm_event(&comm_event);
2468 }
2469
2470 /*
2471  * mmap tracking
2472  */
2473
2474 struct perf_mmap_event {
2475         struct file     *file;
2476         char            *file_name;
2477         int             file_size;
2478
2479         struct {
2480                 struct perf_event_header        header;
2481
2482                 u32                             pid;
2483                 u32                             tid;
2484                 u64                             start;
2485                 u64                             len;
2486                 u64                             pgoff;
2487         } event;
2488 };
2489
2490 static void perf_counter_mmap_output(struct perf_counter *counter,
2491                                      struct perf_mmap_event *mmap_event)
2492 {
2493         struct perf_output_handle handle;
2494         int size = mmap_event->event.header.size;
2495         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2496
2497         if (ret)
2498                 return;
2499
2500         perf_output_put(&handle, mmap_event->event);
2501         perf_output_copy(&handle, mmap_event->file_name,
2502                                    mmap_event->file_size);
2503         perf_output_end(&handle);
2504 }
2505
2506 static int perf_counter_mmap_match(struct perf_counter *counter,
2507                                    struct perf_mmap_event *mmap_event)
2508 {
2509         if (counter->hw_event.mmap &&
2510             mmap_event->event.header.type == PERF_EVENT_MMAP)
2511                 return 1;
2512
2513         if (counter->hw_event.munmap &&
2514             mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2515                 return 1;
2516
2517         return 0;
2518 }
2519
2520 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2521                                   struct perf_mmap_event *mmap_event)
2522 {
2523         struct perf_counter *counter;
2524
2525         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2526                 return;
2527
2528         rcu_read_lock();
2529         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2530                 if (perf_counter_mmap_match(counter, mmap_event))
2531                         perf_counter_mmap_output(counter, mmap_event);
2532         }
2533         rcu_read_unlock();
2534 }
2535
2536 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2537 {
2538         struct perf_cpu_context *cpuctx;
2539         struct file *file = mmap_event->file;
2540         unsigned int size;
2541         char tmp[16];
2542         char *buf = NULL;
2543         char *name;
2544
2545         if (file) {
2546                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2547                 if (!buf) {
2548                         name = strncpy(tmp, "//enomem", sizeof(tmp));
2549                         goto got_name;
2550                 }
2551                 name = d_path(&file->f_path, buf, PATH_MAX);
2552                 if (IS_ERR(name)) {
2553                         name = strncpy(tmp, "//toolong", sizeof(tmp));
2554                         goto got_name;
2555                 }
2556         } else {
2557                 name = strncpy(tmp, "//anon", sizeof(tmp));
2558                 goto got_name;
2559         }
2560
2561 got_name:
2562         size = ALIGN(strlen(name)+1, sizeof(u64));
2563
2564         mmap_event->file_name = name;
2565         mmap_event->file_size = size;
2566
2567         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2568
2569         cpuctx = &get_cpu_var(perf_cpu_context);
2570         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2571         if (cpuctx->task_ctx)
2572                 perf_counter_mmap_ctx(cpuctx->task_ctx, mmap_event);
2573         put_cpu_var(perf_cpu_context);
2574
2575         kfree(buf);
2576 }
2577
2578 void perf_counter_mmap(unsigned long addr, unsigned long len,
2579                        unsigned long pgoff, struct file *file)
2580 {
2581         struct perf_mmap_event mmap_event;
2582
2583         if (!atomic_read(&nr_mmap_tracking))
2584                 return;
2585
2586         mmap_event = (struct perf_mmap_event){
2587                 .file   = file,
2588                 .event  = {
2589                         .header = { .type = PERF_EVENT_MMAP, },
2590                         .pid    = current->group_leader->pid,
2591                         .tid    = current->pid,
2592                         .start  = addr,
2593                         .len    = len,
2594                         .pgoff  = pgoff,
2595                 },
2596         };
2597
2598         perf_counter_mmap_event(&mmap_event);
2599 }
2600
2601 void perf_counter_munmap(unsigned long addr, unsigned long len,
2602                          unsigned long pgoff, struct file *file)
2603 {
2604         struct perf_mmap_event mmap_event;
2605
2606         if (!atomic_read(&nr_munmap_tracking))
2607                 return;
2608
2609         mmap_event = (struct perf_mmap_event){
2610                 .file   = file,
2611                 .event  = {
2612                         .header = { .type = PERF_EVENT_MUNMAP, },
2613                         .pid    = current->group_leader->pid,
2614                         .tid    = current->pid,
2615                         .start  = addr,
2616                         .len    = len,
2617                         .pgoff  = pgoff,
2618                 },
2619         };
2620
2621         perf_counter_mmap_event(&mmap_event);
2622 }
2623
2624 /*
2625  * Log irq_period changes so that analysis tools can re-normalize the
2626  * event flow.
2627  */
2628
2629 static void perf_log_period(struct perf_counter *counter, u64 period)
2630 {
2631         struct perf_output_handle handle;
2632         int ret;
2633
2634         struct {
2635                 struct perf_event_header        header;
2636                 u64                             time;
2637                 u64                             period;
2638         } freq_event = {
2639                 .header = {
2640                         .type = PERF_EVENT_PERIOD,
2641                         .misc = 0,
2642                         .size = sizeof(freq_event),
2643                 },
2644                 .time = sched_clock(),
2645                 .period = period,
2646         };
2647
2648         if (counter->hw.irq_period == period)
2649                 return;
2650
2651         ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2652         if (ret)
2653                 return;
2654
2655         perf_output_put(&handle, freq_event);
2656         perf_output_end(&handle);
2657 }
2658
2659 /*
2660  * IRQ throttle logging
2661  */
2662
2663 static void perf_log_throttle(struct perf_counter *counter, int enable)
2664 {
2665         struct perf_output_handle handle;
2666         int ret;
2667
2668         struct {
2669                 struct perf_event_header        header;
2670                 u64                             time;
2671         } throttle_event = {
2672                 .header = {
2673                         .type = PERF_EVENT_THROTTLE + enable,
2674                         .misc = 0,
2675                         .size = sizeof(throttle_event),
2676                 },
2677                 .time = sched_clock(),
2678         };
2679
2680         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2681         if (ret)
2682                 return;
2683
2684         perf_output_put(&handle, throttle_event);
2685         perf_output_end(&handle);
2686 }
2687
2688 /*
2689  * Generic counter overflow handling.
2690  */
2691
2692 int perf_counter_overflow(struct perf_counter *counter,
2693                           int nmi, struct pt_regs *regs, u64 addr)
2694 {
2695         int events = atomic_read(&counter->event_limit);
2696         int throttle = counter->pmu->unthrottle != NULL;
2697         int ret = 0;
2698
2699         if (!throttle) {
2700                 counter->hw.interrupts++;
2701         } else if (counter->hw.interrupts != MAX_INTERRUPTS) {
2702                 counter->hw.interrupts++;
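		/*
		 * Throttle once the interrupt rate, extrapolated to a full
		 * second (HZ * interrupts seen so far), would exceed the
		 * sysctl_perf_counter_limit interrupts-per-second budget;
		 * e.g. with HZ=1000 and a limit of 100000 this trips after
		 * ~100 interrupts.
		 */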
2703                 if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2704                         counter->hw.interrupts = MAX_INTERRUPTS;
2705                         perf_log_throttle(counter, 0);
2706                         ret = 1;
2707                 }
2708         }
2709
2710         /*
2711          * XXX event_limit might not quite work as expected on inherited
2712          * counters
2713          */
2714
2715         counter->pending_kill = POLL_IN;
2716         if (events && atomic_dec_and_test(&counter->event_limit)) {
2717                 ret = 1;
2718                 counter->pending_kill = POLL_HUP;
2719                 if (nmi) {
2720                         counter->pending_disable = 1;
2721                         perf_pending_queue(&counter->pending,
2722                                            perf_pending_counter);
2723                 } else
2724                         perf_counter_disable(counter);
2725         }
2726
2727         perf_counter_output(counter, nmi, regs, addr);
2728         return ret;
2729 }
2730
2731 /*
2732  * Generic software counter infrastructure
2733  */
2734
2735 static void perf_swcounter_update(struct perf_counter *counter)
2736 {
2737         struct hw_perf_counter *hwc = &counter->hw;
2738         u64 prev, now;
2739         s64 delta;
2740
2741 again:
2742         prev = atomic64_read(&hwc->prev_count);
2743         now = atomic64_read(&hwc->count);
2744         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2745                 goto again;
2746
2747         delta = now - prev;
2748
2749         atomic64_add(delta, &counter->count);
2750         atomic64_sub(delta, &hwc->period_left);
2751 }
2752
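/*
 * Re-arm the software counter so that it "overflows" after 'left' more
 * events: both prev_count and count start out at -left, so the count
 * crosses zero -- and atomic64_add_negative() in perf_swcounter_add()
 * returns false -- exactly when the period expires.
 */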
2753 static void perf_swcounter_set_period(struct perf_counter *counter)
2754 {
2755         struct hw_perf_counter *hwc = &counter->hw;
2756         s64 left = atomic64_read(&hwc->period_left);
2757         s64 period = hwc->irq_period;
2758
2759         if (unlikely(left <= -period)) {
2760                 left = period;
2761                 atomic64_set(&hwc->period_left, left);
2762         }
2763
2764         if (unlikely(left <= 0)) {
2765                 left += period;
2766                 atomic64_add(period, &hwc->period_left);
2767         }
2768
2769         atomic64_set(&hwc->prev_count, -left);
2770         atomic64_set(&hwc->count, -left);
2771 }
2772
2773 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2774 {
2775         enum hrtimer_restart ret = HRTIMER_RESTART;
2776         struct perf_counter *counter;
2777         struct pt_regs *regs;
2778         u64 period;
2779
2780         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2781         counter->pmu->read(counter);
2782
2783         regs = get_irq_regs();
2784         /*
2785          * In case we exclude kernel IPs or are somehow not in interrupt
2786          * context, provide the next best thing, the user IP.
2787          */
2788         if ((counter->hw_event.exclude_kernel || !regs) &&
2789                         !counter->hw_event.exclude_user)
2790                 regs = task_pt_regs(current);
2791
2792         if (regs) {
2793                 if (perf_counter_overflow(counter, 0, regs, 0))
2794                         ret = HRTIMER_NORESTART;
2795         }
2796
2797         period = max_t(u64, 10000, counter->hw.irq_period);
2798         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
2799
2800         return ret;
2801 }
2802
2803 static void perf_swcounter_overflow(struct perf_counter *counter,
2804                                     int nmi, struct pt_regs *regs, u64 addr)
2805 {
2806         perf_swcounter_update(counter);
2807         perf_swcounter_set_period(counter);
2808         if (perf_counter_overflow(counter, nmi, regs, addr))
2809                 /* soft-disable the counter */
2810                 ;
2811
2812 }
2813
2814 static int perf_swcounter_match(struct perf_counter *counter,
2815                                 enum perf_event_types type,
2816                                 u32 event, struct pt_regs *regs)
2817 {
2818         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2819                 return 0;
2820
2821         if (perf_event_raw(&counter->hw_event))
2822                 return 0;
2823
2824         if (perf_event_type(&counter->hw_event) != type)
2825                 return 0;
2826
2827         if (perf_event_id(&counter->hw_event) != event)
2828                 return 0;
2829
2830         if (counter->hw_event.exclude_user && user_mode(regs))
2831                 return 0;
2832
2833         if (counter->hw_event.exclude_kernel && !user_mode(regs))
2834                 return 0;
2835
2836         return 1;
2837 }
2838
2839 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2840                                int nmi, struct pt_regs *regs, u64 addr)
2841 {
2842         int neg = atomic64_add_negative(nr, &counter->hw.count);
2843         if (counter->hw.irq_period && !neg)
2844                 perf_swcounter_overflow(counter, nmi, regs, addr);
2845 }
2846
2847 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2848                                      enum perf_event_types type, u32 event,
2849                                      u64 nr, int nmi, struct pt_regs *regs,
2850                                      u64 addr)
2851 {
2852         struct perf_counter *counter;
2853
2854         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2855                 return;
2856
2857         rcu_read_lock();
2858         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2859                 if (perf_swcounter_match(counter, type, event, regs))
2860                         perf_swcounter_add(counter, nr, nmi, regs, addr);
2861         }
2862         rcu_read_unlock();
2863 }
2864
2865 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2866 {
2867         if (in_nmi())
2868                 return &cpuctx->recursion[3];
2869
2870         if (in_irq())
2871                 return &cpuctx->recursion[2];
2872
2873         if (in_softirq())
2874                 return &cpuctx->recursion[1];
2875
2876         return &cpuctx->recursion[0];
2877 }
2878
2879 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2880                                    u64 nr, int nmi, struct pt_regs *regs,
2881                                    u64 addr)
2882 {
2883         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
2884         int *recursion = perf_swcounter_recursion_context(cpuctx);
2885
2886         if (*recursion)
2887                 goto out;
2888
2889         (*recursion)++;
2890         barrier();
2891
2892         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
2893                                  nr, nmi, regs, addr);
2894         if (cpuctx->task_ctx) {
2895                 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2896                                          nr, nmi, regs, addr);
2897         }
2898
2899         barrier();
2900         (*recursion)--;
2901
2902 out:
2903         put_cpu_var(perf_cpu_context);
2904 }
2905
2906 void
2907 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
2908 {
2909         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
2910 }
2911
2912 static void perf_swcounter_read(struct perf_counter *counter)
2913 {
2914         perf_swcounter_update(counter);
2915 }
2916
2917 static int perf_swcounter_enable(struct perf_counter *counter)
2918 {
2919         perf_swcounter_set_period(counter);
2920         return 0;
2921 }
2922
2923 static void perf_swcounter_disable(struct perf_counter *counter)
2924 {
2925         perf_swcounter_update(counter);
2926 }
2927
2928 static const struct pmu perf_ops_generic = {
2929         .enable         = perf_swcounter_enable,
2930         .disable        = perf_swcounter_disable,
2931         .read           = perf_swcounter_read,
2932 };
2933
2934 /*
2935  * Software counter: cpu wall time clock
2936  */
2937
2938 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2939 {
2940         int cpu = raw_smp_processor_id();
2941         s64 prev;
2942         u64 now;
2943
2944         now = cpu_clock(cpu);
2945         prev = atomic64_read(&counter->hw.prev_count);
2946         atomic64_set(&counter->hw.prev_count, now);
2947         atomic64_add(now - prev, &counter->count);
2948 }
2949
2950 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2951 {
2952         struct hw_perf_counter *hwc = &counter->hw;
2953         int cpu = raw_smp_processor_id();
2954
2955         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2956         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2957         hwc->hrtimer.function = perf_swcounter_hrtimer;
2958         if (hwc->irq_period) {
2959                 u64 period = max_t(u64, 10000, hwc->irq_period);
2960                 __hrtimer_start_range_ns(&hwc->hrtimer,
2961                                 ns_to_ktime(period), 0,
2962                                 HRTIMER_MODE_REL, 0);
2963         }
2964
2965         return 0;
2966 }
2967
2968 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2969 {
2970         if (counter->hw.irq_period)
2971                 hrtimer_cancel(&counter->hw.hrtimer);
2972         cpu_clock_perf_counter_update(counter);
2973 }
2974
2975 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2976 {
2977         cpu_clock_perf_counter_update(counter);
2978 }
2979
2980 static const struct pmu perf_ops_cpu_clock = {
2981         .enable         = cpu_clock_perf_counter_enable,
2982         .disable        = cpu_clock_perf_counter_disable,
2983         .read           = cpu_clock_perf_counter_read,
2984 };
2985
2986 /*
2987  * Software counter: task time clock
2988  */
2989
2990 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
2991 {
2992         u64 prev;
2993         s64 delta;
2994
2995         prev = atomic64_xchg(&counter->hw.prev_count, now);
2996         delta = now - prev;
2997         atomic64_add(delta, &counter->count);
2998 }
2999
3000 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3001 {
3002         struct hw_perf_counter *hwc = &counter->hw;
3003         u64 now;
3004
3005         now = counter->ctx->time;
3006
3007         atomic64_set(&hwc->prev_count, now);
3008         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3009         hwc->hrtimer.function = perf_swcounter_hrtimer;
3010         if (hwc->irq_period) {
3011                 u64 period = max_t(u64, 10000, hwc->irq_period);
3012                 __hrtimer_start_range_ns(&hwc->hrtimer,
3013                                 ns_to_ktime(period), 0,
3014                                 HRTIMER_MODE_REL, 0);
3015         }
3016
3017         return 0;
3018 }
3019
3020 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3021 {
3022         if (counter->hw.irq_period)
3023                 hrtimer_cancel(&counter->hw.hrtimer);
3024         task_clock_perf_counter_update(counter, counter->ctx->time);
3025
3026 }
3027
3028 static void task_clock_perf_counter_read(struct perf_counter *counter)
3029 {
3030         u64 time;
3031
3032         if (!in_nmi()) {
3033                 update_context_time(counter->ctx);
3034                 time = counter->ctx->time;
3035         } else {
3036                 u64 now = perf_clock();
3037                 u64 delta = now - counter->ctx->timestamp;
3038                 time = counter->ctx->time + delta;
3039         }
3040
3041         task_clock_perf_counter_update(counter, time);
3042 }
3043
3044 static const struct pmu perf_ops_task_clock = {
3045         .enable         = task_clock_perf_counter_enable,
3046         .disable        = task_clock_perf_counter_disable,
3047         .read           = task_clock_perf_counter_read,
3048 };
3049
3050 /*
3051  * Software counter: cpu migrations
3052  */
3053
3054 static inline u64 get_cpu_migrations(struct perf_counter *counter)
3055 {
3056         struct task_struct *curr = counter->ctx->task;
3057
3058         if (curr)
3059                 return curr->se.nr_migrations;
3060         return cpu_nr_migrations(smp_processor_id());
3061 }
3062
3063 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
3064 {
3065         u64 prev, now;
3066         s64 delta;
3067
3068         prev = atomic64_read(&counter->hw.prev_count);
3069         now = get_cpu_migrations(counter);
3070
3071         atomic64_set(&counter->hw.prev_count, now);
3072
3073         delta = now - prev;
3074
3075         atomic64_add(delta, &counter->count);
3076 }
3077
3078 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
3079 {
3080         cpu_migrations_perf_counter_update(counter);
3081 }
3082
3083 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
3084 {
3085         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
3086                 atomic64_set(&counter->hw.prev_count,
3087                              get_cpu_migrations(counter));
3088         return 0;
3089 }
3090
3091 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
3092 {
3093         cpu_migrations_perf_counter_update(counter);
3094 }
3095
3096 static const struct pmu perf_ops_cpu_migrations = {
3097         .enable         = cpu_migrations_perf_counter_enable,
3098         .disable        = cpu_migrations_perf_counter_disable,
3099         .read           = cpu_migrations_perf_counter_read,
3100 };
3101
3102 #ifdef CONFIG_EVENT_PROFILE
3103 void perf_tpcounter_event(int event_id)
3104 {
3105         struct pt_regs *regs = get_irq_regs();
3106
3107         if (!regs)
3108                 regs = task_pt_regs(current);
3109
3110         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3111 }
3112 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3113
3114 extern int ftrace_profile_enable(int);
3115 extern void ftrace_profile_disable(int);
3116
3117 static void tp_perf_counter_destroy(struct perf_counter *counter)
3118 {
3119         ftrace_profile_disable(perf_event_id(&counter->hw_event));
3120 }
3121
3122 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3123 {
3124         int event_id = perf_event_id(&counter->hw_event);
3125         int ret;
3126
3127         ret = ftrace_profile_enable(event_id);
3128         if (ret)
3129                 return NULL;
3130
3131         counter->destroy = tp_perf_counter_destroy;
3132         counter->hw.irq_period = counter->hw_event.irq_period;
3133
3134         return &perf_ops_generic;
3135 }
3136 #else
3137 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3138 {
3139         return NULL;
3140 }
3141 #endif
3142
3143 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3144 {
3145         const struct pmu *pmu = NULL;
3146
3147         /*
3148          * Software counters (currently) can't in general distinguish
3149          * between user, kernel and hypervisor events.
3150          * However, context switches and cpu migrations are considered
3151          * to be kernel events, and page faults are never hypervisor
3152          * events.
3153          */
3154         switch (perf_event_id(&counter->hw_event)) {
3155         case PERF_COUNT_CPU_CLOCK:
3156                 pmu = &perf_ops_cpu_clock;
3157
3158                 break;
3159         case PERF_COUNT_TASK_CLOCK:
3160                 /*
3161                  * If the user instantiates this as a per-cpu counter,
3162                  * use the cpu_clock counter instead.
3163                  */
3164                 if (counter->ctx->task)
3165                         pmu = &perf_ops_task_clock;
3166                 else
3167                         pmu = &perf_ops_cpu_clock;
3168
3169                 break;
3170         case PERF_COUNT_PAGE_FAULTS:
3171         case PERF_COUNT_PAGE_FAULTS_MIN:
3172         case PERF_COUNT_PAGE_FAULTS_MAJ:
3173         case PERF_COUNT_CONTEXT_SWITCHES:
3174                 pmu = &perf_ops_generic;
3175                 break;
3176         case PERF_COUNT_CPU_MIGRATIONS:
3177                 if (!counter->hw_event.exclude_kernel)
3178                         pmu = &perf_ops_cpu_migrations;
3179                 break;
3180         }
3181
3182         return pmu;
3183 }
3184
3185 /*
3186  * Allocate and initialize a counter structure
3187  */
3188 static struct perf_counter *
3189 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
3190                    int cpu,
3191                    struct perf_counter_context *ctx,
3192                    struct perf_counter *group_leader,
3193                    gfp_t gfpflags)
3194 {
3195         const struct pmu *pmu;
3196         struct perf_counter *counter;
3197         struct hw_perf_counter *hwc;
3198         long err;
3199
3200         counter = kzalloc(sizeof(*counter), gfpflags);
3201         if (!counter)
3202                 return ERR_PTR(-ENOMEM);
3203
3204         /*
3205          * Single counters are their own group leaders, with an
3206          * empty sibling list:
3207          */
3208         if (!group_leader)
3209                 group_leader = counter;
3210
3211         mutex_init(&counter->child_mutex);
3212         INIT_LIST_HEAD(&counter->child_list);
3213
3214         INIT_LIST_HEAD(&counter->list_entry);
3215         INIT_LIST_HEAD(&counter->event_entry);
3216         INIT_LIST_HEAD(&counter->sibling_list);
3217         init_waitqueue_head(&counter->waitq);
3218
3219         mutex_init(&counter->mmap_mutex);
3220
3221         counter->cpu                    = cpu;
3222         counter->hw_event               = *hw_event;
3223         counter->group_leader           = group_leader;
3224         counter->pmu                    = NULL;
3225         counter->ctx                    = ctx;
3226         counter->oncpu                  = -1;
3227
3228         counter->state = PERF_COUNTER_STATE_INACTIVE;
3229         if (hw_event->disabled)
3230                 counter->state = PERF_COUNTER_STATE_OFF;
3231
3232         pmu = NULL;
3233
3234         hwc = &counter->hw;
3235         if (hw_event->freq && hw_event->irq_freq)
3236                 hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
3237         else
3238                 hwc->irq_period = hw_event->irq_period;
3239
3240         /*
3241          * we currently do not support PERF_RECORD_GROUP on inherited counters
3242          */
3243         if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
3244                 goto done;
3245
3246         if (perf_event_raw(hw_event)) {
3247                 pmu = hw_perf_counter_init(counter);
3248                 goto done;
3249         }
3250
3251         switch (perf_event_type(hw_event)) {
3252         case PERF_TYPE_HARDWARE:
3253                 pmu = hw_perf_counter_init(counter);
3254                 break;
3255
3256         case PERF_TYPE_SOFTWARE:
3257                 pmu = sw_perf_counter_init(counter);
3258                 break;
3259
3260         case PERF_TYPE_TRACEPOINT:
3261                 pmu = tp_perf_counter_init(counter);
3262                 break;
3263         }
3264 done:
3265         err = 0;
3266         if (!pmu)
3267                 err = -EINVAL;
3268         else if (IS_ERR(pmu))
3269                 err = PTR_ERR(pmu);
3270
3271         if (err) {
3272                 kfree(counter);
3273                 return ERR_PTR(err);
3274         }
3275
3276         counter->pmu = pmu;
3277
3278         atomic_inc(&nr_counters);
3279         if (counter->hw_event.mmap)
3280                 atomic_inc(&nr_mmap_tracking);
3281         if (counter->hw_event.munmap)
3282                 atomic_inc(&nr_munmap_tracking);
3283         if (counter->hw_event.comm)
3284                 atomic_inc(&nr_comm_tracking);
3285
3286         return counter;
3287 }
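/*
 * Callers must treat the return value as an ERR_PTR rather than checking
 * for NULL. A minimal usage sketch, mirroring what sys_perf_counter_open()
 * below does:
 *
 *      counter = perf_counter_alloc(&hw_event, cpu, ctx, NULL, GFP_KERNEL);
 *      if (IS_ERR(counter))
 *              return PTR_ERR(counter);
 */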
3288
3289 /**
3290  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
3291  *
3292  * @hw_event_uptr:      event type attributes for monitoring/sampling
3293  * @pid:                target pid
3294  * @cpu:                target cpu
3295  * @group_fd:           group leader counter fd
3296  */
3297 SYSCALL_DEFINE5(perf_counter_open,
3298                 const struct perf_counter_hw_event __user *, hw_event_uptr,
3299                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3300 {
3301         struct perf_counter *counter, *group_leader;
3302         struct perf_counter_hw_event hw_event;
3303         struct perf_counter_context *ctx;
3304         struct file *counter_file = NULL;
3305         struct file *group_file = NULL;
3306         int fput_needed = 0;
3307         int fput_needed2 = 0;
3308         int ret;
3309
3310         /* for future expandability... */
3311         if (flags)
3312                 return -EINVAL;
3313
3314         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
3315                 return -EFAULT;
3316
3317         /*
3318          * Get the target context (task or percpu):
3319          */
3320         ctx = find_get_context(pid, cpu);
3321         if (IS_ERR(ctx))
3322                 return PTR_ERR(ctx);
3323
3324         /*
3325          * Look up the group leader (we will attach this counter to it):
3326          */
3327         group_leader = NULL;
3328         if (group_fd != -1) {
3329                 ret = -EINVAL;
3330                 group_file = fget_light(group_fd, &fput_needed);
3331                 if (!group_file)
3332                         goto err_put_context;
3333                 if (group_file->f_op != &perf_fops)
3334                         goto err_put_context;
3335
3336                 group_leader = group_file->private_data;
3337                 /*
3338                  * Do not allow a recursive hierarchy (this new sibling
3339                  * becoming part of another group-sibling):
3340                  */
3341                 if (group_leader->group_leader != group_leader)
3342                         goto err_put_context;
3343                 /*
3344                  * Do not allow attaching to a group in a different
3345                  * task or CPU context:
3346                  */
3347                 if (group_leader->ctx != ctx)
3348                         goto err_put_context;
3349                 /*
3350                  * Only a group leader can be exclusive or pinned
3351                  */
3352                 if (hw_event.exclusive || hw_event.pinned)
3353                         goto err_put_context;
3354         }
3355
3356         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
3357                                      GFP_KERNEL);
3358         ret = PTR_ERR(counter);
3359         if (IS_ERR(counter))
3360                 goto err_put_context;
3361
3362         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3363         if (ret < 0)
3364                 goto err_free_put_context;
3365
3366         counter_file = fget_light(ret, &fput_needed2);
3367         if (!counter_file)
3368                 goto err_free_put_context;
3369
3370         counter->filp = counter_file;
3371         WARN_ON_ONCE(ctx->parent_ctx);
3372         mutex_lock(&ctx->mutex);
3373         perf_install_in_context(ctx, counter, cpu);
3374         ++ctx->generation;
3375         mutex_unlock(&ctx->mutex);
3376
3377         counter->owner = current;
3378         get_task_struct(current);
3379         mutex_lock(&current->perf_counter_mutex);
3380         list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3381         mutex_unlock(&current->perf_counter_mutex);
3382
3383         fput_light(counter_file, fput_needed2);
3384
3385 out_fput:
3386         fput_light(group_file, fput_needed);
3387
3388         return ret;
3389
3390 err_free_put_context:
3391         kfree(counter);
3392
3393 err_put_context:
3394         put_ctx(ctx);
3395
3396         goto out_fput;
3397 }
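/*
 * Illustrative user-space sketch (an assumption for documentation purposes,
 * not part of this file): with a raw syscall wrapper, a counter on the
 * current task across all CPUs could be opened roughly as
 *
 *      struct perf_counter_hw_event hw_event = { 0 };
 *      int fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
 *
 * (a real caller would also select an event in hw_event). Here pid == 0
 * selects the current task, cpu == -1 means any CPU, group_fd == -1 makes
 * the counter its own group leader, and flags must currently be 0 - anything
 * else returns -EINVAL above.
 */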
3398
3399 /*
3400  * inherit a counter from parent task to child task:
3401  */
3402 static struct perf_counter *
3403 inherit_counter(struct perf_counter *parent_counter,
3404               struct task_struct *parent,
3405               struct perf_counter_context *parent_ctx,
3406               struct task_struct *child,
3407               struct perf_counter *group_leader,
3408               struct perf_counter_context *child_ctx)
3409 {
3410         struct perf_counter *child_counter;
3411
3412         /*
3413          * Instead of creating recursive hierarchies of counters,
3414          * we link inherited counters back to the original parent,
3415          * which is guaranteed to have a filp that we use as the
3416          * reference count:
3417          */
3418         if (parent_counter->parent)
3419                 parent_counter = parent_counter->parent;
3420
3421         child_counter = perf_counter_alloc(&parent_counter->hw_event,
3422                                            parent_counter->cpu, child_ctx,
3423                                            group_leader, GFP_KERNEL);
3424         if (IS_ERR(child_counter))
3425                 return child_counter;
3426         get_ctx(child_ctx);
3427
3428         /*
3429          * Make the child state follow the state of the parent counter,
3430          * not its hw_event.disabled bit.  We hold the parent's mutex,
3431          * so we won't race with perf_counter_{en,dis}able_family.
3432          */
3433         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3434                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3435         else
3436                 child_counter->state = PERF_COUNTER_STATE_OFF;
3437
3438         /*
3439          * Link it up in the child's context:
3440          */
3441         add_counter_to_ctx(child_counter, child_ctx);
3442
3443         child_counter->parent = parent_counter;
3444         /*
3445          * inherit into child's child as well:
3446          */
3447         child_counter->hw_event.inherit = 1;
3448
3449         /*
3450          * Get a reference to the parent filp - we will fput it
3451          * when the child counter exits. This is safe to do because
3452          * we are in the parent and we know that the filp still
3453          * exists and has a nonzero count:
3454          */
3455         atomic_long_inc(&parent_counter->filp->f_count);
3456
3457         /*
3458          * Link this into the parent counter's child list
3459          */
3460         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3461         mutex_lock(&parent_counter->child_mutex);
3462         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3463         mutex_unlock(&parent_counter->child_mutex);
3464
3465         return child_counter;
3466 }
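/*
 * Note that the child counter created above is a mirror of the top-level
 * parent: when the child task exits, sync_child_counter() below folds its
 * count and times back into the parent and drops the filp reference taken
 * here.
 */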
3467
3468 static int inherit_group(struct perf_counter *parent_counter,
3469               struct task_struct *parent,
3470               struct perf_counter_context *parent_ctx,
3471               struct task_struct *child,
3472               struct perf_counter_context *child_ctx)
3473 {
3474         struct perf_counter *leader;
3475         struct perf_counter *sub;
3476         struct perf_counter *child_ctr;
3477
3478         leader = inherit_counter(parent_counter, parent, parent_ctx,
3479                                  child, NULL, child_ctx);
3480         if (IS_ERR(leader))
3481                 return PTR_ERR(leader);
3482         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3483                 child_ctr = inherit_counter(sub, parent, parent_ctx,
3484                                             child, leader, child_ctx);
3485                 if (IS_ERR(child_ctr))
3486                         return PTR_ERR(child_ctr);
3487         }
3488         return 0;
3489 }
3490
3491 static void sync_child_counter(struct perf_counter *child_counter,
3492                                struct perf_counter *parent_counter)
3493 {
3494         u64 child_val;
3495
3496         child_val = atomic64_read(&child_counter->count);
3497
3498         /*
3499          * Add back the child's count to the parent's count:
3500          */
3501         atomic64_add(child_val, &parent_counter->count);
3502         atomic64_add(child_counter->total_time_enabled,
3503                      &parent_counter->child_total_time_enabled);
3504         atomic64_add(child_counter->total_time_running,
3505                      &parent_counter->child_total_time_running);
3506
3507         /*
3508          * Remove this counter from the parent's list
3509          */
3510         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3511         mutex_lock(&parent_counter->child_mutex);
3512         list_del_init(&child_counter->child_list);
3513         mutex_unlock(&parent_counter->child_mutex);
3514
3515         /*
3516          * Release the parent counter, if this was the last
3517          * reference to it.
3518          */
3519         fput(parent_counter->filp);
3520 }
3521
3522 static void
3523 __perf_counter_exit_task(struct task_struct *child,
3524                          struct perf_counter *child_counter,
3525                          struct perf_counter_context *child_ctx)
3526 {
3527         struct perf_counter *parent_counter;
3528
3529         update_counter_times(child_counter);
3530         perf_counter_remove_from_context(child_counter);
3531
3532         parent_counter = child_counter->parent;
3533         /*
3534          * It can happen that the parent exits first, and has counters
3535          * that are still around due to the child reference. These
3536          * counters need to be zapped; otherwise they just linger.
3537          */
3538         if (parent_counter) {
3539                 sync_child_counter(child_counter, parent_counter);
3540                 free_counter(child_counter);
3541         }
3542 }
3543
3544 /*
3545  * When a child task exits, feed back counter values to parent counters.
3546  */
3547 void perf_counter_exit_task(struct task_struct *child)
3548 {
3549         struct perf_counter *child_counter, *tmp;
3550         struct perf_counter_context *child_ctx;
3551         unsigned long flags;
3552
3553         if (likely(!child->perf_counter_ctxp))
3554                 return;
3555
3556         local_irq_save(flags);
3557         /*
3558          * We can't reschedule here because interrupts are disabled,
3559          * and either child is current or it is a task that can't be
3560          * scheduled, so a reschedule cannot change our context
3561          * underneath us.
3562          */
3563         child_ctx = child->perf_counter_ctxp;
3564         __perf_counter_task_sched_out(child_ctx);
3565
3566         /*
3567          * Take the context lock here so that if find_get_context is
3568          * reading child->perf_counter_ctxp, we wait until it has
3569          * incremented the context's refcount before we do put_ctx below.
3570          */
3571         spin_lock(&child_ctx->lock);
3572         child->perf_counter_ctxp = NULL;
3573         if (child_ctx->parent_ctx) {
3574                 /*
3575                  * This context is a clone; unclone it so it can't get
3576                  * swapped to another process while we're removing all
3577                  * the counters from it.
3578                  */
3579                 put_ctx(child_ctx->parent_ctx);
3580                 child_ctx->parent_ctx = NULL;
3581         }
3582         spin_unlock(&child_ctx->lock);
3583         local_irq_restore(flags);
3584
3585         mutex_lock(&child_ctx->mutex);
3586
3587 again:
3588         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3589                                  list_entry)
3590                 __perf_counter_exit_task(child, child_counter, child_ctx);
3591
3592         /*
3593          * If the last counter was a group counter, it will have appended all
3594          * its siblings to the list, but we obtained 'tmp' before that, so
3595          * it still points to the list head and terminates the iteration.
3596          */
3597         if (!list_empty(&child_ctx->counter_list))
3598                 goto again;
3599
3600         mutex_unlock(&child_ctx->mutex);
3601
3602         put_ctx(child_ctx);
3603 }
3604
3605 /*
3606  * Initialize the perf_counter context in task_struct
3607  */
3608 int perf_counter_init_task(struct task_struct *child)
3609 {
3610         struct perf_counter_context *child_ctx, *parent_ctx;
3611         struct perf_counter_context *cloned_ctx;
3612         struct perf_counter *counter;
3613         struct task_struct *parent = current;
3614         int inherited_all = 1;
3615         u64 cloned_gen;
3616         int ret = 0;
3617
3618         child->perf_counter_ctxp = NULL;
3619
3620         mutex_init(&child->perf_counter_mutex);
3621         INIT_LIST_HEAD(&child->perf_counter_list);
3622
3623         if (likely(!parent->perf_counter_ctxp))
3624                 return 0;
3625
3626         /*
3627          * This is executed from the parent task context, so inherit
3628          * counters that have been marked for cloning.
3629          * First allocate and initialize a context for the child.
3630          */
3631
3632         child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3633         if (!child_ctx)
3634                 return -ENOMEM;
3635
3636         __perf_counter_init_context(child_ctx, child);
3637         child->perf_counter_ctxp = child_ctx;
3638         get_task_struct(child);
3639
3640         /*
3641          * If the parent's context is a clone, temporarily set its
3642          * parent_gen to an impossible value (all 1s) so it won't get
3643          * swapped under us.  The rcu_read_lock makes sure that
3644          * parent_ctx continues to exist even if it gets swapped to
3645          * another process and then freed while we are trying to get
3646          * its lock.
3647          */
3648         rcu_read_lock();
3649  retry:
3650         parent_ctx = rcu_dereference(parent->perf_counter_ctxp);
3651         /*
3652          * No need to check if parent_ctx != NULL here; since we saw
3653          * it non-NULL earlier, the only reason for it to become NULL
3654          * is if we exit, and since we're currently in the middle of
3655          * a fork we can't be exiting at the same time.
3656          */
3657         spin_lock_irq(&parent_ctx->lock);
3658         if (parent_ctx != rcu_dereference(parent->perf_counter_ctxp)) {
3659                 spin_unlock_irq(&parent_ctx->lock);
3660                 goto retry;
3661         }
3662         cloned_gen = parent_ctx->parent_gen;
3663         if (parent_ctx->parent_ctx)
3664                 parent_ctx->parent_gen = ~0ull;
3665         spin_unlock_irq(&parent_ctx->lock);
3666         rcu_read_unlock();
3667
3668         /*
3669          * Lock the parent list. No need to lock the child - it is not
3670          * PID-hashed yet and not running, so nobody can access it.
3671          */
3672         mutex_lock(&parent_ctx->mutex);
3673
3674         /*
3675          * We don't have to disable NMIs - we are only looking at
3676          * the list, not manipulating it:
3677          */
3678         list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3679                 if (counter != counter->group_leader)
3680                         continue;
3681
3682                 if (!counter->hw_event.inherit) {
3683                         inherited_all = 0;
3684                         continue;
3685                 }
3686
3687                 ret = inherit_group(counter, parent, parent_ctx,
3688                                              child, child_ctx);
3689                 if (ret) {
3690                         inherited_all = 0;
3691                         break;
3692                 }
3693         }
3694
3695         if (inherited_all) {
3696                 /*
3697                  * Mark the child context as a clone of the parent
3698                  * context, or of whatever the parent is a clone of.
3699                  * Note that if the parent is a clone, it could get
3700                  * uncloned at any point, but that doesn't matter
3701                  * because the list of counters and the generation
3702                  * count can't have changed since we took the mutex.
3703                  */
3704                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3705                 if (cloned_ctx) {
3706                         child_ctx->parent_ctx = cloned_ctx;
3707                         child_ctx->parent_gen = cloned_gen;
3708                 } else {
3709                         child_ctx->parent_ctx = parent_ctx;
3710                         child_ctx->parent_gen = parent_ctx->generation;
3711                 }
3712                 get_ctx(child_ctx->parent_ctx);
3713         }
3714
3715         mutex_unlock(&parent_ctx->mutex);
3716
3717         /*
3718          * Restore the clone status of the parent.
3719          */
3720         if (parent_ctx->parent_ctx) {
3721                 spin_lock_irq(&parent_ctx->lock);
3722                 if (parent_ctx->parent_ctx)
3723                         parent_ctx->parent_gen = cloned_gen;
3724                 spin_unlock_irq(&parent_ctx->lock);
3725         }
3726
3727         return ret;
3728 }
3729
3730 static void __cpuinit perf_counter_init_cpu(int cpu)
3731 {
3732         struct perf_cpu_context *cpuctx;
3733
3734         cpuctx = &per_cpu(perf_cpu_context, cpu);
3735         __perf_counter_init_context(&cpuctx->ctx, NULL);
3736
3737         spin_lock(&perf_resource_lock);
3738         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3739         spin_unlock(&perf_resource_lock);
3740
3741         hw_perf_counter_setup(cpu);
3742 }
3743
3744 #ifdef CONFIG_HOTPLUG_CPU
3745 static void __perf_counter_exit_cpu(void *info)
3746 {
3747         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3748         struct perf_counter_context *ctx = &cpuctx->ctx;
3749         struct perf_counter *counter, *tmp;
3750
3751         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
3752                 __perf_counter_remove_from_context(counter);
3753 }
3754 static void perf_counter_exit_cpu(int cpu)
3755 {
3756         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3757         struct perf_counter_context *ctx = &cpuctx->ctx;
3758
3759         mutex_lock(&ctx->mutex);
3760         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
3761         mutex_unlock(&ctx->mutex);
3762 }
3763 #else
3764 static inline void perf_counter_exit_cpu(int cpu) { }
3765 #endif
3766
3767 static int __cpuinit
3768 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3769 {
3770         unsigned int cpu = (long)hcpu;
3771
3772         switch (action) {
3773
3774         case CPU_UP_PREPARE:
3775         case CPU_UP_PREPARE_FROZEN:
3776                 perf_counter_init_cpu(cpu);
3777                 break;
3778
3779         case CPU_DOWN_PREPARE:
3780         case CPU_DOWN_PREPARE_FROZEN:
3781                 perf_counter_exit_cpu(cpu);
3782                 break;
3783
3784         default:
3785                 break;
3786         }
3787
3788         return NOTIFY_OK;
3789 }
3790
3791 static struct notifier_block __cpuinitdata perf_cpu_nb = {
3792         .notifier_call          = perf_cpu_notify,
3793 };
3794
3795 void __init perf_counter_init(void)
3796 {
3797         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3798                         (void *)(long)smp_processor_id());
3799         register_cpu_notifier(&perf_cpu_nb);
3800 }
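/*
 * The boot CPU is already online by the time this runs, so its context is
 * set up by calling the notifier directly above; CPUs brought up later are
 * handled through the registered notifier.
 */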
3801
3802 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3803 {
3804         return sprintf(buf, "%d\n", perf_reserved_percpu);
3805 }
3806
3807 static ssize_t
3808 perf_set_reserve_percpu(struct sysdev_class *class,
3809                         const char *buf,
3810                         size_t count)
3811 {
3812         struct perf_cpu_context *cpuctx;
3813         unsigned long val;
3814         int err, cpu, mpt;
3815
3816         err = strict_strtoul(buf, 10, &val);
3817         if (err)
3818                 return err;
3819         if (val > perf_max_counters)
3820                 return -EINVAL;
3821
3822         spin_lock(&perf_resource_lock);
3823         perf_reserved_percpu = val;
3824         for_each_online_cpu(cpu) {
3825                 cpuctx = &per_cpu(perf_cpu_context, cpu);
3826                 spin_lock_irq(&cpuctx->ctx.lock);
3827                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3828                           perf_max_counters - perf_reserved_percpu);
3829                 cpuctx->max_pertask = mpt;
3830                 spin_unlock_irq(&cpuctx->ctx.lock);
3831         }
3832         spin_unlock(&perf_resource_lock);
3833
3834         return count;
3835 }
3836
3837 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3838 {
3839         return sprintf(buf, "%d\n", perf_overcommit);
3840 }
3841
3842 static ssize_t
3843 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3844 {
3845         unsigned long val;
3846         int err;
3847
3848         err = strict_strtoul(buf, 10, &val);
3849         if (err)
3850                 return err;
3851         if (val > 1)
3852                 return -EINVAL;
3853
3854         spin_lock(&perf_resource_lock);
3855         perf_overcommit = val;
3856         spin_unlock(&perf_resource_lock);
3857
3858         return count;
3859 }
3860
3861 static SYSDEV_CLASS_ATTR(
3862                                 reserve_percpu,
3863                                 0644,
3864                                 perf_show_reserve_percpu,
3865                                 perf_set_reserve_percpu
3866                         );
3867
3868 static SYSDEV_CLASS_ATTR(
3869                                 overcommit,
3870                                 0644,
3871                                 perf_show_overcommit,
3872                                 perf_set_overcommit
3873                         );
3874
3875 static struct attribute *perfclass_attrs[] = {
3876         &attr_reserve_percpu.attr,
3877         &attr_overcommit.attr,
3878         NULL
3879 };
3880
3881 static struct attribute_group perfclass_attr_group = {
3882         .attrs                  = perfclass_attrs,
3883         .name                   = "perf_counters",
3884 };
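/*
 * With the usual sysdev layout these attributes are expected to appear as
 * /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit} (path
 * assumed, not verified here). For example, assuming that path:
 *
 *      echo 4 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 *
 * reserves 4 counters on each CPU, lowering cpuctx->max_pertask accordingly;
 * values larger than perf_max_counters are rejected with -EINVAL.
 */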
3885
3886 static int __init perf_counter_sysfs_init(void)
3887 {
3888         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3889                                   &perfclass_attr_group);
3890 }
3891 device_initcall(perf_counter_sysfs_init);