perf_counter: Separate out attr->type from attr->config
kernel/perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  *  For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34  * Each CPU has a list of per CPU counters:
35  */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
45
46 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
48 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
49
50 static atomic64_t perf_counter_id;
51
52 /*
53  * Lock for (sysadmin-configurable) counter reservations:
54  */
55 static DEFINE_SPINLOCK(perf_resource_lock);
56
57 /*
58  * Architecture provided APIs - weak aliases:
59  */
60 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
61 {
62         return NULL;
63 }
64
65 void __weak hw_perf_disable(void)               { barrier(); }
66 void __weak hw_perf_enable(void)                { barrier(); }
67
68 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
69
70 int __weak
71 hw_perf_group_sched_in(struct perf_counter *group_leader,
72                struct perf_cpu_context *cpuctx,
73                struct perf_counter_context *ctx, int cpu)
74 {
75         return 0;
76 }
77
78 void __weak perf_counter_print_debug(void)      { }
79
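/*
 * perf_disable()/perf_enable() nest via a per-cpu count: the hardware
 * PMU is only re-enabled when the innermost perf_enable() brings the
 * count back down to zero.
 */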
80 static DEFINE_PER_CPU(int, disable_count);
81
82 void __perf_disable(void)
83 {
84         __get_cpu_var(disable_count)++;
85 }
86
87 bool __perf_enable(void)
88 {
89         return !--__get_cpu_var(disable_count);
90 }
91
92 void perf_disable(void)
93 {
94         __perf_disable();
95         hw_perf_disable();
96 }
97
98 void perf_enable(void)
99 {
100         if (__perf_enable())
101                 hw_perf_enable();
102 }
103
104 static void get_ctx(struct perf_counter_context *ctx)
105 {
106         atomic_inc(&ctx->refcount);
107 }
108
109 static void free_ctx(struct rcu_head *head)
110 {
111         struct perf_counter_context *ctx;
112
113         ctx = container_of(head, struct perf_counter_context, rcu_head);
114         kfree(ctx);
115 }
116
117 static void put_ctx(struct perf_counter_context *ctx)
118 {
119         if (atomic_dec_and_test(&ctx->refcount)) {
120                 if (ctx->parent_ctx)
121                         put_ctx(ctx->parent_ctx);
122                 if (ctx->task)
123                         put_task_struct(ctx->task);
124                 call_rcu(&ctx->rcu_head, free_ctx);
125         }
126 }
127
128 /*
129  * Get the perf_counter_context for a task and lock it.
130  * This has to cope with the fact that until it is locked,
131  * the context could get moved to another task.
132  */
133 static struct perf_counter_context *
134 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
135 {
136         struct perf_counter_context *ctx;
137
138         rcu_read_lock();
139  retry:
140         ctx = rcu_dereference(task->perf_counter_ctxp);
141         if (ctx) {
142                 /*
143                  * If this context is a clone of another, it might
144                  * get swapped for another underneath us by
145                  * perf_counter_task_sched_out, though the
146                  * rcu_read_lock() protects us from any context
147                  * getting freed.  Lock the context and check if it
148                  * got swapped before we could get the lock, and retry
149                  * if so.  If we locked the right context, then it
150                  * can't get swapped on us any more.
151                  */
152                 spin_lock_irqsave(&ctx->lock, *flags);
153                 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
154                         spin_unlock_irqrestore(&ctx->lock, *flags);
155                         goto retry;
156                 }
157         }
158         rcu_read_unlock();
159         return ctx;
160 }
161
162 /*
163  * Get the context for a task and increment its pin_count so it
164  * can't get swapped to another task.  This also increments its
165  * reference count so that the context can't get freed.
166  */
167 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
168 {
169         struct perf_counter_context *ctx;
170         unsigned long flags;
171
172         ctx = perf_lock_task_context(task, &flags);
173         if (ctx) {
174                 ++ctx->pin_count;
175                 get_ctx(ctx);
176                 spin_unlock_irqrestore(&ctx->lock, flags);
177         }
178         return ctx;
179 }
180
181 static void perf_unpin_context(struct perf_counter_context *ctx)
182 {
183         unsigned long flags;
184
185         spin_lock_irqsave(&ctx->lock, flags);
186         --ctx->pin_count;
187         spin_unlock_irqrestore(&ctx->lock, flags);
188         put_ctx(ctx);
189 }
190
191 /*
192  * Add a counter to the lists for its context.
193  * Must be called with ctx->mutex and ctx->lock held.
194  */
195 static void
196 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
197 {
198         struct perf_counter *group_leader = counter->group_leader;
199
200         /*
201          * Depending on whether it is a standalone or sibling counter,
202          * add it straight to the context's counter list, or to the group
203          * leader's sibling list:
204          */
205         if (group_leader == counter)
206                 list_add_tail(&counter->list_entry, &ctx->counter_list);
207         else {
208                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
209                 group_leader->nr_siblings++;
210         }
211
212         list_add_rcu(&counter->event_entry, &ctx->event_list);
213         ctx->nr_counters++;
214 }
215
216 /*
217  * Remove a counter from the lists for its context.
218  * Must be called with ctx->mutex and ctx->lock held.
219  */
220 static void
221 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
222 {
223         struct perf_counter *sibling, *tmp;
224
225         if (list_empty(&counter->list_entry))
226                 return;
227         ctx->nr_counters--;
228
229         list_del_init(&counter->list_entry);
230         list_del_rcu(&counter->event_entry);
231
232         if (counter->group_leader != counter)
233                 counter->group_leader->nr_siblings--;
234
235         /*
236          * If this was a group counter with sibling counters then
237          * upgrade the siblings to singleton counters by adding them
238          * to the context list directly:
239          */
240         list_for_each_entry_safe(sibling, tmp,
241                                  &counter->sibling_list, list_entry) {
242
243                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
244                 sibling->group_leader = sibling;
245         }
246 }
247
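/*
 * Take a counter off the PMU: mark it inactive, record the time it
 * stopped and update the context's active-counter bookkeeping.
 */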
248 static void
249 counter_sched_out(struct perf_counter *counter,
250                   struct perf_cpu_context *cpuctx,
251                   struct perf_counter_context *ctx)
252 {
253         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
254                 return;
255
256         counter->state = PERF_COUNTER_STATE_INACTIVE;
257         counter->tstamp_stopped = ctx->time;
258         counter->pmu->disable(counter);
259         counter->oncpu = -1;
260
261         if (!is_software_counter(counter))
262                 cpuctx->active_oncpu--;
263         ctx->nr_active--;
264         if (counter->attr.exclusive || !cpuctx->active_oncpu)
265                 cpuctx->exclusive = 0;
266 }
267
268 static void
269 group_sched_out(struct perf_counter *group_counter,
270                 struct perf_cpu_context *cpuctx,
271                 struct perf_counter_context *ctx)
272 {
273         struct perf_counter *counter;
274
275         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
276                 return;
277
278         counter_sched_out(group_counter, cpuctx, ctx);
279
280         /*
281          * Schedule out siblings (if any):
282          */
283         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
284                 counter_sched_out(counter, cpuctx, ctx);
285
286         if (group_counter->attr.exclusive)
287                 cpuctx->exclusive = 0;
288 }
289
290 /*
291  * Cross CPU call to remove a performance counter
292  *
293  * We disable the counter on the hardware level first. After that we
294  * remove it from the context list.
295  */
296 static void __perf_counter_remove_from_context(void *info)
297 {
298         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
299         struct perf_counter *counter = info;
300         struct perf_counter_context *ctx = counter->ctx;
301
302         /*
303          * If this is a task context, we need to check whether it is
304          * the current task context of this cpu. If not it has been
305          * scheduled out before the smp call arrived.
306          */
307         if (ctx->task && cpuctx->task_ctx != ctx)
308                 return;
309
310         spin_lock(&ctx->lock);
311         /*
312          * Protect the list operation against NMI by disabling the
313          * counters on a global level.
314          */
315         perf_disable();
316
317         counter_sched_out(counter, cpuctx, ctx);
318
319         list_del_counter(counter, ctx);
320
321         if (!ctx->task) {
322                 /*
323                  * Allow more per task counters with respect to the
324                  * reservation:
325                  */
326                 cpuctx->max_pertask =
327                         min(perf_max_counters - ctx->nr_counters,
328                             perf_max_counters - perf_reserved_percpu);
329         }
330
331         perf_enable();
332         spin_unlock(&ctx->lock);
333 }
334
335
336 /*
337  * Remove the counter from a task's (or a CPU's) list of counters.
338  *
339  * Must be called with ctx->mutex held.
340  *
341  * CPU counters are removed with an smp call. For task counters we only
342  * make the call when the task is on a CPU.
343  *
344  * If counter->ctx is a cloned context, callers must make sure that
345  * every task struct that counter->ctx->task could possibly point to
346  * remains valid.  This is OK when called from perf_release since
347  * that only calls us on the top-level context, which can't be a clone.
348  * When called from perf_counter_exit_task, it's OK because the
349  * context has been detached from its task.
350  */
351 static void perf_counter_remove_from_context(struct perf_counter *counter)
352 {
353         struct perf_counter_context *ctx = counter->ctx;
354         struct task_struct *task = ctx->task;
355
356         if (!task) {
357                 /*
358                  * Per cpu counters are removed via an smp call and
359          * the removal is always successful.
360                  */
361                 smp_call_function_single(counter->cpu,
362                                          __perf_counter_remove_from_context,
363                                          counter, 1);
364                 return;
365         }
366
367 retry:
368         task_oncpu_function_call(task, __perf_counter_remove_from_context,
369                                  counter);
370
371         spin_lock_irq(&ctx->lock);
372         /*
373          * If the context is active we need to retry the smp call.
374          */
375         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
376                 spin_unlock_irq(&ctx->lock);
377                 goto retry;
378         }
379
380         /*
381          * The lock prevents this context from being scheduled in, so
382          * we can safely remove the counter if the call above did not
383          * succeed.
384          */
385         if (!list_empty(&counter->list_entry)) {
386                 list_del_counter(counter, ctx);
387         }
388         spin_unlock_irq(&ctx->lock);
389 }
390
391 static inline u64 perf_clock(void)
392 {
393         return cpu_clock(smp_processor_id());
394 }
395
396 /*
397  * Update the record of the current time in a context.
398  */
399 static void update_context_time(struct perf_counter_context *ctx)
400 {
401         u64 now = perf_clock();
402
403         ctx->time += now - ctx->timestamp;
404         ctx->timestamp = now;
405 }
406
407 /*
408  * Update the total_time_enabled and total_time_running fields for a counter.
409  */
410 static void update_counter_times(struct perf_counter *counter)
411 {
412         struct perf_counter_context *ctx = counter->ctx;
413         u64 run_end;
414
415         if (counter->state < PERF_COUNTER_STATE_INACTIVE)
416                 return;
417
418         counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
419
420         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
421                 run_end = counter->tstamp_stopped;
422         else
423                 run_end = ctx->time;
424
425         counter->total_time_running = run_end - counter->tstamp_running;
426 }
427
428 /*
429  * Update total_time_enabled and total_time_running for all counters in a group.
430  */
431 static void update_group_times(struct perf_counter *leader)
432 {
433         struct perf_counter *counter;
434
435         update_counter_times(leader);
436         list_for_each_entry(counter, &leader->sibling_list, list_entry)
437                 update_counter_times(counter);
438 }
439
440 /*
441  * Cross CPU call to disable a performance counter
442  */
443 static void __perf_counter_disable(void *info)
444 {
445         struct perf_counter *counter = info;
446         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
447         struct perf_counter_context *ctx = counter->ctx;
448
449         /*
450          * If this is a per-task counter, we need to check whether this
451          * counter's task is the current task on this cpu.
452          */
453         if (ctx->task && cpuctx->task_ctx != ctx)
454                 return;
455
456         spin_lock(&ctx->lock);
457
458         /*
459          * If the counter is on, turn it off.
460          * If it is in error state, leave it in error state.
461          */
462         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
463                 update_context_time(ctx);
464                 update_counter_times(counter);
465                 if (counter == counter->group_leader)
466                         group_sched_out(counter, cpuctx, ctx);
467                 else
468                         counter_sched_out(counter, cpuctx, ctx);
469                 counter->state = PERF_COUNTER_STATE_OFF;
470         }
471
472         spin_unlock(&ctx->lock);
473 }
474
475 /*
476  * Disable a counter.
477  *
478  * If counter->ctx is a cloned context, callers must make sure that
479  * every task struct that counter->ctx->task could possibly point to
480  * remains valid.  This condition is satisfied when called through
481  * perf_counter_for_each_child or perf_counter_for_each because they
482  * hold the top-level counter's child_mutex, so any descendant that
483  * goes to exit will block in sync_child_counter.
484  * When called from perf_pending_counter it's OK because counter->ctx
485  * is the current context on this CPU and preemption is disabled,
486  * hence we can't get into perf_counter_task_sched_out for this context.
487  */
488 static void perf_counter_disable(struct perf_counter *counter)
489 {
490         struct perf_counter_context *ctx = counter->ctx;
491         struct task_struct *task = ctx->task;
492
493         if (!task) {
494                 /*
495                  * Disable the counter on the cpu that it's on
496                  */
497                 smp_call_function_single(counter->cpu, __perf_counter_disable,
498                                          counter, 1);
499                 return;
500         }
501
502  retry:
503         task_oncpu_function_call(task, __perf_counter_disable, counter);
504
505         spin_lock_irq(&ctx->lock);
506         /*
507          * If the counter is still active, we need to retry the cross-call.
508          */
509         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
510                 spin_unlock_irq(&ctx->lock);
511                 goto retry;
512         }
513
514         /*
515          * Since we have the lock this context can't be scheduled
516          * in, so we can change the state safely.
517          */
518         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
519                 update_counter_times(counter);
520                 counter->state = PERF_COUNTER_STATE_OFF;
521         }
522
523         spin_unlock_irq(&ctx->lock);
524 }
525
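/*
 * Put a single counter on the PMU for the given CPU.  Returns -EAGAIN
 * if the pmu refuses to enable it, leaving the counter inactive.
 */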
526 static int
527 counter_sched_in(struct perf_counter *counter,
528                  struct perf_cpu_context *cpuctx,
529                  struct perf_counter_context *ctx,
530                  int cpu)
531 {
532         if (counter->state <= PERF_COUNTER_STATE_OFF)
533                 return 0;
534
535         counter->state = PERF_COUNTER_STATE_ACTIVE;
536         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
537         /*
538          * The new state must be visible before we turn it on in the hardware:
539          */
540         smp_wmb();
541
542         if (counter->pmu->enable(counter)) {
543                 counter->state = PERF_COUNTER_STATE_INACTIVE;
544                 counter->oncpu = -1;
545                 return -EAGAIN;
546         }
547
548         counter->tstamp_running += ctx->time - counter->tstamp_stopped;
549
550         if (!is_software_counter(counter))
551                 cpuctx->active_oncpu++;
552         ctx->nr_active++;
553
554         if (counter->attr.exclusive)
555                 cpuctx->exclusive = 1;
556
557         return 0;
558 }
559
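/*
 * Schedule a counter group in as one unit: the leader and all siblings
 * either go on the PMU together, or the partially scheduled group is
 * rolled back and -EAGAIN is returned.
 */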
560 static int
561 group_sched_in(struct perf_counter *group_counter,
562                struct perf_cpu_context *cpuctx,
563                struct perf_counter_context *ctx,
564                int cpu)
565 {
566         struct perf_counter *counter, *partial_group;
567         int ret;
568
569         if (group_counter->state == PERF_COUNTER_STATE_OFF)
570                 return 0;
571
572         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
573         if (ret)
574                 return ret < 0 ? ret : 0;
575
576         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
577                 return -EAGAIN;
578
579         /*
580          * Schedule in siblings as one group (if any):
581          */
582         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
583                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
584                         partial_group = counter;
585                         goto group_error;
586                 }
587         }
588
589         return 0;
590
591 group_error:
592         /*
593          * Groups can be scheduled in as one unit only, so undo any
594          * partial group before returning:
595          */
596         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
597                 if (counter == partial_group)
598                         break;
599                 counter_sched_out(counter, cpuctx, ctx);
600         }
601         counter_sched_out(group_counter, cpuctx, ctx);
602
603         return -EAGAIN;
604 }
605
606 /*
607  * Return 1 for a group consisting entirely of software counters,
608  * 0 if the group contains any hardware counters.
609  */
610 static int is_software_only_group(struct perf_counter *leader)
611 {
612         struct perf_counter *counter;
613
614         if (!is_software_counter(leader))
615                 return 0;
616
617         list_for_each_entry(counter, &leader->sibling_list, list_entry)
618                 if (!is_software_counter(counter))
619                         return 0;
620
621         return 1;
622 }
623
624 /*
625  * Work out whether we can put this counter group on the CPU now.
626  */
627 static int group_can_go_on(struct perf_counter *counter,
628                            struct perf_cpu_context *cpuctx,
629                            int can_add_hw)
630 {
631         /*
632          * Groups consisting entirely of software counters can always go on.
633          */
634         if (is_software_only_group(counter))
635                 return 1;
636         /*
637          * If an exclusive group is already on, no other hardware
638          * counters can go on.
639          */
640         if (cpuctx->exclusive)
641                 return 0;
642         /*
643          * If this group is exclusive and there are already
644          * counters on the CPU, it can't go on.
645          */
646         if (counter->attr.exclusive && cpuctx->active_oncpu)
647                 return 0;
648         /*
649          * Otherwise, try to add it if all previous groups were able
650          * to go on.
651          */
652         return can_add_hw;
653 }
654
655 static void add_counter_to_ctx(struct perf_counter *counter,
656                                struct perf_counter_context *ctx)
657 {
658         list_add_counter(counter, ctx);
659         counter->tstamp_enabled = ctx->time;
660         counter->tstamp_running = ctx->time;
661         counter->tstamp_stopped = ctx->time;
662 }
663
664 /*
665  * Cross CPU call to install and enable a performance counter
666  *
667  * Must be called with ctx->mutex held
668  */
669 static void __perf_install_in_context(void *info)
670 {
671         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
672         struct perf_counter *counter = info;
673         struct perf_counter_context *ctx = counter->ctx;
674         struct perf_counter *leader = counter->group_leader;
675         int cpu = smp_processor_id();
676         int err;
677
678         /*
679          * If this is a task context, we need to check whether it is
680          * the current task context of this cpu. If not it has been
681          * scheduled out before the smp call arrived.
682          * Or possibly this is the right context but it isn't
683          * on this cpu because it had no counters.
684          */
685         if (ctx->task && cpuctx->task_ctx != ctx) {
686                 if (cpuctx->task_ctx || ctx->task != current)
687                         return;
688                 cpuctx->task_ctx = ctx;
689         }
690
691         spin_lock(&ctx->lock);
692         ctx->is_active = 1;
693         update_context_time(ctx);
694
695         /*
696          * Protect the list operation against NMI by disabling the
697          * counters on a global level. NOP for non-NMI-based counters.
698          */
699         perf_disable();
700
701         add_counter_to_ctx(counter, ctx);
702
703         /*
704          * Don't put the counter on if it is disabled or if
705          * it is in a group and the group isn't on.
706          */
707         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
708             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
709                 goto unlock;
710
711         /*
712          * An exclusive counter can't go on if there are already active
713          * hardware counters, and no hardware counter can go on if there
714          * is already an exclusive counter on.
715          */
716         if (!group_can_go_on(counter, cpuctx, 1))
717                 err = -EEXIST;
718         else
719                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
720
721         if (err) {
722                 /*
723                  * This counter couldn't go on.  If it is in a group
724                  * then we have to pull the whole group off.
725                  * If the counter group is pinned then put it in error state.
726                  */
727                 if (leader != counter)
728                         group_sched_out(leader, cpuctx, ctx);
729                 if (leader->attr.pinned) {
730                         update_group_times(leader);
731                         leader->state = PERF_COUNTER_STATE_ERROR;
732                 }
733         }
734
735         if (!err && !ctx->task && cpuctx->max_pertask)
736                 cpuctx->max_pertask--;
737
738  unlock:
739         perf_enable();
740
741         spin_unlock(&ctx->lock);
742 }
743
744 /*
745  * Attach a performance counter to a context
746  *
747  * First we add the counter to the list for its context; it is not
748  * activated on the hardware until it gets scheduled in.
749  *
750  * If the counter is attached to a task which is on a CPU we use a smp
751  * call to enable it in the task context. The task might have been
752  * scheduled away, but we check this in the smp call again.
753  *
754  * Must be called with ctx->mutex held.
755  */
756 static void
757 perf_install_in_context(struct perf_counter_context *ctx,
758                         struct perf_counter *counter,
759                         int cpu)
760 {
761         struct task_struct *task = ctx->task;
762
763         if (!task) {
764                 /*
765                  * Per cpu counters are installed via an smp call and
766          * the install is always successful.
767                  */
768                 smp_call_function_single(cpu, __perf_install_in_context,
769                                          counter, 1);
770                 return;
771         }
772
773 retry:
774         task_oncpu_function_call(task, __perf_install_in_context,
775                                  counter);
776
777         spin_lock_irq(&ctx->lock);
778         /*
779          * If the context is active we need to retry the smp call.
780          */
781         if (ctx->is_active && list_empty(&counter->list_entry)) {
782                 spin_unlock_irq(&ctx->lock);
783                 goto retry;
784         }
785
786         /*
787          * The lock prevents this context from being scheduled in, so
788          * we can safely add the counter if the call above did not
789          * succeed.
790          */
791         if (list_empty(&counter->list_entry))
792                 add_counter_to_ctx(counter, ctx);
793         spin_unlock_irq(&ctx->lock);
794 }
795
796 /*
797  * Cross CPU call to enable a performance counter
798  */
799 static void __perf_counter_enable(void *info)
800 {
801         struct perf_counter *counter = info;
802         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
803         struct perf_counter_context *ctx = counter->ctx;
804         struct perf_counter *leader = counter->group_leader;
805         int err;
806
807         /*
808          * If this is a per-task counter, we need to check whether this
809          * counter's task is the current task on this cpu.
810          */
811         if (ctx->task && cpuctx->task_ctx != ctx) {
812                 if (cpuctx->task_ctx || ctx->task != current)
813                         return;
814                 cpuctx->task_ctx = ctx;
815         }
816
817         spin_lock(&ctx->lock);
818         ctx->is_active = 1;
819         update_context_time(ctx);
820
821         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
822                 goto unlock;
823         counter->state = PERF_COUNTER_STATE_INACTIVE;
824         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
825
826         /*
827          * If the counter is in a group and isn't the group leader,
828          * then don't put it on unless the group is on.
829          */
830         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
831                 goto unlock;
832
833         if (!group_can_go_on(counter, cpuctx, 1)) {
834                 err = -EEXIST;
835         } else {
836                 perf_disable();
837                 if (counter == leader)
838                         err = group_sched_in(counter, cpuctx, ctx,
839                                              smp_processor_id());
840                 else
841                         err = counter_sched_in(counter, cpuctx, ctx,
842                                                smp_processor_id());
843                 perf_enable();
844         }
845
846         if (err) {
847                 /*
848                  * If this counter can't go on and it's part of a
849                  * group, then the whole group has to come off.
850                  */
851                 if (leader != counter)
852                         group_sched_out(leader, cpuctx, ctx);
853                 if (leader->attr.pinned) {
854                         update_group_times(leader);
855                         leader->state = PERF_COUNTER_STATE_ERROR;
856                 }
857         }
858
859  unlock:
860         spin_unlock(&ctx->lock);
861 }
862
863 /*
864  * Enable a counter.
865  *
866  * If counter->ctx is a cloned context, callers must make sure that
867  * every task struct that counter->ctx->task could possibly point to
868  * remains valid.  This condition is satisfied when called through
869  * perf_counter_for_each_child or perf_counter_for_each as described
870  * for perf_counter_disable.
871  */
872 static void perf_counter_enable(struct perf_counter *counter)
873 {
874         struct perf_counter_context *ctx = counter->ctx;
875         struct task_struct *task = ctx->task;
876
877         if (!task) {
878                 /*
879                  * Enable the counter on the cpu that it's on
880                  */
881                 smp_call_function_single(counter->cpu, __perf_counter_enable,
882                                          counter, 1);
883                 return;
884         }
885
886         spin_lock_irq(&ctx->lock);
887         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
888                 goto out;
889
890         /*
891          * If the counter is in error state, clear that first.
892          * That way, if we see the counter in error state below, we
893          * know that it has gone back into error state, as distinct
894          * from the task having been scheduled away before the
895          * cross-call arrived.
896          */
897         if (counter->state == PERF_COUNTER_STATE_ERROR)
898                 counter->state = PERF_COUNTER_STATE_OFF;
899
900  retry:
901         spin_unlock_irq(&ctx->lock);
902         task_oncpu_function_call(task, __perf_counter_enable, counter);
903
904         spin_lock_irq(&ctx->lock);
905
906         /*
907          * If the context is active and the counter is still off,
908          * we need to retry the cross-call.
909          */
910         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
911                 goto retry;
912
913         /*
914          * Since we have the lock this context can't be scheduled
915          * in, so we can change the state safely.
916          */
917         if (counter->state == PERF_COUNTER_STATE_OFF) {
918                 counter->state = PERF_COUNTER_STATE_INACTIVE;
919                 counter->tstamp_enabled =
920                         ctx->time - counter->total_time_enabled;
921         }
922  out:
923         spin_unlock_irq(&ctx->lock);
924 }
925
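/*
 * Backs PERF_COUNTER_IOC_REFRESH: raise the counter's event limit by
 * 'refresh' and re-enable it.
 */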
926 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
927 {
928         /*
929          * not supported on inherited counters
930          */
931         if (counter->attr.inherit)
932                 return -EINVAL;
933
934         atomic_add(refresh, &counter->event_limit);
935         perf_counter_enable(counter);
936
937         return 0;
938 }
939
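/*
 * Deactivate all counters in a context (groups and singletons alike)
 * and mark the context as no longer active.
 */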
940 void __perf_counter_sched_out(struct perf_counter_context *ctx,
941                               struct perf_cpu_context *cpuctx)
942 {
943         struct perf_counter *counter;
944
945         spin_lock(&ctx->lock);
946         ctx->is_active = 0;
947         if (likely(!ctx->nr_counters))
948                 goto out;
949         update_context_time(ctx);
950
951         perf_disable();
952         if (ctx->nr_active) {
953                 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
954                         if (counter != counter->group_leader)
955                                 counter_sched_out(counter, cpuctx, ctx);
956                         else
957                                 group_sched_out(counter, cpuctx, ctx);
958                 }
959         }
960         perf_enable();
961  out:
962         spin_unlock(&ctx->lock);
963 }
964
965 /*
966  * Test whether two contexts are equivalent, i.e. whether they
967  * have both been cloned from the same version of the same context
968  * and they both have the same number of enabled counters.
969  * If the number of enabled counters is the same, then the set
970  * of enabled counters should be the same, because these are both
971  * inherited contexts, therefore we can't access individual counters
972  * in them directly with an fd; we can only enable/disable all
973  * counters via prctl, or enable/disable all counters in a family
974  * via ioctl, which will have the same effect on both contexts.
975  */
976 static int context_equiv(struct perf_counter_context *ctx1,
977                          struct perf_counter_context *ctx2)
978 {
979         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
980                 && ctx1->parent_gen == ctx2->parent_gen
981                 && !ctx1->pin_count && !ctx2->pin_count;
982 }
983
984 /*
985  * Called from scheduler to remove the counters of the current task,
986  * with interrupts disabled.
987  *
988  * We stop each counter and update the counter value in counter->count.
989  *
990  * This does not protect us against NMI, but disable()
991  * sets the disabled bit in the control field of counter _before_
992  * accessing the counter control register. If an NMI hits, then it will
993  * not restart the counter.
994  */
995 void perf_counter_task_sched_out(struct task_struct *task,
996                                  struct task_struct *next, int cpu)
997 {
998         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
999         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1000         struct perf_counter_context *next_ctx;
1001         struct perf_counter_context *parent;
1002         struct pt_regs *regs;
1003         int do_switch = 1;
1004
1005         regs = task_pt_regs(task);
1006         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
1007
1008         if (likely(!ctx || !cpuctx->task_ctx))
1009                 return;
1010
1011         update_context_time(ctx);
1012
1013         rcu_read_lock();
1014         parent = rcu_dereference(ctx->parent_ctx);
1015         next_ctx = next->perf_counter_ctxp;
1016         if (parent && next_ctx &&
1017             rcu_dereference(next_ctx->parent_ctx) == parent) {
1018                 /*
1019                  * Looks like the two contexts are clones, so we might be
1020                  * able to optimize the context switch.  We lock both
1021                  * contexts and check that they are clones under the
1022                  * lock (including re-checking that neither has been
1023                  * uncloned in the meantime).  It doesn't matter which
1024                  * order we take the locks because no other cpu could
1025                  * be trying to lock both of these tasks.
1026                  */
1027                 spin_lock(&ctx->lock);
1028                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1029                 if (context_equiv(ctx, next_ctx)) {
1030                         /*
1031                          * XXX do we need a memory barrier of sorts
1032                          * wrt rcu_dereference() of perf_counter_ctxp
1033                          */
1034                         task->perf_counter_ctxp = next_ctx;
1035                         next->perf_counter_ctxp = ctx;
1036                         ctx->task = next;
1037                         next_ctx->task = task;
1038                         do_switch = 0;
1039                 }
1040                 spin_unlock(&next_ctx->lock);
1041                 spin_unlock(&ctx->lock);
1042         }
1043         rcu_read_unlock();
1044
1045         if (do_switch) {
1046                 __perf_counter_sched_out(ctx, cpuctx);
1047                 cpuctx->task_ctx = NULL;
1048         }
1049 }
1050
1051 /*
1052  * Called with IRQs disabled
1053  */
1054 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1055 {
1056         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1057
1058         if (!cpuctx->task_ctx)
1059                 return;
1060
1061         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1062                 return;
1063
1064         __perf_counter_sched_out(ctx, cpuctx);
1065         cpuctx->task_ctx = NULL;
1066 }
1067
1068 /*
1069  * Called with IRQs disabled
1070  */
1071 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1072 {
1073         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1074 }
1075
1076 static void
1077 __perf_counter_sched_in(struct perf_counter_context *ctx,
1078                         struct perf_cpu_context *cpuctx, int cpu)
1079 {
1080         struct perf_counter *counter;
1081         int can_add_hw = 1;
1082
1083         spin_lock(&ctx->lock);
1084         ctx->is_active = 1;
1085         if (likely(!ctx->nr_counters))
1086                 goto out;
1087
1088         ctx->timestamp = perf_clock();
1089
1090         perf_disable();
1091
1092         /*
1093          * First go through the list and put on any pinned groups
1094          * in order to give them the best chance of going on.
1095          */
1096         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1097                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1098                     !counter->attr.pinned)
1099                         continue;
1100                 if (counter->cpu != -1 && counter->cpu != cpu)
1101                         continue;
1102
1103                 if (counter != counter->group_leader)
1104                         counter_sched_in(counter, cpuctx, ctx, cpu);
1105                 else {
1106                         if (group_can_go_on(counter, cpuctx, 1))
1107                                 group_sched_in(counter, cpuctx, ctx, cpu);
1108                 }
1109
1110                 /*
1111                  * If this pinned group hasn't been scheduled,
1112                  * put it in error state.
1113                  */
1114                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1115                         update_group_times(counter);
1116                         counter->state = PERF_COUNTER_STATE_ERROR;
1117                 }
1118         }
1119
1120         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1121                 /*
1122                  * Ignore counters in OFF or ERROR state, and
1123                  * ignore pinned counters since we did them already.
1124                  */
1125                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1126                     counter->attr.pinned)
1127                         continue;
1128
1129                 /*
1130                  * Listen to the 'cpu' scheduling filter constraint
1131                  * of counters:
1132                  */
1133                 if (counter->cpu != -1 && counter->cpu != cpu)
1134                         continue;
1135
1136                 if (counter != counter->group_leader) {
1137                         if (counter_sched_in(counter, cpuctx, ctx, cpu))
1138                                 can_add_hw = 0;
1139                 } else {
1140                         if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1141                                 if (group_sched_in(counter, cpuctx, ctx, cpu))
1142                                         can_add_hw = 0;
1143                         }
1144                 }
1145         }
1146         perf_enable();
1147  out:
1148         spin_unlock(&ctx->lock);
1149 }
1150
1151 /*
1152  * Called from scheduler to add the counters of the current task
1153  * with interrupts disabled.
1154  *
1155  * We restore the counter value and then enable it.
1156  *
1157  * This does not protect us against NMI, but enable()
1158  * sets the enabled bit in the control field of counter _before_
1159  * accessing the counter control register. If an NMI hits, then it will
1160  * keep the counter running.
1161  */
1162 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1163 {
1164         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1165         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1166
1167         if (likely(!ctx))
1168                 return;
1169         if (cpuctx->task_ctx == ctx)
1170                 return;
1171         __perf_counter_sched_in(ctx, cpuctx, cpu);
1172         cpuctx->task_ctx = ctx;
1173 }
1174
1175 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1176 {
1177         struct perf_counter_context *ctx = &cpuctx->ctx;
1178
1179         __perf_counter_sched_in(ctx, cpuctx, cpu);
1180 }
1181
1182 #define MAX_INTERRUPTS (~0ULL)
1183
1184 static void perf_log_throttle(struct perf_counter *counter, int enable);
1185 static void perf_log_period(struct perf_counter *counter, u64 period);
1186
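/*
 * Unthrottle counters that hit the interrupt limit and, for counters
 * using attr.freq, re-estimate the sample period so the interrupt rate
 * approaches the requested sample_freq.
 */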
1187 static void perf_adjust_freq(struct perf_counter_context *ctx)
1188 {
1189         struct perf_counter *counter;
1190         struct hw_perf_counter *hwc;
1191         u64 interrupts, sample_period;
1192         u64 events, period, freq;
1193         s64 delta;
1194
1195         spin_lock(&ctx->lock);
1196         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1197                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1198                         continue;
1199
1200                 hwc = &counter->hw;
1201
1202                 interrupts = hwc->interrupts;
1203                 hwc->interrupts = 0;
1204
1205                 if (interrupts == MAX_INTERRUPTS) {
1206                         perf_log_throttle(counter, 1);
1207                         counter->pmu->unthrottle(counter);
1208                         interrupts = 2*sysctl_perf_counter_limit/HZ;
1209                 }
1210
1211                 if (!counter->attr.freq || !counter->attr.sample_freq)
1212                         continue;
1213
1214                 if (counter->attr.sample_freq < HZ) {
1215                         freq = counter->attr.sample_freq;
1216
1217                         hwc->freq_count += freq;
1218                         hwc->freq_interrupts += interrupts;
1219
1220                         if (hwc->freq_count < HZ)
1221                                 continue;
1222
1223                         interrupts = hwc->freq_interrupts;
1224                         hwc->freq_interrupts = 0;
1225                         hwc->freq_count -= HZ;
1226                 } else
1227                         freq = HZ;
1228
1229                 events = freq * interrupts * hwc->sample_period;
1230                 period = div64_u64(events, counter->attr.sample_freq);
1231
1232                 delta = (s64)(1 + period - hwc->sample_period);
1233                 delta >>= 1;
1234
1235                 sample_period = hwc->sample_period + delta;
1236
1237                 if (!sample_period)
1238                         sample_period = 1;
1239
1240                 perf_log_period(counter, sample_period);
1241
1242                 hwc->sample_period = sample_period;
1243         }
1244         spin_unlock(&ctx->lock);
1245 }
1246
1247 /*
1248  * Round-robin a context's counters:
1249  */
1250 static void rotate_ctx(struct perf_counter_context *ctx)
1251 {
1252         struct perf_counter *counter;
1253
1254         if (!ctx->nr_counters)
1255                 return;
1256
1257         spin_lock(&ctx->lock);
1258         /*
1259          * Rotate the first entry last (works just fine for group counters too):
1260          */
1261         perf_disable();
1262         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1263                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1264                 break;
1265         }
1266         perf_enable();
1267
1268         spin_unlock(&ctx->lock);
1269 }
1270
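/*
 * Driven from the timer tick: adjust sampling periods, then rotate the
 * per-cpu and per-task counter lists so that all counters get their
 * turn on the PMU.
 */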
1271 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1272 {
1273         struct perf_cpu_context *cpuctx;
1274         struct perf_counter_context *ctx;
1275
1276         if (!atomic_read(&nr_counters))
1277                 return;
1278
1279         cpuctx = &per_cpu(perf_cpu_context, cpu);
1280         ctx = curr->perf_counter_ctxp;
1281
1282         perf_adjust_freq(&cpuctx->ctx);
1283         if (ctx)
1284                 perf_adjust_freq(ctx);
1285
1286         perf_counter_cpu_sched_out(cpuctx);
1287         if (ctx)
1288                 __perf_counter_task_sched_out(ctx);
1289
1290         rotate_ctx(&cpuctx->ctx);
1291         if (ctx)
1292                 rotate_ctx(ctx);
1293
1294         perf_counter_cpu_sched_in(cpuctx, cpu);
1295         if (ctx)
1296                 perf_counter_task_sched_in(curr, cpu);
1297 }
1298
1299 /*
1300  * Cross CPU call to read the hardware counter
1301  */
1302 static void __read(void *info)
1303 {
1304         struct perf_counter *counter = info;
1305         struct perf_counter_context *ctx = counter->ctx;
1306         unsigned long flags;
1307
1308         local_irq_save(flags);
1309         if (ctx->is_active)
1310                 update_context_time(ctx);
1311         counter->pmu->read(counter);
1312         update_counter_times(counter);
1313         local_irq_restore(flags);
1314 }
1315
1316 static u64 perf_counter_read(struct perf_counter *counter)
1317 {
1318         /*
1319          * If counter is enabled and currently active on a CPU, update the
1320          * value in the counter structure:
1321          */
1322         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1323                 smp_call_function_single(counter->oncpu,
1324                                          __read, counter, 1);
1325         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1326                 update_counter_times(counter);
1327         }
1328
1329         return atomic64_read(&counter->count);
1330 }
1331
1332 /*
1333  * Initialize the perf_counter context in a task_struct:
1334  */
1335 static void
1336 __perf_counter_init_context(struct perf_counter_context *ctx,
1337                             struct task_struct *task)
1338 {
1339         memset(ctx, 0, sizeof(*ctx));
1340         spin_lock_init(&ctx->lock);
1341         mutex_init(&ctx->mutex);
1342         INIT_LIST_HEAD(&ctx->counter_list);
1343         INIT_LIST_HEAD(&ctx->event_list);
1344         atomic_set(&ctx->refcount, 1);
1345         ctx->task = task;
1346 }
1347
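/*
 * Look up the counter context for a cpu (cpu != -1) or a task (by pid),
 * allocating a task context if none exists yet.  Returns the context
 * with an extra reference held, or an ERR_PTR on failure.
 */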
1348 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1349 {
1350         struct perf_counter_context *parent_ctx;
1351         struct perf_counter_context *ctx;
1352         struct perf_cpu_context *cpuctx;
1353         struct task_struct *task;
1354         unsigned long flags;
1355         int err;
1356
1357         /*
1358          * If cpu is not a wildcard then this is a percpu counter:
1359          */
1360         if (cpu != -1) {
1361                 /* Must be root to operate on a CPU counter: */
1362                 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1363                         return ERR_PTR(-EACCES);
1364
1365                 if (cpu < 0 || cpu > num_possible_cpus())
1366                         return ERR_PTR(-EINVAL);
1367
1368                 /*
1369          * We could be clever and allow attaching a counter to an
1370                  * offline CPU and activate it when the CPU comes up, but
1371                  * that's for later.
1372                  */
1373                 if (!cpu_isset(cpu, cpu_online_map))
1374                         return ERR_PTR(-ENODEV);
1375
1376                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1377                 ctx = &cpuctx->ctx;
1378                 get_ctx(ctx);
1379
1380                 return ctx;
1381         }
1382
1383         rcu_read_lock();
1384         if (!pid)
1385                 task = current;
1386         else
1387                 task = find_task_by_vpid(pid);
1388         if (task)
1389                 get_task_struct(task);
1390         rcu_read_unlock();
1391
1392         if (!task)
1393                 return ERR_PTR(-ESRCH);
1394
1395         /*
1396          * Can't attach counters to a dying task.
1397          */
1398         err = -ESRCH;
1399         if (task->flags & PF_EXITING)
1400                 goto errout;
1401
1402         /* Reuse ptrace permission checks for now. */
1403         err = -EACCES;
1404         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1405                 goto errout;
1406
1407  retry:
1408         ctx = perf_lock_task_context(task, &flags);
1409         if (ctx) {
1410                 parent_ctx = ctx->parent_ctx;
1411                 if (parent_ctx) {
1412                         put_ctx(parent_ctx);
1413                         ctx->parent_ctx = NULL;         /* no longer a clone */
1414                 }
1415                 /*
1416                  * Get an extra reference before dropping the lock so that
1417                  * this context won't get freed if the task exits.
1418                  */
1419                 get_ctx(ctx);
1420                 spin_unlock_irqrestore(&ctx->lock, flags);
1421         }
1422
1423         if (!ctx) {
1424                 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1425                 err = -ENOMEM;
1426                 if (!ctx)
1427                         goto errout;
1428                 __perf_counter_init_context(ctx, task);
1429                 get_ctx(ctx);
1430                 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1431                         /*
1432                          * We raced with some other task; use
1433                          * the context they set.
1434                          */
1435                         kfree(ctx);
1436                         goto retry;
1437                 }
1438                 get_task_struct(task);
1439         }
1440
1441         put_task_struct(task);
1442         return ctx;
1443
1444  errout:
1445         put_task_struct(task);
1446         return ERR_PTR(err);
1447 }
1448
1449 static void free_counter_rcu(struct rcu_head *head)
1450 {
1451         struct perf_counter *counter;
1452
1453         counter = container_of(head, struct perf_counter, rcu_head);
1454         if (counter->ns)
1455                 put_pid_ns(counter->ns);
1456         kfree(counter);
1457 }
1458
1459 static void perf_pending_sync(struct perf_counter *counter);
1460
1461 static void free_counter(struct perf_counter *counter)
1462 {
1463         perf_pending_sync(counter);
1464
1465         atomic_dec(&nr_counters);
1466         if (counter->attr.mmap)
1467                 atomic_dec(&nr_mmap_counters);
1468         if (counter->attr.comm)
1469                 atomic_dec(&nr_comm_counters);
1470
1471         if (counter->destroy)
1472                 counter->destroy(counter);
1473
1474         put_ctx(counter->ctx);
1475         call_rcu(&counter->rcu_head, free_counter_rcu);
1476 }
1477
1478 /*
1479  * Called when the last reference to the file is gone.
1480  */
1481 static int perf_release(struct inode *inode, struct file *file)
1482 {
1483         struct perf_counter *counter = file->private_data;
1484         struct perf_counter_context *ctx = counter->ctx;
1485
1486         file->private_data = NULL;
1487
1488         WARN_ON_ONCE(ctx->parent_ctx);
1489         mutex_lock(&ctx->mutex);
1490         perf_counter_remove_from_context(counter);
1491         mutex_unlock(&ctx->mutex);
1492
1493         mutex_lock(&counter->owner->perf_counter_mutex);
1494         list_del_init(&counter->owner_entry);
1495         mutex_unlock(&counter->owner->perf_counter_mutex);
1496         put_task_struct(counter->owner);
1497
1498         free_counter(counter);
1499
1500         return 0;
1501 }
1502
1503 /*
1504  * Read the performance counter - simple non blocking version for now
1505  */
1506 static ssize_t
1507 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1508 {
1509         u64 values[3];
1510         int n;
1511
1512         /*
1513          * Return end-of-file for a read on a counter that is in
1514          * error state (i.e. because it was pinned but it couldn't be
1515          * scheduled on to the CPU at some point).
1516          */
1517         if (counter->state == PERF_COUNTER_STATE_ERROR)
1518                 return 0;
1519
1520         WARN_ON_ONCE(counter->ctx->parent_ctx);
1521         mutex_lock(&counter->child_mutex);
1522         values[0] = perf_counter_read(counter);
1523         n = 1;
1524         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1525                 values[n++] = counter->total_time_enabled +
1526                         atomic64_read(&counter->child_total_time_enabled);
1527         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1528                 values[n++] = counter->total_time_running +
1529                         atomic64_read(&counter->child_total_time_running);
1530         if (counter->attr.read_format & PERF_FORMAT_ID)
1531                 values[n++] = counter->id;
1532         mutex_unlock(&counter->child_mutex);
1533
1534         if (count < n * sizeof(u64))
1535                 return -EINVAL;
1536         count = n * sizeof(u64);
1537
1538         if (copy_to_user(buf, values, count))
1539                 return -EFAULT;
1540
1541         return count;
1542 }
1543
1544 static ssize_t
1545 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1546 {
1547         struct perf_counter *counter = file->private_data;
1548
1549         return perf_read_hw(counter, buf, count);
1550 }
1551
1552 static unsigned int perf_poll(struct file *file, poll_table *wait)
1553 {
1554         struct perf_counter *counter = file->private_data;
1555         struct perf_mmap_data *data;
1556         unsigned int events = POLLHUP;
1557
1558         rcu_read_lock();
1559         data = rcu_dereference(counter->data);
1560         if (data)
1561                 events = atomic_xchg(&data->poll, 0);
1562         rcu_read_unlock();
1563
1564         poll_wait(file, &counter->waitq, wait);
1565
1566         return events;
1567 }
1568
1569 static void perf_counter_reset(struct perf_counter *counter)
1570 {
1571         (void)perf_counter_read(counter);
1572         atomic64_set(&counter->count, 0);
1573         perf_counter_update_userpage(counter);
1574 }
1575
1576 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1577                                           void (*func)(struct perf_counter *))
1578 {
1579         struct perf_counter_context *ctx = counter->ctx;
1580         struct perf_counter *sibling;
1581
1582         WARN_ON_ONCE(ctx->parent_ctx);
1583         mutex_lock(&ctx->mutex);
1584         counter = counter->group_leader;
1585
1586         func(counter);
1587         list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1588                 func(sibling);
1589         mutex_unlock(&ctx->mutex);
1590 }
1591
1592 /*
1593  * Holding the top-level counter's child_mutex means that any
1594  * descendant process that has inherited this counter will block
1595  * in sync_child_counter if it goes to exit, thus satisfying the
1596  * task existence requirements of perf_counter_enable/disable.
1597  */
1598 static void perf_counter_for_each_child(struct perf_counter *counter,
1599                                         void (*func)(struct perf_counter *))
1600 {
1601         struct perf_counter *child;
1602
1603         WARN_ON_ONCE(counter->ctx->parent_ctx);
1604         mutex_lock(&counter->child_mutex);
1605         func(counter);
1606         list_for_each_entry(child, &counter->child_list, child_list)
1607                 func(child);
1608         mutex_unlock(&counter->child_mutex);
1609 }
1610
1611 static void perf_counter_for_each(struct perf_counter *counter,
1612                                   void (*func)(struct perf_counter *))
1613 {
1614         struct perf_counter *child;
1615
1616         WARN_ON_ONCE(counter->ctx->parent_ctx);
1617         mutex_lock(&counter->child_mutex);
1618         perf_counter_for_each_sibling(counter, func);
1619         list_for_each_entry(child, &counter->child_list, child_list)
1620                 perf_counter_for_each_sibling(child, func);
1621         mutex_unlock(&counter->child_mutex);
1622 }
1623
1624 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1625 {
1626         struct perf_counter_context *ctx = counter->ctx;
1627         unsigned long size;
1628         int ret = 0;
1629         u64 value;
1630
1631         if (!counter->attr.sample_period)
1632                 return -EINVAL;
1633
1634         size = copy_from_user(&value, arg, sizeof(value));
1635         if (size != 0)          /* copy_from_user() returns bytes not copied */
1636                 return -EFAULT;
1637
1638         if (!value)
1639                 return -EINVAL;
1640
1641         spin_lock_irq(&ctx->lock);
1642         if (counter->attr.freq) {
1643                 if (value > sysctl_perf_counter_limit) {
1644                         ret = -EINVAL;
1645                         goto unlock;
1646                 }
1647
1648                 counter->attr.sample_freq = value;
1649         } else {
1650                 counter->attr.sample_period = value;
1651                 counter->hw.sample_period = value;
1652
1653                 perf_log_period(counter, value);
1654         }
1655 unlock:
1656         spin_unlock_irq(&ctx->lock);
1657
1658         return ret;
1659 }
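
/*
 * Example (not part of this file's build): a minimal user-space sketch of
 * using the PERF_COUNTER_IOC_PERIOD ioctl implemented above.  The ioctl
 * takes a pointer to the new u64 value; if the counter was opened with
 * attr.freq set, the value is interpreted as a frequency rather than a
 * period.  'fd' is assumed to be a counter file descriptor.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/perf_counter.h>

static int set_sample_period(int fd, __u64 period)
{
        return ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);
}
#endif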
1660
1661 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1662 {
1663         struct perf_counter *counter = file->private_data;
1664         void (*func)(struct perf_counter *);
1665         u32 flags = arg;
1666
1667         switch (cmd) {
1668         case PERF_COUNTER_IOC_ENABLE:
1669                 func = perf_counter_enable;
1670                 break;
1671         case PERF_COUNTER_IOC_DISABLE:
1672                 func = perf_counter_disable;
1673                 break;
1674         case PERF_COUNTER_IOC_RESET:
1675                 func = perf_counter_reset;
1676                 break;
1677
1678         case PERF_COUNTER_IOC_REFRESH:
1679                 return perf_counter_refresh(counter, arg);
1680
1681         case PERF_COUNTER_IOC_PERIOD:
1682                 return perf_counter_period(counter, (u64 __user *)arg);
1683
1684         default:
1685                 return -ENOTTY;
1686         }
1687
1688         if (flags & PERF_IOC_FLAG_GROUP)
1689                 perf_counter_for_each(counter, func);
1690         else
1691                 perf_counter_for_each_child(counter, func);
1692
1693         return 0;
1694 }
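
/*
 * Example (not part of this file's build): a minimal user-space sketch of
 * driving the ioctls above.  With PERF_IOC_FLAG_GROUP the operation is
 * applied to the whole sibling group; without it, only to the counter and
 * its inherited children.  'group_fd' is assumed to be the group leader's
 * file descriptor.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/perf_counter.h>

static void restart_group(int group_fd)
{
        ioctl(group_fd, PERF_COUNTER_IOC_RESET,  PERF_IOC_FLAG_GROUP);
        ioctl(group_fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
#endif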
1695
1696 int perf_counter_task_enable(void)
1697 {
1698         struct perf_counter *counter;
1699
1700         mutex_lock(&current->perf_counter_mutex);
1701         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1702                 perf_counter_for_each_child(counter, perf_counter_enable);
1703         mutex_unlock(&current->perf_counter_mutex);
1704
1705         return 0;
1706 }
1707
1708 int perf_counter_task_disable(void)
1709 {
1710         struct perf_counter *counter;
1711
1712         mutex_lock(&current->perf_counter_mutex);
1713         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1714                 perf_counter_for_each_child(counter, perf_counter_disable);
1715         mutex_unlock(&current->perf_counter_mutex);
1716
1717         return 0;
1718 }
1719
1720 /*
1721  * Callers need to ensure there can be no nesting of this function, otherwise
1722  * the seqlock logic goes bad. We cannot serialize this because the arch
1723  * code calls this from NMI context.
1724  */
1725 void perf_counter_update_userpage(struct perf_counter *counter)
1726 {
1727         struct perf_counter_mmap_page *userpg;
1728         struct perf_mmap_data *data;
1729
1730         rcu_read_lock();
1731         data = rcu_dereference(counter->data);
1732         if (!data)
1733                 goto unlock;
1734
1735         userpg = data->user_page;
1736
1737         /*
1738          * Disable preemption so as to not let the corresponding user-space
1739          * spin too long if we get preempted.
1740          */
1741         preempt_disable();
1742         ++userpg->lock;
1743         barrier();
1744         userpg->index = counter->hw.idx;
1745         userpg->offset = atomic64_read(&counter->count);
1746         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1747                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1748
1749         barrier();
1750         ++userpg->lock;
1751         preempt_enable();
1752 unlock:
1753         rcu_read_unlock();
1754 }
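
/*
 * Example (not part of this file's build): the user-space half of the
 * seqlock-style protocol above.  ->lock is bumped before and after the
 * update, so readers retry while it is odd or has changed under them.
 * 'pc' is assumed to point at the mmap()ed first page of the counter
 * buffer; a full barrier stands in for the architecture's rmb().
 */
#if 0
#include <linux/types.h>
#include <linux/perf_counter.h>

#define user_rmb()      __sync_synchronize()    /* stand-in read barrier */

static __u64 read_user_offset(volatile struct perf_counter_mmap_page *pc)
{
        __u32 seq;
        __u64 offset;

        do {
                seq = pc->lock;
                user_rmb();
                offset = pc->offset;    /* add the hw counter at pc->index */
                user_rmb();
        } while (pc->lock != seq || (seq & 1));

        return offset;
}
#endif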
1755
1756 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1757 {
1758         struct perf_counter *counter = vma->vm_file->private_data;
1759         struct perf_mmap_data *data;
1760         int ret = VM_FAULT_SIGBUS;
1761
1762         rcu_read_lock();
1763         data = rcu_dereference(counter->data);
1764         if (!data)
1765                 goto unlock;
1766
1767         if (vmf->pgoff == 0) {
1768                 vmf->page = virt_to_page(data->user_page);
1769         } else {
1770                 int nr = vmf->pgoff - 1;
1771
1772                 if ((unsigned)nr >= data->nr_pages)
1773                         goto unlock;
1774
1775                 vmf->page = virt_to_page(data->data_pages[nr]);
1776         }
1777         get_page(vmf->page);
1778         ret = 0;
1779 unlock:
1780         rcu_read_unlock();
1781
1782         return ret;
1783 }
1784
1785 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1786 {
1787         struct perf_mmap_data *data;
1788         unsigned long size;
1789         int i;
1790
1791         WARN_ON(atomic_read(&counter->mmap_count));
1792
1793         size = sizeof(struct perf_mmap_data);
1794         size += nr_pages * sizeof(void *);
1795
1796         data = kzalloc(size, GFP_KERNEL);
1797         if (!data)
1798                 goto fail;
1799
1800         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1801         if (!data->user_page)
1802                 goto fail_user_page;
1803
1804         for (i = 0; i < nr_pages; i++) {
1805                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1806                 if (!data->data_pages[i])
1807                         goto fail_data_pages;
1808         }
1809
1810         data->nr_pages = nr_pages;
1811         atomic_set(&data->lock, -1);
1812
1813         rcu_assign_pointer(counter->data, data);
1814
1815         return 0;
1816
1817 fail_data_pages:
1818         for (i--; i >= 0; i--)
1819                 free_page((unsigned long)data->data_pages[i]);
1820
1821         free_page((unsigned long)data->user_page);
1822
1823 fail_user_page:
1824         kfree(data);
1825
1826 fail:
1827         return -ENOMEM;
1828 }
1829
1830 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1831 {
1832         struct perf_mmap_data *data;
1833         int i;
1834
1835         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1836
1837         free_page((unsigned long)data->user_page);
1838         for (i = 0; i < data->nr_pages; i++)
1839                 free_page((unsigned long)data->data_pages[i]);
1840         kfree(data);
1841 }
1842
1843 static void perf_mmap_data_free(struct perf_counter *counter)
1844 {
1845         struct perf_mmap_data *data = counter->data;
1846
1847         WARN_ON(atomic_read(&counter->mmap_count));
1848
1849         rcu_assign_pointer(counter->data, NULL);
1850         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1851 }
1852
1853 static void perf_mmap_open(struct vm_area_struct *vma)
1854 {
1855         struct perf_counter *counter = vma->vm_file->private_data;
1856
1857         atomic_inc(&counter->mmap_count);
1858 }
1859
1860 static void perf_mmap_close(struct vm_area_struct *vma)
1861 {
1862         struct perf_counter *counter = vma->vm_file->private_data;
1863
1864         WARN_ON_ONCE(counter->ctx->parent_ctx);
1865         if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1866                 struct user_struct *user = current_user();
1867
1868                 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1869                 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1870                 perf_mmap_data_free(counter);
1871                 mutex_unlock(&counter->mmap_mutex);
1872         }
1873 }
1874
1875 static struct vm_operations_struct perf_mmap_vmops = {
1876         .open  = perf_mmap_open,
1877         .close = perf_mmap_close,
1878         .fault = perf_mmap_fault,
1879 };
1880
1881 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1882 {
1883         struct perf_counter *counter = file->private_data;
1884         unsigned long user_locked, user_lock_limit;
1885         struct user_struct *user = current_user();
1886         unsigned long locked, lock_limit;
1887         unsigned long vma_size;
1888         unsigned long nr_pages;
1889         long user_extra, extra;
1890         int ret = 0;
1891
1892         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1893                 return -EINVAL;
1894
1895         vma_size = vma->vm_end - vma->vm_start;
1896         nr_pages = (vma_size / PAGE_SIZE) - 1;
1897
1898         /*
1899          * If we have data pages ensure they're a power-of-two number, so we
1900          * can do bitmasks instead of modulo.
1901          */
1902         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1903                 return -EINVAL;
1904
1905         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1906                 return -EINVAL;
1907
1908         if (vma->vm_pgoff != 0)
1909                 return -EINVAL;
1910
1911         WARN_ON_ONCE(counter->ctx->parent_ctx);
1912         mutex_lock(&counter->mmap_mutex);
1913         if (atomic_inc_not_zero(&counter->mmap_count)) {
1914                 if (nr_pages != counter->data->nr_pages)
1915                         ret = -EINVAL;
1916                 goto unlock;
1917         }
1918
1919         user_extra = nr_pages + 1;
1920         user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1921
1922         /*
1923          * Increase the limit linearly with more CPUs:
1924          */
1925         user_lock_limit *= num_online_cpus();
1926
1927         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1928
1929         extra = 0;
1930         if (user_locked > user_lock_limit)
1931                 extra = user_locked - user_lock_limit;
1932
1933         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1934         lock_limit >>= PAGE_SHIFT;
1935         locked = vma->vm_mm->locked_vm + extra;
1936
1937         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1938                 ret = -EPERM;
1939                 goto unlock;
1940         }
1941
1942         WARN_ON(counter->data);
1943         ret = perf_mmap_data_alloc(counter, nr_pages);
1944         if (ret)
1945                 goto unlock;
1946
1947         atomic_set(&counter->mmap_count, 1);
1948         atomic_long_add(user_extra, &user->locked_vm);
1949         vma->vm_mm->locked_vm += extra;
1950         counter->data->nr_locked = extra;
1951 unlock:
1952         mutex_unlock(&counter->mmap_mutex);
1953
1954         vma->vm_flags &= ~VM_MAYWRITE;
1955         vma->vm_flags |= VM_RESERVED;
1956         vma->vm_ops = &perf_mmap_vmops;
1957
1958         return ret;
1959 }
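
/*
 * Example (not part of this file's build): the checks above require a
 * MAP_SHARED, read-only mapping at offset 0 covering one metadata page
 * plus a power-of-two number of data pages.  'fd' is assumed to be a
 * counter file descriptor and 'nr_data_pages' a power of two.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static void *map_counter(int fd, unsigned int nr_data_pages)
{
        size_t len = (1 + nr_data_pages) * (size_t)sysconf(_SC_PAGESIZE);

        return mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
}
#endif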
1960
1961 static int perf_fasync(int fd, struct file *filp, int on)
1962 {
1963         struct inode *inode = filp->f_path.dentry->d_inode;
1964         struct perf_counter *counter = filp->private_data;
1965         int retval;
1966
1967         mutex_lock(&inode->i_mutex);
1968         retval = fasync_helper(fd, filp, on, &counter->fasync);
1969         mutex_unlock(&inode->i_mutex);
1970
1971         if (retval < 0)
1972                 return retval;
1973
1974         return 0;
1975 }
1976
1977 static const struct file_operations perf_fops = {
1978         .release                = perf_release,
1979         .read                   = perf_read,
1980         .poll                   = perf_poll,
1981         .unlocked_ioctl         = perf_ioctl,
1982         .compat_ioctl           = perf_ioctl,
1983         .mmap                   = perf_mmap,
1984         .fasync                 = perf_fasync,
1985 };
1986
1987 /*
1988  * Perf counter wakeup
1989  *
1990  * If there's data, ensure we set the poll() state and publish everything
1991  * to user-space before waking everybody up.
1992  */
1993
1994 void perf_counter_wakeup(struct perf_counter *counter)
1995 {
1996         wake_up_all(&counter->waitq);
1997
1998         if (counter->pending_kill) {
1999                 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2000                 counter->pending_kill = 0;
2001         }
2002 }
2003
2004 /*
2005  * Pending wakeups
2006  *
2007  * Handle the case where we need to wake up from NMI (or rq->lock) context.
2008  *
2009  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2010  * single linked list and use cmpxchg() to add entries lockless.
2011  */
2012
2013 static void perf_pending_counter(struct perf_pending_entry *entry)
2014 {
2015         struct perf_counter *counter = container_of(entry,
2016                         struct perf_counter, pending);
2017
2018         if (counter->pending_disable) {
2019                 counter->pending_disable = 0;
2020                 perf_counter_disable(counter);
2021         }
2022
2023         if (counter->pending_wakeup) {
2024                 counter->pending_wakeup = 0;
2025                 perf_counter_wakeup(counter);
2026         }
2027 }
2028
2029 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2030
2031 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2032         PENDING_TAIL,
2033 };
2034
2035 static void perf_pending_queue(struct perf_pending_entry *entry,
2036                                void (*func)(struct perf_pending_entry *))
2037 {
2038         struct perf_pending_entry **head;
2039
2040         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2041                 return;
2042
2043         entry->func = func;
2044
2045         head = &get_cpu_var(perf_pending_head);
2046
2047         do {
2048                 entry->next = *head;
2049         } while (cmpxchg(head, entry->next, entry) != entry->next);
2050
2051         set_perf_counter_pending();
2052
2053         put_cpu_var(perf_pending_head);
2054 }
2055
2056 static int __perf_pending_run(void)
2057 {
2058         struct perf_pending_entry *list;
2059         int nr = 0;
2060
2061         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2062         while (list != PENDING_TAIL) {
2063                 void (*func)(struct perf_pending_entry *);
2064                 struct perf_pending_entry *entry = list;
2065
2066                 list = list->next;
2067
2068                 func = entry->func;
2069                 entry->next = NULL;
2070                 /*
2071                  * Ensure we observe the unqueue before we issue the wakeup,
2072                  * so that we won't be waiting forever.
2073                  * -- see perf_not_pending().
2074                  */
2075                 smp_wmb();
2076
2077                 func(entry);
2078                 nr++;
2079         }
2080
2081         return nr;
2082 }
2083
2084 static inline int perf_not_pending(struct perf_counter *counter)
2085 {
2086         /*
2087          * If we flush on whatever CPU we run on, there is a chance we don't
2088          * need to wait.
2089          */
2090         get_cpu();
2091         __perf_pending_run();
2092         put_cpu();
2093
2094         /*
2095          * Ensure we see the proper queue state before going to sleep
2096          * so that we do not miss the wakeup. -- see __perf_pending_run()
2097          */
2098         smp_rmb();
2099         return counter->pending.next == NULL;
2100 }
2101
2102 static void perf_pending_sync(struct perf_counter *counter)
2103 {
2104         wait_event(counter->waitq, perf_not_pending(counter));
2105 }
2106
2107 void perf_counter_do_pending(void)
2108 {
2109         __perf_pending_run();
2110 }
2111
2112 /*
2113  * Callchain support -- arch specific
2114  */
2115
2116 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2117 {
2118         return NULL;
2119 }
2120
2121 /*
2122  * Output
2123  */
2124
2125 struct perf_output_handle {
2126         struct perf_counter     *counter;
2127         struct perf_mmap_data   *data;
2128         unsigned long           head;
2129         unsigned long           offset;
2130         int                     nmi;
2131         int                     overflow;
2132         int                     locked;
2133         unsigned long           flags;
2134 };
2135
2136 static void perf_output_wakeup(struct perf_output_handle *handle)
2137 {
2138         atomic_set(&handle->data->poll, POLLIN);
2139
2140         if (handle->nmi) {
2141                 handle->counter->pending_wakeup = 1;
2142                 perf_pending_queue(&handle->counter->pending,
2143                                    perf_pending_counter);
2144         } else
2145                 perf_counter_wakeup(handle->counter);
2146 }
2147
2148 /*
2149  * Curious locking construct.
2150  *
2151  * We need to ensure a later event doesn't publish a head when a former
2152  * event isn't done writing. However since we need to deal with NMIs we
2153  * cannot fully serialize things.
2154  *
2155  * What we do is serialize between CPUs so we only have to deal with NMI
2156  * nesting on a single CPU.
2157  *
2158  * We only publish the head (and generate a wakeup) when the outer-most
2159  * event completes.
2160  */
2161 static void perf_output_lock(struct perf_output_handle *handle)
2162 {
2163         struct perf_mmap_data *data = handle->data;
2164         int cpu;
2165
2166         handle->locked = 0;
2167
2168         local_irq_save(handle->flags);
2169         cpu = smp_processor_id();
2170
2171         if (in_nmi() && atomic_read(&data->lock) == cpu)
2172                 return;
2173
2174         while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2175                 cpu_relax();
2176
2177         handle->locked = 1;
2178 }
2179
2180 static void perf_output_unlock(struct perf_output_handle *handle)
2181 {
2182         struct perf_mmap_data *data = handle->data;
2183         unsigned long head;
2184         int cpu;
2185
2186         data->done_head = data->head;
2187
2188         if (!handle->locked)
2189                 goto out;
2190
2191 again:
2192         /*
2193          * The xchg implies a full barrier that ensures all writes are done
2194          * before we publish the new head, matched by a rmb() in userspace when
2195          * reading this position.
2196          */
2197         while ((head = atomic_long_xchg(&data->done_head, 0)))
2198                 data->user_page->data_head = head;
2199
2200         /*
2201          * NMI can happen here, which means we can miss a done_head update.
2202          */
2203
2204         cpu = atomic_xchg(&data->lock, -1);
2205         WARN_ON_ONCE(cpu != smp_processor_id());
2206
2207         /*
2208          * Therefore we have to check that we did not miss a done_head update.
2209          */
2210         if (unlikely(atomic_long_read(&data->done_head))) {
2211                 /*
2212                  * Since we had it locked, we can lock it again.
2213                  */
2214                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2215                         cpu_relax();
2216
2217                 goto again;
2218         }
2219
2220         if (atomic_xchg(&data->wakeup, 0))
2221                 perf_output_wakeup(handle);
2222 out:
2223         local_irq_restore(handle->flags);
2224 }
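
/*
 * Example (not part of this file's build): the matching user-space read
 * side of the protocol above - load ->data_head, then rmb() before
 * touching the bytes it covers.  'pc' is assumed to be the mmap()ed
 * control page, 'base' the start of the data pages, 'mask' the data size
 * minus one and 'tail' the reader's own progress; for brevity this sketch
 * ignores records that wrap around the end of the buffer.
 */
#if 0
#include <linux/types.h>
#include <linux/perf_counter.h>

#define user_rmb()      __sync_synchronize()    /* stand-in read barrier */

static __u64 read_events(volatile struct perf_counter_mmap_page *pc,
                         unsigned char *base, __u64 mask, __u64 tail)
{
        __u64 head = pc->data_head;

        user_rmb();                     /* pairs with the xchg above */
        while (tail < head) {
                struct perf_event_header *hdr;

                hdr = (struct perf_event_header *)(base + (tail & mask));
                /* consume the record at hdr here */
                tail += hdr->size;
        }
        return tail;
}
#endif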
2225
2226 static int perf_output_begin(struct perf_output_handle *handle,
2227                              struct perf_counter *counter, unsigned int size,
2228                              int nmi, int overflow)
2229 {
2230         struct perf_mmap_data *data;
2231         unsigned int offset, head;
2232
2233         /*
2234          * For inherited counters we send all the output towards the parent.
2235          */
2236         if (counter->parent)
2237                 counter = counter->parent;
2238
2239         rcu_read_lock();
2240         data = rcu_dereference(counter->data);
2241         if (!data)
2242                 goto out;
2243
2244         handle->data     = data;
2245         handle->counter  = counter;
2246         handle->nmi      = nmi;
2247         handle->overflow = overflow;
2248
2249         if (!data->nr_pages)
2250                 goto fail;
2251
2252         perf_output_lock(handle);
2253
2254         do {
2255                 offset = head = atomic_long_read(&data->head);
2256                 head += size;
2257         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2258
2259         handle->offset  = offset;
2260         handle->head    = head;
2261
2262         if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2263                 atomic_set(&data->wakeup, 1);
2264
2265         return 0;
2266
2267 fail:
2268         perf_output_wakeup(handle);
2269 out:
2270         rcu_read_unlock();
2271
2272         return -ENOSPC;
2273 }
2274
2275 static void perf_output_copy(struct perf_output_handle *handle,
2276                              const void *buf, unsigned int len)
2277 {
2278         unsigned int pages_mask;
2279         unsigned int offset;
2280         unsigned int size;
2281         void **pages;
2282
2283         offset          = handle->offset;
2284         pages_mask      = handle->data->nr_pages - 1;
2285         pages           = handle->data->data_pages;
2286
2287         do {
2288                 unsigned int page_offset;
2289                 int nr;
2290
2291                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2292                 page_offset = offset & (PAGE_SIZE - 1);
2293                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2294
2295                 memcpy(pages[nr] + page_offset, buf, size);
2296
2297                 len         -= size;
2298                 buf         += size;
2299                 offset      += size;
2300         } while (len);
2301
2302         handle->offset = offset;
2303
2304         /*
2305          * Check we didn't copy past our reservation window, taking the
2306          * possible unsigned int wrap into account.
2307          */
2308         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2309 }
2310
2311 #define perf_output_put(handle, x) \
2312         perf_output_copy((handle), &(x), sizeof(x))
2313
2314 static void perf_output_end(struct perf_output_handle *handle)
2315 {
2316         struct perf_counter *counter = handle->counter;
2317         struct perf_mmap_data *data = handle->data;
2318
2319         int wakeup_events = counter->attr.wakeup_events;
2320
2321         if (handle->overflow && wakeup_events) {
2322                 int events = atomic_inc_return(&data->events);
2323                 if (events >= wakeup_events) {
2324                         atomic_sub(wakeup_events, &data->events);
2325                         atomic_set(&data->wakeup, 1);
2326                 }
2327         }
2328
2329         perf_output_unlock(handle);
2330         rcu_read_unlock();
2331 }
2332
2333 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2334 {
2335         /*
2336          * only top level counters have the pid namespace they were created in
2337          */
2338         if (counter->parent)
2339                 counter = counter->parent;
2340
2341         return task_tgid_nr_ns(p, counter->ns);
2342 }
2343
2344 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2345 {
2346         /*
2347          * only top level counters have the pid namespace they were created in
2348          */
2349         if (counter->parent)
2350                 counter = counter->parent;
2351
2352         return task_pid_nr_ns(p, counter->ns);
2353 }
2354
2355 static void perf_counter_output(struct perf_counter *counter,
2356                                 int nmi, struct pt_regs *regs, u64 addr)
2357 {
2358         int ret;
2359         u64 sample_type = counter->attr.sample_type;
2360         struct perf_output_handle handle;
2361         struct perf_event_header header;
2362         u64 ip;
2363         struct {
2364                 u32 pid, tid;
2365         } tid_entry;
2366         struct {
2367                 u64 id;
2368                 u64 counter;
2369         } group_entry;
2370         struct perf_callchain_entry *callchain = NULL;
2371         int callchain_size = 0;
2372         u64 time;
2373         struct {
2374                 u32 cpu, reserved;
2375         } cpu_entry;
2376
2377         header.type = 0;
2378         header.size = sizeof(header);
2379
2380         header.misc = PERF_EVENT_MISC_OVERFLOW;
2381         header.misc |= perf_misc_flags(regs);
2382
2383         if (sample_type & PERF_SAMPLE_IP) {
2384                 ip = perf_instruction_pointer(regs);
2385                 header.type |= PERF_SAMPLE_IP;
2386                 header.size += sizeof(ip);
2387         }
2388
2389         if (sample_type & PERF_SAMPLE_TID) {
2390                 /* resolve pid/tid in the counter's pid namespace */
2391                 tid_entry.pid = perf_counter_pid(counter, current);
2392                 tid_entry.tid = perf_counter_tid(counter, current);
2393
2394                 header.type |= PERF_SAMPLE_TID;
2395                 header.size += sizeof(tid_entry);
2396         }
2397
2398         if (sample_type & PERF_SAMPLE_TIME) {
2399                 /*
2400                  * Maybe do better on x86 and provide cpu_clock_nmi()
2401                  */
2402                 time = sched_clock();
2403
2404                 header.type |= PERF_SAMPLE_TIME;
2405                 header.size += sizeof(u64);
2406         }
2407
2408         if (sample_type & PERF_SAMPLE_ADDR) {
2409                 header.type |= PERF_SAMPLE_ADDR;
2410                 header.size += sizeof(u64);
2411         }
2412
2413         if (sample_type & PERF_SAMPLE_ID) {
2414                 header.type |= PERF_SAMPLE_ID;
2415                 header.size += sizeof(u64);
2416         }
2417
2418         if (sample_type & PERF_SAMPLE_CPU) {
2419                 header.type |= PERF_SAMPLE_CPU;
2420                 header.size += sizeof(cpu_entry);
2421
2422                 cpu_entry.cpu = raw_smp_processor_id();
2423         }
2424
2425         if (sample_type & PERF_SAMPLE_PERIOD) {
2426                 header.type |= PERF_SAMPLE_PERIOD;
2427                 header.size += sizeof(u64);
2428         }
2429
2430         if (sample_type & PERF_SAMPLE_GROUP) {
2431                 header.type |= PERF_SAMPLE_GROUP;
2432                 header.size += sizeof(u64) +
2433                         counter->nr_siblings * sizeof(group_entry);
2434         }
2435
2436         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2437                 callchain = perf_callchain(regs);
2438
2439                 if (callchain) {
2440                         callchain_size = (1 + callchain->nr) * sizeof(u64);
2441
2442                         header.type |= PERF_SAMPLE_CALLCHAIN;
2443                         header.size += callchain_size;
2444                 }
2445         }
2446
2447         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2448         if (ret)
2449                 return;
2450
2451         perf_output_put(&handle, header);
2452
2453         if (sample_type & PERF_SAMPLE_IP)
2454                 perf_output_put(&handle, ip);
2455
2456         if (sample_type & PERF_SAMPLE_TID)
2457                 perf_output_put(&handle, tid_entry);
2458
2459         if (sample_type & PERF_SAMPLE_TIME)
2460                 perf_output_put(&handle, time);
2461
2462         if (sample_type & PERF_SAMPLE_ADDR)
2463                 perf_output_put(&handle, addr);
2464
2465         if (sample_type & PERF_SAMPLE_ID)
2466                 perf_output_put(&handle, counter->id);
2467
2468         if (sample_type & PERF_SAMPLE_CPU)
2469                 perf_output_put(&handle, cpu_entry);
2470
2471         if (sample_type & PERF_SAMPLE_PERIOD)
2472                 perf_output_put(&handle, counter->hw.sample_period);
2473
2474         /*
2475          * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2476          */
2477         if (sample_type & PERF_SAMPLE_GROUP) {
2478                 struct perf_counter *leader, *sub;
2479                 u64 nr = counter->nr_siblings;
2480
2481                 perf_output_put(&handle, nr);
2482
2483                 leader = counter->group_leader;
2484                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2485                         if (sub != counter)
2486                                 sub->pmu->read(sub);
2487
2488                         group_entry.id = sub->id;
2489                         group_entry.counter = atomic64_read(&sub->count);
2490
2491                         perf_output_put(&handle, group_entry);
2492                 }
2493         }
2494
2495         if (callchain)
2496                 perf_output_copy(&handle, callchain, callchain_size);
2497
2498         perf_output_end(&handle);
2499 }
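
/*
 * Layout of the overflow record emitted above, in output order; each
 * optional field is present iff the corresponding PERF_SAMPLE_* bit is
 * set in attr.sample_type:
 *
 *     struct perf_event_header  header;
 *     u64                       ip;          PERF_SAMPLE_IP
 *     { u32 pid, tid; }                      PERF_SAMPLE_TID
 *     u64                       time;        PERF_SAMPLE_TIME
 *     u64                       addr;        PERF_SAMPLE_ADDR
 *     u64                       id;          PERF_SAMPLE_ID
 *     { u32 cpu, reserved; }                 PERF_SAMPLE_CPU
 *     u64                       period;      PERF_SAMPLE_PERIOD
 *     { u64 nr; { u64 id, counter; } [nr]; } PERF_SAMPLE_GROUP
 *     { u64 nr; u64 ips[nr]; }               PERF_SAMPLE_CALLCHAIN
 */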
2500
2501 /*
2502  * fork tracking
2503  */
2504
2505 struct perf_fork_event {
2506         struct task_struct      *task;
2507
2508         struct {
2509                 struct perf_event_header        header;
2510
2511                 u32                             pid;
2512                 u32                             ppid;
2513         } event;
2514 };
2515
2516 static void perf_counter_fork_output(struct perf_counter *counter,
2517                                      struct perf_fork_event *fork_event)
2518 {
2519         struct perf_output_handle handle;
2520         int size = fork_event->event.header.size;
2521         struct task_struct *task = fork_event->task;
2522         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2523
2524         if (ret)
2525                 return;
2526
2527         fork_event->event.pid = perf_counter_pid(counter, task);
2528         fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2529
2530         perf_output_put(&handle, fork_event->event);
2531         perf_output_end(&handle);
2532 }
2533
2534 static int perf_counter_fork_match(struct perf_counter *counter)
2535 {
2536         if (counter->attr.comm || counter->attr.mmap)
2537                 return 1;
2538
2539         return 0;
2540 }
2541
2542 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2543                                   struct perf_fork_event *fork_event)
2544 {
2545         struct perf_counter *counter;
2546
2547         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2548                 return;
2549
2550         rcu_read_lock();
2551         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2552                 if (perf_counter_fork_match(counter))
2553                         perf_counter_fork_output(counter, fork_event);
2554         }
2555         rcu_read_unlock();
2556 }
2557
2558 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2559 {
2560         struct perf_cpu_context *cpuctx;
2561         struct perf_counter_context *ctx;
2562
2563         cpuctx = &get_cpu_var(perf_cpu_context);
2564         perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2565         put_cpu_var(perf_cpu_context);
2566
2567         rcu_read_lock();
2568         /*
2569          * doesn't really matter which of the child contexts the
2570          * event ends up in.
2571          */
2572         ctx = rcu_dereference(current->perf_counter_ctxp);
2573         if (ctx)
2574                 perf_counter_fork_ctx(ctx, fork_event);
2575         rcu_read_unlock();
2576 }
2577
2578 void perf_counter_fork(struct task_struct *task)
2579 {
2580         struct perf_fork_event fork_event;
2581
2582         if (!atomic_read(&nr_comm_counters) &&
2583             !atomic_read(&nr_mmap_counters))
2584                 return;
2585
2586         fork_event = (struct perf_fork_event){
2587                 .task   = task,
2588                 .event  = {
2589                         .header = {
2590                                 .type = PERF_EVENT_FORK,
2591                                 .size = sizeof(fork_event.event),
2592                         },
2593                 },
2594         };
2595
2596         perf_counter_fork_event(&fork_event);
2597 }
2598
2599 /*
2600  * comm tracking
2601  */
2602
2603 struct perf_comm_event {
2604         struct task_struct      *task;
2605         char                    *comm;
2606         int                     comm_size;
2607
2608         struct {
2609                 struct perf_event_header        header;
2610
2611                 u32                             pid;
2612                 u32                             tid;
2613         } event;
2614 };
2615
2616 static void perf_counter_comm_output(struct perf_counter *counter,
2617                                      struct perf_comm_event *comm_event)
2618 {
2619         struct perf_output_handle handle;
2620         int size = comm_event->event.header.size;
2621         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2622
2623         if (ret)
2624                 return;
2625
2626         comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2627         comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2628
2629         perf_output_put(&handle, comm_event->event);
2630         perf_output_copy(&handle, comm_event->comm,
2631                                    comm_event->comm_size);
2632         perf_output_end(&handle);
2633 }
2634
2635 static int perf_counter_comm_match(struct perf_counter *counter)
2636 {
2637         if (counter->attr.comm)
2638                 return 1;
2639
2640         return 0;
2641 }
2642
2643 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2644                                   struct perf_comm_event *comm_event)
2645 {
2646         struct perf_counter *counter;
2647
2648         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2649                 return;
2650
2651         rcu_read_lock();
2652         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2653                 if (perf_counter_comm_match(counter))
2654                         perf_counter_comm_output(counter, comm_event);
2655         }
2656         rcu_read_unlock();
2657 }
2658
2659 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2660 {
2661         struct perf_cpu_context *cpuctx;
2662         struct perf_counter_context *ctx;
2663         unsigned int size;
2664         char *comm = comm_event->task->comm;
2665
2666         size = ALIGN(strlen(comm)+1, sizeof(u64));
2667
2668         comm_event->comm = comm;
2669         comm_event->comm_size = size;
2670
2671         comm_event->event.header.size = sizeof(comm_event->event) + size;
2672
2673         cpuctx = &get_cpu_var(perf_cpu_context);
2674         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2675         put_cpu_var(perf_cpu_context);
2676
2677         rcu_read_lock();
2678         /*
2679          * doesn't really matter which of the child contexts the
2680          * event ends up in.
2681          */
2682         ctx = rcu_dereference(current->perf_counter_ctxp);
2683         if (ctx)
2684                 perf_counter_comm_ctx(ctx, comm_event);
2685         rcu_read_unlock();
2686 }
2687
2688 void perf_counter_comm(struct task_struct *task)
2689 {
2690         struct perf_comm_event comm_event;
2691
2692         if (!atomic_read(&nr_comm_counters))
2693                 return;
2694
2695         comm_event = (struct perf_comm_event){
2696                 .task   = task,
2697                 .event  = {
2698                         .header = { .type = PERF_EVENT_COMM, },
2699                 },
2700         };
2701
2702         perf_counter_comm_event(&comm_event);
2703 }
2704
2705 /*
2706  * mmap tracking
2707  */
2708
2709 struct perf_mmap_event {
2710         struct vm_area_struct   *vma;
2711
2712         const char              *file_name;
2713         int                     file_size;
2714
2715         struct {
2716                 struct perf_event_header        header;
2717
2718                 u32                             pid;
2719                 u32                             tid;
2720                 u64                             start;
2721                 u64                             len;
2722                 u64                             pgoff;
2723         } event;
2724 };
2725
2726 static void perf_counter_mmap_output(struct perf_counter *counter,
2727                                      struct perf_mmap_event *mmap_event)
2728 {
2729         struct perf_output_handle handle;
2730         int size = mmap_event->event.header.size;
2731         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2732
2733         if (ret)
2734                 return;
2735
2736         mmap_event->event.pid = perf_counter_pid(counter, current);
2737         mmap_event->event.tid = perf_counter_tid(counter, current);
2738
2739         perf_output_put(&handle, mmap_event->event);
2740         perf_output_copy(&handle, mmap_event->file_name,
2741                                    mmap_event->file_size);
2742         perf_output_end(&handle);
2743 }
2744
2745 static int perf_counter_mmap_match(struct perf_counter *counter,
2746                                    struct perf_mmap_event *mmap_event)
2747 {
2748         if (counter->attr.mmap)
2749                 return 1;
2750
2751         return 0;
2752 }
2753
2754 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2755                                   struct perf_mmap_event *mmap_event)
2756 {
2757         struct perf_counter *counter;
2758
2759         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2760                 return;
2761
2762         rcu_read_lock();
2763         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2764                 if (perf_counter_mmap_match(counter, mmap_event))
2765                         perf_counter_mmap_output(counter, mmap_event);
2766         }
2767         rcu_read_unlock();
2768 }
2769
2770 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2771 {
2772         struct perf_cpu_context *cpuctx;
2773         struct perf_counter_context *ctx;
2774         struct vm_area_struct *vma = mmap_event->vma;
2775         struct file *file = vma->vm_file;
2776         unsigned int size;
2777         char tmp[16];
2778         char *buf = NULL;
2779         const char *name;
2780
2781         if (file) {
2782                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2783                 if (!buf) {
2784                         name = strncpy(tmp, "//enomem", sizeof(tmp));
2785                         goto got_name;
2786                 }
2787                 name = d_path(&file->f_path, buf, PATH_MAX);
2788                 if (IS_ERR(name)) {
2789                         name = strncpy(tmp, "//toolong", sizeof(tmp));
2790                         goto got_name;
2791                 }
2792         } else {
2793                 name = arch_vma_name(mmap_event->vma);
2794                 if (name)
2795                         goto got_name;
2796
2797                 if (!vma->vm_mm) {
2798                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
2799                         goto got_name;
2800                 }
2801
2802                 name = strncpy(tmp, "//anon", sizeof(tmp));
2803                 goto got_name;
2804         }
2805
2806 got_name:
2807         size = ALIGN(strlen(name)+1, sizeof(u64));
2808
2809         mmap_event->file_name = name;
2810         mmap_event->file_size = size;
2811
2812         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2813
2814         cpuctx = &get_cpu_var(perf_cpu_context);
2815         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2816         put_cpu_var(perf_cpu_context);
2817
2818         rcu_read_lock();
2819         /*
2820          * doesn't really matter which of the child contexts the
2821          * event ends up in.
2822          */
2823         ctx = rcu_dereference(current->perf_counter_ctxp);
2824         if (ctx)
2825                 perf_counter_mmap_ctx(ctx, mmap_event);
2826         rcu_read_unlock();
2827
2828         kfree(buf);
2829 }
2830
2831 void __perf_counter_mmap(struct vm_area_struct *vma)
2832 {
2833         struct perf_mmap_event mmap_event;
2834
2835         if (!atomic_read(&nr_mmap_counters))
2836                 return;
2837
2838         mmap_event = (struct perf_mmap_event){
2839                 .vma    = vma,
2840                 .event  = {
2841                         .header = { .type = PERF_EVENT_MMAP, },
2842                         .start  = vma->vm_start,
2843                         .len    = vma->vm_end - vma->vm_start,
2844                         .pgoff  = vma->vm_pgoff,
2845                 },
2846         };
2847
2848         perf_counter_mmap_event(&mmap_event);
2849 }
2850
2851 /*
2852  * Log sample_period changes so that analyzing tools can re-normalize the
2853  * event flow.
2854  */
2855
2856 static void perf_log_period(struct perf_counter *counter, u64 period)
2857 {
2858         struct perf_output_handle handle;
2859         int ret;
2860
2861         struct {
2862                 struct perf_event_header        header;
2863                 u64                             time;
2864                 u64                             id;
2865                 u64                             period;
2866         } freq_event = {
2867                 .header = {
2868                         .type = PERF_EVENT_PERIOD,
2869                         .misc = 0,
2870                         .size = sizeof(freq_event),
2871                 },
2872                 .time = sched_clock(),
2873                 .id = counter->id,
2874                 .period = period,
2875         };
2876
2877         if (counter->hw.sample_period == period)
2878                 return;
2879
2880         ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2881         if (ret)
2882                 return;
2883
2884         perf_output_put(&handle, freq_event);
2885         perf_output_end(&handle);
2886 }
2887
2888 /*
2889  * IRQ throttle logging
2890  */
2891
2892 static void perf_log_throttle(struct perf_counter *counter, int enable)
2893 {
2894         struct perf_output_handle handle;
2895         int ret;
2896
2897         struct {
2898                 struct perf_event_header        header;
2899                 u64                             time;
2900         } throttle_event = {
2901                 .header = {
2902                         .type = PERF_EVENT_THROTTLE + enable,
2903                         .misc = 0,
2904                         .size = sizeof(throttle_event),
2905                 },
2906                 .time = sched_clock(),
2907         };
2908
2909         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2910         if (ret)
2911                 return;
2912
2913         perf_output_put(&handle, throttle_event);
2914         perf_output_end(&handle);
2915 }
2916
2917 /*
2918  * Generic counter overflow handling.
2919  */
2920
2921 int perf_counter_overflow(struct perf_counter *counter,
2922                           int nmi, struct pt_regs *regs, u64 addr)
2923 {
2924         int events = atomic_read(&counter->event_limit);
2925         int throttle = counter->pmu->unthrottle != NULL;
2926         int ret = 0;
2927
2928         if (!throttle) {
2929                 counter->hw.interrupts++;
2930         } else {
2931                 if (counter->hw.interrupts != MAX_INTERRUPTS) {
2932                         counter->hw.interrupts++;
2933                         if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2934                                 counter->hw.interrupts = MAX_INTERRUPTS;
2935                                 perf_log_throttle(counter, 0);
2936                                 ret = 1;
2937                         }
2938                 } else {
2939                         /*
2940                          * Keep re-disabling the counter even though we disabled it
2941                          * on the previous pass - just in case we raced with a
2942                          * sched-in and the counter got enabled again:
2943                          */
2944                         ret = 1;
2945                 }
2946         }
2947
2948         /*
2949          * XXX event_limit might not quite work as expected on inherited
2950          * counters
2951          */
2952
2953         counter->pending_kill = POLL_IN;
2954         if (events && atomic_dec_and_test(&counter->event_limit)) {
2955                 ret = 1;
2956                 counter->pending_kill = POLL_HUP;
2957                 if (nmi) {
2958                         counter->pending_disable = 1;
2959                         perf_pending_queue(&counter->pending,
2960                                            perf_pending_counter);
2961                 } else
2962                         perf_counter_disable(counter);
2963         }
2964
2965         perf_counter_output(counter, nmi, regs, addr);
2966         return ret;
2967 }
2968
2969 /*
2970  * Generic software counter infrastructure
2971  */
2972
2973 static void perf_swcounter_update(struct perf_counter *counter)
2974 {
2975         struct hw_perf_counter *hwc = &counter->hw;
2976         u64 prev, now;
2977         s64 delta;
2978
2979 again:
2980         prev = atomic64_read(&hwc->prev_count);
2981         now = atomic64_read(&hwc->count);
2982         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2983                 goto again;
2984
2985         delta = now - prev;
2986
2987         atomic64_add(delta, &counter->count);
2988         atomic64_sub(delta, &hwc->period_left);
2989 }
2990
2991 static void perf_swcounter_set_period(struct perf_counter *counter)
2992 {
2993         struct hw_perf_counter *hwc = &counter->hw;
2994         s64 left = atomic64_read(&hwc->period_left);
2995         s64 period = hwc->sample_period;
2996
2997         if (unlikely(left <= -period)) {
2998                 left = period;
2999                 atomic64_set(&hwc->period_left, left);
3000         }
3001
3002         if (unlikely(left <= 0)) {
3003                 left += period;
3004                 atomic64_add(period, &hwc->period_left);
3005         }
3006
3007         atomic64_set(&hwc->prev_count, -left);
3008         atomic64_set(&hwc->count, -left);
3009 }
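
/*
 * Starting both ->prev_count and ->count at -left means the count turns
 * non-negative again after another 'left' events; perf_swcounter_add()
 * below uses atomic64_add_negative() to spot exactly that transition and
 * calls perf_swcounter_overflow().
 */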
3010
3011 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3012 {
3013         enum hrtimer_restart ret = HRTIMER_RESTART;
3014         struct perf_counter *counter;
3015         struct pt_regs *regs;
3016         u64 period;
3017
3018         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3019         counter->pmu->read(counter);
3020
3021         regs = get_irq_regs();
3022         /*
3023          * In case we exclude kernel IPs or are somehow not in interrupt
3024          * context, provide the next best thing, the user IP.
3025          */
3026         if ((counter->attr.exclude_kernel || !regs) &&
3027                         !counter->attr.exclude_user)
3028                 regs = task_pt_regs(current);
3029
3030         if (regs) {
3031                 if (perf_counter_overflow(counter, 0, regs, 0))
3032                         ret = HRTIMER_NORESTART;
3033         }
3034
3035         period = max_t(u64, 10000, counter->hw.sample_period);
3036         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3037
3038         return ret;
3039 }
3040
3041 static void perf_swcounter_overflow(struct perf_counter *counter,
3042                                     int nmi, struct pt_regs *regs, u64 addr)
3043 {
3044         perf_swcounter_update(counter);
3045         perf_swcounter_set_period(counter);
3046         if (perf_counter_overflow(counter, nmi, regs, addr))
3047                 /* soft-disable the counter */
3048                 ;
3049
3050 }
3051
3052 static int perf_swcounter_is_counting(struct perf_counter *counter)
3053 {
3054         struct perf_counter_context *ctx;
3055         unsigned long flags;
3056         int count;
3057
3058         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3059                 return 1;
3060
3061         if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3062                 return 0;
3063
3064         /*
3065          * If the counter is inactive, it could be just because
3066          * its task is scheduled out, or because it's in a group
3067          * which could not go on the PMU.  We want to count in
3068          * the first case but not the second.  If the context is
3069          * currently active then an inactive software counter must
3070          * be the second case.  If it's not currently active then
3071          * we need to know whether the counter was active when the
3072          * context was last active, which we can determine by
3073          * comparing counter->tstamp_stopped with ctx->time.
3074          *
3075          * We are within an RCU read-side critical section,
3076          * which protects the existence of *ctx.
3077          */
3078         ctx = counter->ctx;
3079         spin_lock_irqsave(&ctx->lock, flags);
3080         count = 1;
3081         /* Re-check state now we have the lock */
3082         if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3083             counter->ctx->is_active ||
3084             counter->tstamp_stopped < ctx->time)
3085                 count = 0;
3086         spin_unlock_irqrestore(&ctx->lock, flags);
3087         return count;
3088 }
3089
3090 static int perf_swcounter_match(struct perf_counter *counter,
3091                                 enum perf_event_types type,
3092                                 u32 event, struct pt_regs *regs)
3093 {
3094         if (!perf_swcounter_is_counting(counter))
3095                 return 0;
3096
3097         if (counter->attr.type != type)
3098                 return 0;
3099         if (counter->attr.config != event)
3100                 return 0;
3101
3102         if (regs) {
3103                 if (counter->attr.exclude_user && user_mode(regs))
3104                         return 0;
3105
3106                 if (counter->attr.exclude_kernel && !user_mode(regs))
3107                         return 0;
3108         }
3109
3110         return 1;
3111 }
3112
3113 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3114                                int nmi, struct pt_regs *regs, u64 addr)
3115 {
3116         int neg = atomic64_add_negative(nr, &counter->hw.count);
3117
3118         if (counter->hw.sample_period && !neg && regs)
3119                 perf_swcounter_overflow(counter, nmi, regs, addr);
3120 }
3121
3122 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3123                                      enum perf_event_types type, u32 event,
3124                                      u64 nr, int nmi, struct pt_regs *regs,
3125                                      u64 addr)
3126 {
3127         struct perf_counter *counter;
3128
3129         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3130                 return;
3131
3132         rcu_read_lock();
3133         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3134                 if (perf_swcounter_match(counter, type, event, regs))
3135                         perf_swcounter_add(counter, nr, nmi, regs, addr);
3136         }
3137         rcu_read_unlock();
3138 }
3139
3140 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3141 {
3142         if (in_nmi())
3143                 return &cpuctx->recursion[3];
3144
3145         if (in_irq())
3146                 return &cpuctx->recursion[2];
3147
3148         if (in_softirq())
3149                 return &cpuctx->recursion[1];
3150
3151         return &cpuctx->recursion[0];
3152 }
3153
3154 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
3155                                    u64 nr, int nmi, struct pt_regs *regs,
3156                                    u64 addr)
3157 {
3158         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3159         int *recursion = perf_swcounter_recursion_context(cpuctx);
3160         struct perf_counter_context *ctx;
3161
3162         if (*recursion)
3163                 goto out;
3164
3165         (*recursion)++;
3166         barrier();
3167
3168         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3169                                  nr, nmi, regs, addr);
3170         rcu_read_lock();
3171         /*
3172          * doesn't really matter which of the child contexts the
3173          * event ends up in.
3174          */
3175         ctx = rcu_dereference(current->perf_counter_ctxp);
3176         if (ctx)
3177                 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3178         rcu_read_unlock();
3179
3180         barrier();
3181         (*recursion)--;
3182
3183 out:
3184         put_cpu_var(perf_cpu_context);
3185 }
3186
3187 void
3188 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3189 {
3190         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3191 }
3192
3193 static void perf_swcounter_read(struct perf_counter *counter)
3194 {
3195         perf_swcounter_update(counter);
3196 }
3197
3198 static int perf_swcounter_enable(struct perf_counter *counter)
3199 {
3200         perf_swcounter_set_period(counter);
3201         return 0;
3202 }
3203
3204 static void perf_swcounter_disable(struct perf_counter *counter)
3205 {
3206         perf_swcounter_update(counter);
3207 }
3208
3209 static const struct pmu perf_ops_generic = {
3210         .enable         = perf_swcounter_enable,
3211         .disable        = perf_swcounter_disable,
3212         .read           = perf_swcounter_read,
3213 };
3214
3215 /*
3216  * Software counter: cpu wall time clock
3217  */
3218
3219 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3220 {
3221         int cpu = raw_smp_processor_id();
3222         s64 prev;
3223         u64 now;
3224
3225         now = cpu_clock(cpu);
3226         prev = atomic64_read(&counter->hw.prev_count);
3227         atomic64_set(&counter->hw.prev_count, now);
3228         atomic64_add(now - prev, &counter->count);
3229 }
3230
3231 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3232 {
3233         struct hw_perf_counter *hwc = &counter->hw;
3234         int cpu = raw_smp_processor_id();
3235
3236         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3237         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3238         hwc->hrtimer.function = perf_swcounter_hrtimer;
3239         if (hwc->sample_period) {
3240                 u64 period = max_t(u64, 10000, hwc->sample_period);
3241                 __hrtimer_start_range_ns(&hwc->hrtimer,
3242                                 ns_to_ktime(period), 0,
3243                                 HRTIMER_MODE_REL, 0);
3244         }
3245
3246         return 0;
3247 }
3248
3249 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3250 {
3251         if (counter->hw.sample_period)
3252                 hrtimer_cancel(&counter->hw.hrtimer);
3253         cpu_clock_perf_counter_update(counter);
3254 }
3255
3256 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3257 {
3258         cpu_clock_perf_counter_update(counter);
3259 }
3260
3261 static const struct pmu perf_ops_cpu_clock = {
3262         .enable         = cpu_clock_perf_counter_enable,
3263         .disable        = cpu_clock_perf_counter_disable,
3264         .read           = cpu_clock_perf_counter_read,
3265 };
3266
3267 /*
3268  * Software counter: task time clock
3269  */
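/*
 * The task-clock counter measures time the counter's context has been
 * running (ctx->time) rather than wall time; prev_count is swapped
 * atomically (atomic64_xchg()) and the delta added to counter->count.
 * Sampling uses the same hrtimer scheme as the cpu-clock counter above.
 * From NMI context the read path cannot call update_context_time(), so
 * it estimates the current time from the context's last timestamp.
 */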
3270
3271 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3272 {
3273         u64 prev;
3274         s64 delta;
3275
3276         prev = atomic64_xchg(&counter->hw.prev_count, now);
3277         delta = now - prev;
3278         atomic64_add(delta, &counter->count);
3279 }
3280
3281 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3282 {
3283         struct hw_perf_counter *hwc = &counter->hw;
3284         u64 now;
3285
3286         now = counter->ctx->time;
3287
3288         atomic64_set(&hwc->prev_count, now);
3289         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3290         hwc->hrtimer.function = perf_swcounter_hrtimer;
3291         if (hwc->sample_period) {
3292                 u64 period = max_t(u64, 10000, hwc->sample_period);
3293                 __hrtimer_start_range_ns(&hwc->hrtimer,
3294                                 ns_to_ktime(period), 0,
3295                                 HRTIMER_MODE_REL, 0);
3296         }
3297
3298         return 0;
3299 }
3300
3301 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3302 {
3303         if (counter->hw.sample_period)
3304                 hrtimer_cancel(&counter->hw.hrtimer);
3305         task_clock_perf_counter_update(counter, counter->ctx->time);
3307 }
3308
3309 static void task_clock_perf_counter_read(struct perf_counter *counter)
3310 {
3311         u64 time;
3312
3313         if (!in_nmi()) {
3314                 update_context_time(counter->ctx);
3315                 time = counter->ctx->time;
3316         } else {
3317                 u64 now = perf_clock();
3318                 u64 delta = now - counter->ctx->timestamp;
3319                 time = counter->ctx->time + delta;
3320         }
3321
3322         task_clock_perf_counter_update(counter, time);
3323 }
3324
3325 static const struct pmu perf_ops_task_clock = {
3326         .enable         = task_clock_perf_counter_enable,
3327         .disable        = task_clock_perf_counter_disable,
3328         .read           = task_clock_perf_counter_read,
3329 };
3330
3331 /*
3332  * Software counter: cpu migrations
3333  */
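/*
 * Called by the scheduler when a task migrates to a new CPU: counts one
 * migration event in the given CPU's context and, if the task has a
 * counter context of its own, in that context as well.
 */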
3334 void perf_counter_task_migration(struct task_struct *task, int cpu)
3335 {
3336         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3337         struct perf_counter_context *ctx;
3338
3339         perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
3340                                  PERF_COUNT_CPU_MIGRATIONS,
3341                                  1, 1, NULL, 0);
3342
3343         ctx = perf_pin_task_context(task);
3344         if (ctx) {
3345                 perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
3346                                          PERF_COUNT_CPU_MIGRATIONS,
3347                                          1, 1, NULL, 0);
3348                 perf_unpin_context(ctx);
3349         }
3350 }
3351
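/*
 * Tracepoint counters: with CONFIG_EVENT_PROFILE, the ftrace event
 * profiling hooks call perf_tpcounter_event() with the tracepoint's
 * event id and the event is fed through the generic software counter
 * path above.  Without CONFIG_EVENT_PROFILE, tp_perf_counter_init()
 * returns NULL, which the caller turns into -EINVAL.
 */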
3352 #ifdef CONFIG_EVENT_PROFILE
3353 void perf_tpcounter_event(int event_id)
3354 {
3355         struct pt_regs *regs = get_irq_regs();
3356
3357         if (!regs)
3358                 regs = task_pt_regs(current);
3359
3360         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3361 }
3362 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3363
3364 extern int ftrace_profile_enable(int);
3365 extern void ftrace_profile_disable(int);
3366
3367 static void tp_perf_counter_destroy(struct perf_counter *counter)
3368 {
3369         ftrace_profile_disable(perf_event_id(&counter->attr));
3370 }
3371
3372 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3373 {
3374         int event_id = perf_event_id(&counter->attr);
3375         int ret;
3376
3377         ret = ftrace_profile_enable(event_id);
3378         if (ret)
3379                 return NULL;
3380
3381         counter->destroy = tp_perf_counter_destroy;
3382         counter->hw.sample_period = counter->attr.sample_period;
3383
3384         return &perf_ops_generic;
3385 }
3386 #else
3387 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3388 {
3389         return NULL;
3390 }
3391 #endif
3392
3393 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3394 {
3395         const struct pmu *pmu = NULL;
3396
3397         /*
3398          * Software counters (currently) can't in general distinguish
3399          * between user, kernel and hypervisor events.
3400          * However, context switches and cpu migrations are considered
3401          * to be kernel events, and page faults are never hypervisor
3402          * events.
3403          */
3404         switch (counter->attr.config) {
3405         case PERF_COUNT_CPU_CLOCK:
3406                 pmu = &perf_ops_cpu_clock;
3407
3408                 break;
3409         case PERF_COUNT_TASK_CLOCK:
3410                 /*
3411                  * If the user instantiates this as a per-cpu counter,
3412                  * use the cpu_clock counter instead.
3413                  */
3414                 if (counter->ctx->task)
3415                         pmu = &perf_ops_task_clock;
3416                 else
3417                         pmu = &perf_ops_cpu_clock;
3418
3419                 break;
3420         case PERF_COUNT_PAGE_FAULTS:
3421         case PERF_COUNT_PAGE_FAULTS_MIN:
3422         case PERF_COUNT_PAGE_FAULTS_MAJ:
3423         case PERF_COUNT_CONTEXT_SWITCHES:
3424         case PERF_COUNT_CPU_MIGRATIONS:
3425                 pmu = &perf_ops_generic;
3426                 break;
3427         }
3428
3429         return pmu;
3430 }
3431
3432 /*
3433  * Allocate and initialize a counter structure
3434  */
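/*
 * The pmu is chosen from attr->type: PERF_TYPE_RAW and PERF_TYPE_HARDWARE
 * go to the architecture's hw_perf_counter_init(), PERF_TYPE_SOFTWARE to
 * sw_perf_counter_init() and PERF_TYPE_TRACEPOINT to
 * tp_perf_counter_init().  If attr->freq is set, sample_freq is converted
 * into an initial sample_period of TICK_NSEC / sample_freq.
 */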
3435 static struct perf_counter *
3436 perf_counter_alloc(struct perf_counter_attr *attr,
3437                    int cpu,
3438                    struct perf_counter_context *ctx,
3439                    struct perf_counter *group_leader,
3440                    gfp_t gfpflags)
3441 {
3442         const struct pmu *pmu;
3443         struct perf_counter *counter;
3444         struct hw_perf_counter *hwc;
3445         long err;
3446
3447         counter = kzalloc(sizeof(*counter), gfpflags);
3448         if (!counter)
3449                 return ERR_PTR(-ENOMEM);
3450
3451         /*
3452          * Single counters are their own group leaders, with an
3453          * empty sibling list:
3454          */
3455         if (!group_leader)
3456                 group_leader = counter;
3457
3458         mutex_init(&counter->child_mutex);
3459         INIT_LIST_HEAD(&counter->child_list);
3460
3461         INIT_LIST_HEAD(&counter->list_entry);
3462         INIT_LIST_HEAD(&counter->event_entry);
3463         INIT_LIST_HEAD(&counter->sibling_list);
3464         init_waitqueue_head(&counter->waitq);
3465
3466         mutex_init(&counter->mmap_mutex);
3467
3468         counter->cpu            = cpu;
3469         counter->attr           = *attr;
3470         counter->group_leader   = group_leader;
3471         counter->pmu            = NULL;
3472         counter->ctx            = ctx;
3473         counter->oncpu          = -1;
3474
3475         counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
3476         counter->id             = atomic64_inc_return(&perf_counter_id);
3477
3478         counter->state          = PERF_COUNTER_STATE_INACTIVE;
3479
3480         if (attr->disabled)
3481                 counter->state = PERF_COUNTER_STATE_OFF;
3482
3483         pmu = NULL;
3484
3485         hwc = &counter->hw;
3486         if (attr->freq && attr->sample_freq)
3487                 hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
3488         else
3489                 hwc->sample_period = attr->sample_period;
3490
3491         /*
3492          * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3493          */
3494         if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3495                 goto done;
3496
3497         if (attr->type == PERF_TYPE_RAW) {
3498                 pmu = hw_perf_counter_init(counter);
3499                 goto done;
3500         }
3501
3502         switch (attr->type) {
3503         case PERF_TYPE_HARDWARE:
3504                 pmu = hw_perf_counter_init(counter);
3505                 break;
3506
3507         case PERF_TYPE_SOFTWARE:
3508                 pmu = sw_perf_counter_init(counter);
3509                 break;
3510
3511         case PERF_TYPE_TRACEPOINT:
3512                 pmu = tp_perf_counter_init(counter);
3513                 break;
3514         }
3515 done:
3516         err = 0;
3517         if (!pmu)
3518                 err = -EINVAL;
3519         else if (IS_ERR(pmu))
3520                 err = PTR_ERR(pmu);
3521
3522         if (err) {
3523                 if (counter->ns)
3524                         put_pid_ns(counter->ns);
3525                 kfree(counter);
3526                 return ERR_PTR(err);
3527         }
3528
3529         counter->pmu = pmu;
3530
3531         atomic_inc(&nr_counters);
3532         if (counter->attr.mmap)
3533                 atomic_inc(&nr_mmap_counters);
3534         if (counter->attr.comm)
3535                 atomic_inc(&nr_comm_counters);
3536
3537         return counter;
3538 }
3539
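/*
 * Userspace usage sketch (not part of the ABI documentation): counting
 * task clock time for the calling task on any CPU, assuming a raw
 * syscall wrapper and the attr layout from <linux/perf_counter.h>:
 *
 *	struct perf_counter_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_TASK_CLOCK,
 *	};
 *	u64 count;
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 *	... run the workload ...
 *	read(fd, &count, sizeof(count));
 */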
3540 /**
3541  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3542  *
3543  * @attr_uptr:  event type attributes for monitoring/sampling
3544  * @pid:                target pid
3545  * @cpu:                target cpu
3546  * @group_fd:           group leader counter fd
3547  */
3548 SYSCALL_DEFINE5(perf_counter_open,
3549                 const struct perf_counter_attr __user *, attr_uptr,
3550                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3551 {
3552         struct perf_counter *counter, *group_leader;
3553         struct perf_counter_attr attr;
3554         struct perf_counter_context *ctx;
3555         struct file *counter_file = NULL;
3556         struct file *group_file = NULL;
3557         int fput_needed = 0;
3558         int fput_needed2 = 0;
3559         int ret;
3560
3561         /* for future expandability... */
3562         if (flags)
3563                 return -EINVAL;
3564
3565         if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
3566                 return -EFAULT;
3567
3568         /*
3569          * Get the target context (task or percpu):
3570          */
3571         ctx = find_get_context(pid, cpu);
3572         if (IS_ERR(ctx))
3573                 return PTR_ERR(ctx);
3574
3575         /*
3576          * Look up the group leader (we will attach this counter to it):
3577          */
3578         group_leader = NULL;
3579         if (group_fd != -1) {
3580                 ret = -EINVAL;
3581                 group_file = fget_light(group_fd, &fput_needed);
3582                 if (!group_file)
3583                         goto err_put_context;
3584                 if (group_file->f_op != &perf_fops)
3585                         goto err_put_context;
3586
3587                 group_leader = group_file->private_data;
3588                 /*
3589                  * Do not allow a recursive hierarchy (this new sibling
3590                  * becoming part of another group-sibling):
3591                  */
3592                 if (group_leader->group_leader != group_leader)
3593                         goto err_put_context;
3594                 /*
3595                  * Do not allow attaching to a group in a different
3596                  * task or CPU context:
3597                  */
3598                 if (group_leader->ctx != ctx)
3599                         goto err_put_context;
3600                 /*
3601                  * Only a group leader can be exclusive or pinned
3602                  */
3603                 if (attr.exclusive || attr.pinned)
3604                         goto err_put_context;
3605         }
3606
3607         counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3608                                      GFP_KERNEL);
3609         ret = PTR_ERR(counter);
3610         if (IS_ERR(counter))
3611                 goto err_put_context;
3612
3613         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3614         if (ret < 0)
3615                 goto err_free_put_context;
3616
3617         counter_file = fget_light(ret, &fput_needed2);
3618         if (!counter_file)
3619                 goto err_free_put_context;
3620
3621         counter->filp = counter_file;
3622         WARN_ON_ONCE(ctx->parent_ctx);
3623         mutex_lock(&ctx->mutex);
3624         perf_install_in_context(ctx, counter, cpu);
3625         ++ctx->generation;
3626         mutex_unlock(&ctx->mutex);
3627
3628         counter->owner = current;
3629         get_task_struct(current);
3630         mutex_lock(&current->perf_counter_mutex);
3631         list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3632         mutex_unlock(&current->perf_counter_mutex);
3633
3634         fput_light(counter_file, fput_needed2);
3635
3636 out_fput:
3637         fput_light(group_file, fput_needed);
3638
3639         return ret;
3640
3641 err_free_put_context:
3642         kfree(counter);
3643
3644 err_put_context:
3645         put_ctx(ctx);
3646
3647         goto out_fput;
3648 }
3649
3650 /*
3651  * inherit a counter from parent task to child task:
3652  */
3653 static struct perf_counter *
3654 inherit_counter(struct perf_counter *parent_counter,
3655               struct task_struct *parent,
3656               struct perf_counter_context *parent_ctx,
3657               struct task_struct *child,
3658               struct perf_counter *group_leader,
3659               struct perf_counter_context *child_ctx)
3660 {
3661         struct perf_counter *child_counter;
3662
3663         /*
3664          * Instead of creating recursive hierarchies of counters,
3665          * we link inherited counters back to the original parent,
3666          * which is guaranteed to have a filp that we use for
3667          * reference counting:
3668          */
3669         if (parent_counter->parent)
3670                 parent_counter = parent_counter->parent;
3671
3672         child_counter = perf_counter_alloc(&parent_counter->attr,
3673                                            parent_counter->cpu, child_ctx,
3674                                            group_leader, GFP_KERNEL);
3675         if (IS_ERR(child_counter))
3676                 return child_counter;
3677         get_ctx(child_ctx);
3678
3679         /*
3680          * Make the child state follow the state of the parent counter,
3681          * not its attr.disabled bit.  We hold the parent's mutex,
3682          * so we won't race with perf_counter_{en, dis}able_family.
3683          */
3684         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3685                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3686         else
3687                 child_counter->state = PERF_COUNTER_STATE_OFF;
3688
3689         /*
3690          * Link it up in the child's context:
3691          */
3692         add_counter_to_ctx(child_counter, child_ctx);
3693
3694         child_counter->parent = parent_counter;
3695         /*
3696          * inherit into child's child as well:
3697          */
3698         child_counter->attr.inherit = 1;
3699
3700         /*
3701          * Get a reference to the parent filp - we will fput it
3702          * when the child counter exits. This is safe to do because
3703          * we are in the parent and we know that the filp still
3704          * exists and has a nonzero count:
3705          */
3706         atomic_long_inc(&parent_counter->filp->f_count);
3707
3708         /*
3709          * Link this into the parent counter's child list
3710          */
3711         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3712         mutex_lock(&parent_counter->child_mutex);
3713         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3714         mutex_unlock(&parent_counter->child_mutex);
3715
3716         return child_counter;
3717 }
3718
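/*
 * inherit_group() clones an entire counter group into the child: the
 * group leader is inherited first, then each sibling is inherited with
 * the new leader as its group leader.
 */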
3719 static int inherit_group(struct perf_counter *parent_counter,
3720               struct task_struct *parent,
3721               struct perf_counter_context *parent_ctx,
3722               struct task_struct *child,
3723               struct perf_counter_context *child_ctx)
3724 {
3725         struct perf_counter *leader;
3726         struct perf_counter *sub;
3727         struct perf_counter *child_ctr;
3728
3729         leader = inherit_counter(parent_counter, parent, parent_ctx,
3730                                  child, NULL, child_ctx);
3731         if (IS_ERR(leader))
3732                 return PTR_ERR(leader);
3733         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3734                 child_ctr = inherit_counter(sub, parent, parent_ctx,
3735                                             child, leader, child_ctx);
3736                 if (IS_ERR(child_ctr))
3737                         return PTR_ERR(child_ctr);
3738         }
3739         return 0;
3740 }
3741
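/*
 * Fold an exiting child counter back into its parent: the child's count
 * and enabled/running times are added to the parent's totals, the child
 * is unlinked from the parent's child list, and the reference the child
 * held on the parent's filp is dropped.
 */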
3742 static void sync_child_counter(struct perf_counter *child_counter,
3743                                struct perf_counter *parent_counter)
3744 {
3745         u64 child_val;
3746
3747         child_val = atomic64_read(&child_counter->count);
3748
3749         /*
3750          * Add back the child's count to the parent's count:
3751          */
3752         atomic64_add(child_val, &parent_counter->count);
3753         atomic64_add(child_counter->total_time_enabled,
3754                      &parent_counter->child_total_time_enabled);
3755         atomic64_add(child_counter->total_time_running,
3756                      &parent_counter->child_total_time_running);
3757
3758         /*
3759          * Remove this counter from the parent's list
3760          */
3761         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3762         mutex_lock(&parent_counter->child_mutex);
3763         list_del_init(&child_counter->child_list);
3764         mutex_unlock(&parent_counter->child_mutex);
3765
3766         /*
3767          * Release the parent counter, if this was the last
3768          * reference to it.
3769          */
3770         fput(parent_counter->filp);
3771 }
3772
3773 static void
3774 __perf_counter_exit_task(struct perf_counter *child_counter,
3775                          struct perf_counter_context *child_ctx)
3776 {
3777         struct perf_counter *parent_counter;
3778
3779         update_counter_times(child_counter);
3780         perf_counter_remove_from_context(child_counter);
3781
3782         parent_counter = child_counter->parent;
3783         /*
3784          * It can happen that parent exits first, and has counters
3785          * that are still around due to the child reference. These
3786          * counters need to be zapped - but otherwise linger.
3787          */
3788         if (parent_counter) {
3789                 sync_child_counter(child_counter, parent_counter);
3790                 free_counter(child_counter);
3791         }
3792 }
3793
3794 /*
3795  * When a child task exits, feed back counter values to parent counters.
3796  */
3797 void perf_counter_exit_task(struct task_struct *child)
3798 {
3799         struct perf_counter *child_counter, *tmp;
3800         struct perf_counter_context *child_ctx;
3801         unsigned long flags;
3802
3803         if (likely(!child->perf_counter_ctxp))
3804                 return;
3805
3806         local_irq_save(flags);
3807         /*
3808          * We can't reschedule here because interrupts are disabled,
3809          * and either child is current or it is a task that can't be
3810          * scheduled, so we are now safe from rescheduling changing
3811          * our context.
3812          */
3813         child_ctx = child->perf_counter_ctxp;
3814         __perf_counter_task_sched_out(child_ctx);
3815
3816         /*
3817          * Take the context lock here so that if find_get_context is
3818          * reading child->perf_counter_ctxp, we wait until it has
3819          * incremented the context's refcount before we do put_ctx below.
3820          */
3821         spin_lock(&child_ctx->lock);
3822         child->perf_counter_ctxp = NULL;
3823         if (child_ctx->parent_ctx) {
3824                 /*
3825                  * This context is a clone; unclone it so it can't get
3826                  * swapped to another process while we're removing all
3827                  * the counters from it.
3828                  */
3829                 put_ctx(child_ctx->parent_ctx);
3830                 child_ctx->parent_ctx = NULL;
3831         }
3832         spin_unlock(&child_ctx->lock);
3833         local_irq_restore(flags);
3834
3835         mutex_lock(&child_ctx->mutex);
3836
3837 again:
3838         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3839                                  list_entry)
3840                 __perf_counter_exit_task(child_counter, child_ctx);
3841
3842         /*
3843          * If the last counter was a group counter, it will have appended all
3844          * its siblings to the list, but we obtained 'tmp' before that, so
3845          * it will still point to the list head terminating the iteration.
3846          */
3847         if (!list_empty(&child_ctx->counter_list))
3848                 goto again;
3849
3850         mutex_unlock(&child_ctx->mutex);
3851
3852         put_ctx(child_ctx);
3853 }
3854
3855 /*
3856  * Free an unexposed, unused context, as created by inheritance in
3857  * perf_counter_init_task() below; used by fork() in case of failure.
3858  */
3859 void perf_counter_free_task(struct task_struct *task)
3860 {
3861         struct perf_counter_context *ctx = task->perf_counter_ctxp;
3862         struct perf_counter *counter, *tmp;
3863
3864         if (!ctx)
3865                 return;
3866
3867         mutex_lock(&ctx->mutex);
3868 again:
3869         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
3870                 struct perf_counter *parent = counter->parent;
3871
3872                 if (WARN_ON_ONCE(!parent))
3873                         continue;
3874
3875                 mutex_lock(&parent->child_mutex);
3876                 list_del_init(&counter->child_list);
3877                 mutex_unlock(&parent->child_mutex);
3878
3879                 fput(parent->filp);
3880
3881                 list_del_counter(counter, ctx);
3882                 free_counter(counter);
3883         }
3884
3885         if (!list_empty(&ctx->counter_list))
3886                 goto again;
3887
3888         mutex_unlock(&ctx->mutex);
3889
3890         put_ctx(ctx);
3891 }
3892
3893 /*
3894  * Initialize the perf_counter context in task_struct
3895  */
3896 int perf_counter_init_task(struct task_struct *child)
3897 {
3898         struct perf_counter_context *child_ctx, *parent_ctx;
3899         struct perf_counter_context *cloned_ctx;
3900         struct perf_counter *counter;
3901         struct task_struct *parent = current;
3902         int inherited_all = 1;
3903         int ret = 0;
3904
3905         child->perf_counter_ctxp = NULL;
3906
3907         mutex_init(&child->perf_counter_mutex);
3908         INIT_LIST_HEAD(&child->perf_counter_list);
3909
3910         if (likely(!parent->perf_counter_ctxp))
3911                 return 0;
3912
3913         /*
3914          * This is executed from the parent task context, so inherit
3915          * counters that have been marked for cloning.
3916          * First allocate and initialize a context for the child.
3917          */
3918
3919         child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3920         if (!child_ctx)
3921                 return -ENOMEM;
3922
3923         __perf_counter_init_context(child_ctx, child);
3924         child->perf_counter_ctxp = child_ctx;
3925         get_task_struct(child);
3926
3927         /*
3928          * If the parent's context is a clone, pin it so it won't get
3929          * swapped under us.
3930          */
3931         parent_ctx = perf_pin_task_context(parent);
3932
3933         /*
3934          * No need to check if parent_ctx != NULL here; since we saw
3935          * it non-NULL earlier, the only reason for it to become NULL
3936          * is if we exit, and since we're currently in the middle of
3937          * a fork we can't be exiting at the same time.
3938          */
3939
3940         /*
3941          * Lock the parent list. No need to lock the child - not PID
3942          * hashed yet and not running, so nobody can access it.
3943          */
3944         mutex_lock(&parent_ctx->mutex);
3945
3946         /*
3947          * We don't have to disable NMIs - we are only looking at
3948          * the list, not manipulating it:
3949          */
3950         list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3951                 if (counter != counter->group_leader)
3952                         continue;
3953
3954                 if (!counter->attr.inherit) {
3955                         inherited_all = 0;
3956                         continue;
3957                 }
3958
3959                 ret = inherit_group(counter, parent, parent_ctx,
3960                                              child, child_ctx);
3961                 if (ret) {
3962                         inherited_all = 0;
3963                         break;
3964                 }
3965         }
3966
3967         if (inherited_all) {
3968                 /*
3969                  * Mark the child context as a clone of the parent
3970                  * context, or of whatever the parent is a clone of.
3971                  * Note that if the parent is a clone, it could get
3972                  * uncloned at any point, but that doesn't matter
3973                  * because the list of counters and the generation
3974                  * count can't have changed since we took the mutex.
3975                  */
3976                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3977                 if (cloned_ctx) {
3978                         child_ctx->parent_ctx = cloned_ctx;
3979                         child_ctx->parent_gen = parent_ctx->parent_gen;
3980                 } else {
3981                         child_ctx->parent_ctx = parent_ctx;
3982                         child_ctx->parent_gen = parent_ctx->generation;
3983                 }
3984                 get_ctx(child_ctx->parent_ctx);
3985         }
3986
3987         mutex_unlock(&parent_ctx->mutex);
3988
3989         perf_unpin_context(parent_ctx);
3990
3991         return ret;
3992 }
3993
3994 static void __cpuinit perf_counter_init_cpu(int cpu)
3995 {
3996         struct perf_cpu_context *cpuctx;
3997
3998         cpuctx = &per_cpu(perf_cpu_context, cpu);
3999         __perf_counter_init_context(&cpuctx->ctx, NULL);
4000
4001         spin_lock(&perf_resource_lock);
4002         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4003         spin_unlock(&perf_resource_lock);
4004
4005         hw_perf_counter_setup(cpu);
4006 }
4007
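/*
 * CPU hotplug: on CPU_DOWN_PREPARE every counter in the outgoing CPU's
 * context is removed, on that CPU itself via smp_call_function_single(),
 * with the context mutex held to serialize against
 * perf_install_in_context().
 */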
4008 #ifdef CONFIG_HOTPLUG_CPU
4009 static void __perf_counter_exit_cpu(void *info)
4010 {
4011         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4012         struct perf_counter_context *ctx = &cpuctx->ctx;
4013         struct perf_counter *counter, *tmp;
4014
4015         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4016                 __perf_counter_remove_from_context(counter);
4017 }
4018 static void perf_counter_exit_cpu(int cpu)
4019 {
4020         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4021         struct perf_counter_context *ctx = &cpuctx->ctx;
4022
4023         mutex_lock(&ctx->mutex);
4024         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4025         mutex_unlock(&ctx->mutex);
4026 }
4027 #else
4028 static inline void perf_counter_exit_cpu(int cpu) { }
4029 #endif
4030
4031 static int __cpuinit
4032 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4033 {
4034         unsigned int cpu = (long)hcpu;
4035
4036         switch (action) {
4037
4038         case CPU_UP_PREPARE:
4039         case CPU_UP_PREPARE_FROZEN:
4040                 perf_counter_init_cpu(cpu);
4041                 break;
4042
4043         case CPU_DOWN_PREPARE:
4044         case CPU_DOWN_PREPARE_FROZEN:
4045                 perf_counter_exit_cpu(cpu);
4046                 break;
4047
4048         default:
4049                 break;
4050         }
4051
4052         return NOTIFY_OK;
4053 }
4054
4055 /*
4056  * This has to have a higher priority than migration_notifier in sched.c.
4057  */
4058 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4059         .notifier_call          = perf_cpu_notify,
4060         .priority               = 20,
4061 };
4062
4063 void __init perf_counter_init(void)
4064 {
4065         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4066                         (void *)(long)smp_processor_id());
4067         register_cpu_notifier(&perf_cpu_nb);
4068 }
4069
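/*
 * The two sysdev class attributes below are registered under the cpu
 * sysdev class in the sysfs group "perf_counters".  reserve_percpu
 * lowers each online CPU's max_pertask budget by the reserved amount;
 * overcommit is a 0/1 switch (other values are rejected with -EINVAL).
 */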
4070 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4071 {
4072         return sprintf(buf, "%d\n", perf_reserved_percpu);
4073 }
4074
4075 static ssize_t
4076 perf_set_reserve_percpu(struct sysdev_class *class,
4077                         const char *buf,
4078                         size_t count)
4079 {
4080         struct perf_cpu_context *cpuctx;
4081         unsigned long val;
4082         int err, cpu, mpt;
4083
4084         err = strict_strtoul(buf, 10, &val);
4085         if (err)
4086                 return err;
4087         if (val > perf_max_counters)
4088                 return -EINVAL;
4089
4090         spin_lock(&perf_resource_lock);
4091         perf_reserved_percpu = val;
4092         for_each_online_cpu(cpu) {
4093                 cpuctx = &per_cpu(perf_cpu_context, cpu);
4094                 spin_lock_irq(&cpuctx->ctx.lock);
4095                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4096                           perf_max_counters - perf_reserved_percpu);
4097                 cpuctx->max_pertask = mpt;
4098                 spin_unlock_irq(&cpuctx->ctx.lock);
4099         }
4100         spin_unlock(&perf_resource_lock);
4101
4102         return count;
4103 }
4104
4105 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4106 {
4107         return sprintf(buf, "%d\n", perf_overcommit);
4108 }
4109
4110 static ssize_t
4111 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4112 {
4113         unsigned long val;
4114         int err;
4115
4116         err = strict_strtoul(buf, 10, &val);
4117         if (err)
4118                 return err;
4119         if (val > 1)
4120                 return -EINVAL;
4121
4122         spin_lock(&perf_resource_lock);
4123         perf_overcommit = val;
4124         spin_unlock(&perf_resource_lock);
4125
4126         return count;
4127 }
4128
4129 static SYSDEV_CLASS_ATTR(
4130                                 reserve_percpu,
4131                                 0644,
4132                                 perf_show_reserve_percpu,
4133                                 perf_set_reserve_percpu
4134                         );
4135
4136 static SYSDEV_CLASS_ATTR(
4137                                 overcommit,
4138                                 0644,
4139                                 perf_show_overcommit,
4140                                 perf_set_overcommit
4141                         );
4142
4143 static struct attribute *perfclass_attrs[] = {
4144         &attr_reserve_percpu.attr,
4145         &attr_overcommit.attr,
4146         NULL
4147 };
4148
4149 static struct attribute_group perfclass_attr_group = {
4150         .attrs                  = perfclass_attrs,
4151         .name                   = "perf_counters",
4152 };
4153
4154 static int __init perf_counter_sysfs_init(void)
4155 {
4156         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4157                                   &perfclass_attr_group);
4158 }
4159 device_initcall(perf_counter_sysfs_init);