perf_counter: Amend cleanup in fork() fail
include/linux/perf_counter.h
/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,

        /*
         * available TYPE space, raw is the max value.
         */

        PERF_TYPE_RAW                   = 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_CPU_CYCLES           = 0,
        PERF_COUNT_INSTRUCTIONS         = 1,
        PERF_COUNT_CACHE_REFERENCES     = 2,
        PERF_COUNT_CACHE_MISSES         = 3,
        PERF_COUNT_BRANCH_INSTRUCTIONS  = 4,
        PERF_COUNT_BRANCH_MISSES        = 5,
        PERF_COUNT_BUS_CYCLES           = 6,

        PERF_HW_EVENTS_MAX              = 7,
};
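
/*
 * For illustration, a minimal user-space sketch that counts the
 * current task's instructions (hedged: __NR_perf_counter_open is
 * architecture-specific, e.g. 298 on x86_64 at this time, and the
 * type/event packing uses the PERF_COUNTER_*_SHIFT macros below):
 *
 *      struct perf_counter_hw_event hw_event;
 *      u64 count;
 *      int fd;
 *
 *      memset(&hw_event, 0, sizeof(hw_event));
 *      hw_event.config = ((u64)PERF_TYPE_HARDWARE << PERF_COUNTER_TYPE_SHIFT) |
 *                        PERF_COUNT_INSTRUCTIONS;
 *
 *      fd = syscall(__NR_perf_counter_open, &hw_event,
 *                   0, -1, -1, 0);     pid 0: this task; cpu -1: any;
 *                                      group_fd -1: no group; flags 0
 *      ... run the workload ...
 *      read(fd, &count, sizeof(count));
 */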

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum sw_event_ids {
        PERF_COUNT_CPU_CLOCK            = 0,
        PERF_COUNT_TASK_CLOCK           = 1,
        PERF_COUNT_PAGE_FAULTS          = 2,
        PERF_COUNT_CONTEXT_SWITCHES     = 3,
        PERF_COUNT_CPU_MIGRATIONS       = 4,
        PERF_COUNT_PAGE_FAULTS_MIN      = 5,
        PERF_COUNT_PAGE_FAULTS_MAJ      = 6,

        PERF_SW_EVENTS_MAX              = 7,
};
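
/*
 * E.g. the config word for a task-clock software counter:
 *
 *      config = ((u64)PERF_TYPE_SOFTWARE << PERF_COUNTER_TYPE_SHIFT) |
 *               PERF_COUNT_TASK_CLOCK;
 */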

#define __PERF_COUNTER_MASK(name)                       \
        (((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<  \
         PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS           1
#define PERF_COUNTER_RAW_SHIFT          63
#define PERF_COUNTER_RAW_MASK           __PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS        63
#define PERF_COUNTER_CONFIG_SHIFT       0
#define PERF_COUNTER_CONFIG_MASK        __PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS          7
#define PERF_COUNTER_TYPE_SHIFT         56
#define PERF_COUNTER_TYPE_MASK          __PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS         56
#define PERF_COUNTER_EVENT_SHIFT        0
#define PERF_COUNTER_EVENT_MASK         __PERF_COUNTER_MASK(EVENT)
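
/*
 * I.e. the config word decomposes as follows (the kernel-internal
 * perf_event_raw()/perf_event_type()/perf_event_id() helpers below
 * implement exactly this):
 *
 *      raw flag:  bit  63      config & PERF_COUNTER_RAW_MASK
 *      type:      bits 56-62   (config & PERF_COUNTER_TYPE_MASK) >> 56
 *      event id:  bits  0-55   config & PERF_COUNTER_EVENT_MASK
 */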

/*
 * Bits that can be set in hw_event.record_type to request information
 * in the overflow packets.
 */
enum perf_counter_record_format {
        PERF_RECORD_IP                  = 1U << 0,
        PERF_RECORD_TID                 = 1U << 1,
        PERF_RECORD_TIME                = 1U << 2,
        PERF_RECORD_ADDR                = 1U << 3,
        PERF_RECORD_GROUP               = 1U << 4,
        PERF_RECORD_CALLCHAIN           = 1U << 5,
        PERF_RECORD_CONFIG              = 1U << 6,
        PERF_RECORD_CPU                 = 1U << 7,
};

/*
 * Bits that can be set in hw_event.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED  =  1,
        PERF_FORMAT_TOTAL_TIME_RUNNING  =  2,
};
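
/*
 * E.g. with both format bits set, a read() on the counter fd returns,
 * in this order (all quantities are u64):
 *
 *      struct {
 *              u64     count;
 *              u64     time_enabled;   PERF_FORMAT_TOTAL_TIME_ENABLED
 *              u64     time_running;   PERF_FORMAT_TOTAL_TIME_RUNNING
 *      };
 */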

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
        /*
         * The MSB of the config word signifies if the rest contains cpu
         * specific (raw) counter configuration data; if unset, the next
         * 7 bits are an event type and the rest of the bits are the event
         * identifier.
         */
        __u64                   config;

        union {
                __u64           irq_period;
                __u64           irq_freq;
        };

        __u32                   record_type;
        __u32                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                nmi            :  1, /* NMI sampling          */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                munmap         :  1, /* include munmap data   */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */

                                __reserved_1   : 51;

        __u32                   wakeup_events;  /* wakeup every n events */
        __u32                   __reserved_2;

        __u64                   __reserved_3;
        __u64                   __reserved_4;
};
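
/*
 * A hedged sampling example: sample cycles every 10000 events,
 * recording the instruction pointer and pid/tid of each overflow,
 * and waking up the reader every 16 overflow events:
 *
 *      memset(&hw_event, 0, sizeof(hw_event));
 *      hw_event.config        = PERF_COUNT_CPU_CYCLES;
 *      hw_event.irq_period    = 10000;
 *      hw_event.record_type   = PERF_RECORD_IP | PERF_RECORD_TID;
 *      hw_event.wakeup_events = 16;
 */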

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE         _IOW('$', 0, u32)
#define PERF_COUNTER_IOC_DISABLE        _IOW('$', 1, u32)
#define PERF_COUNTER_IOC_REFRESH        _IOW('$', 2, u32)
#define PERF_COUNTER_IOC_RESET          _IOW('$', 3, u32)

enum perf_counter_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
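
/*
 * E.g., to disable a single counter, or the counter together with its
 * group siblings when PERF_IOC_FLAG_GROUP is passed as the argument:
 *
 *      ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
 *      ioctl(fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */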

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw counters in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier()
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware counter identifier */
        __s64   offset;                 /* add to hardware counter value */

        /*
         * Control data for the mmap() data buffer.
         *
         * On SMP-capable platforms, user-space should issue an rmb()
         * after reading this value -- see perf_counter_wakeup().
         */
        __u32   data_head;              /* head in the data section */
};
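
/*
 * A sketch of the user-space side (assuming, per the implementation of
 * the era, an mmap() of one control page plus a power-of-two number of
 * data pages; ring-buffer wrap-around is ignored here for brevity):
 *
 *      struct perf_counter_mmap_page *pc = base;
 *      __u32 head = pc->data_head;
 *
 *      rmb();
 *      ... consume records between the previously seen head and head ...
 */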

#define PERF_EVENT_MISC_CPUMODE_MASK    (3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_EVENT_MISC_KERNEL          (1 << 0)
#define PERF_EVENT_MISC_USER            (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR      (3 << 0)
#define PERF_EVENT_MISC_OVERFLOW        (1 << 2)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_EVENT_MMAP                 = 1,
        PERF_EVENT_MUNMAP               = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_EVENT_COMM                 = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             irq_period;
         * };
         */
        PERF_EVENT_PERIOD               = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         * };
         */
        PERF_EVENT_THROTTLE             = 5,
        PERF_EVENT_UNTHROTTLE           = 6,

        /*
         * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
         * will be PERF_RECORD_*
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_RECORD_IP
         *      { u32                   pid, tid; } && PERF_RECORD_TID
         *      { u64                   time;     } && PERF_RECORD_TIME
         *      { u64                   addr;     } && PERF_RECORD_ADDR
         *      { u64                   config;   } && PERF_RECORD_CONFIG
         *      { u32                   cpu, res; } && PERF_RECORD_CPU
         *
         *      { u64                   nr;
         *        { u64 event, val; }   cnt[nr];  } && PERF_RECORD_GROUP
         *
         *      { u16                   nr,
         *                              hv,
         *                              kernel,
         *                              user;
         *        u64                   ips[nr];  } && PERF_RECORD_CALLCHAIN
         * };
         */
};
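
/*
 * A sketch of a reader walking the records in the data section
 * (header.size covers the whole record, so unknown types can be
 * skipped generically; wrap-around is again ignored for brevity):
 *
 *      struct perf_event_header *hdr = data;
 *
 *      while ((void *)hdr < data + head) {
 *              if (hdr->misc & PERF_EVENT_MISC_OVERFLOW)
 *                      ... parse a sample, hdr->type is a mask of
 *                          PERF_RECORD_* bits ...
 *              else if (hdr->type == PERF_EVENT_MMAP)
 *                      ... update the executable-mapping table ...
 *              hdr = (void *)hdr + hdr->size;
 *      }
 */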

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
        return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
                PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        union {
                struct { /* hardware */
                        u64                             config;
                        unsigned long                   config_base;
                        unsigned long                   counter_base;
                        int                             nmi;
                        int                             idx;
                };
                union { /* software */
                        atomic64_t                      count;
                        struct hrtimer                  hrtimer;
                };
        };
        atomic64_t                      prev_count;
        u64                             irq_period;
        atomic64_t                      period_left;
        u64                             interrupts;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        int (*enable)                   (struct perf_counter *counter);
        void (*disable)                 (struct perf_counter *counter);
        void (*read)                    (struct perf_counter *counter);
        void (*unthrottle)              (struct perf_counter *counter);
};
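
/*
 * A hardware or software counter provider fills this in along these
 * lines (a sketch, function names hypothetical) and returns it, e.g.
 * from hw_perf_counter_init():
 *
 *      static const struct pmu my_pmu = {
 *              .enable         = my_pmu_enable,
 *              .disable        = my_pmu_disable,
 *              .read           = my_pmu_read,
 *              .unthrottle     = my_pmu_unthrottle,
 *      };
 */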

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
        PERF_COUNTER_STATE_ERROR        = -2,
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

struct file;

struct perf_mmap_data {
        struct rcu_head                 rcu_head;
        int                             nr_pages;       /* nr of data pages  */
        int                             nr_locked;      /* nr pages mlocked  */

        atomic_t                        poll;           /* POLL_ for wakeups */
        atomic_t                        head;           /* write position    */
        atomic_t                        events;         /* event limit       */

        atomic_t                        done_head;      /* completed head    */
        atomic_t                        lock;           /* concurrent writes */

        atomic_t                        wakeup;         /* needs a wakeup    */

        struct perf_counter_mmap_page   *user_page;
        void                            *data_pages[0];
};

struct perf_pending_entry {
        struct perf_pending_entry *next;
        void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        struct list_head                list_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        int                             nr_siblings;
        struct perf_counter             *group_leader;
        const struct pmu                *pmu;

        enum perf_counter_active_state  state;
        enum perf_counter_active_state  prev_state;
        atomic64_t                      count;

        /*
         * These are the total time in nanoseconds that the counter
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task counter)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the counter is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the counter was enabled
         * tstamp_running: the notional time when the counter was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      counter was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        struct perf_counter_hw_event    hw_event;
        struct hw_perf_counter          hw;

        struct perf_counter_context     *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * counters have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_counter             *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        struct perf_mmap_data           *data;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct perf_pending_entry       pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_counter *);
        struct rcu_head                 rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
        /*
         * Protect the states of the counters in the list,
         * nr_active, and the list:
         */
        spinlock_t              lock;
        /*
         * Protect the list of counters.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex            mutex;

        struct list_head        counter_list;
        struct list_head        event_list;
        int                     nr_counters;
        int                     nr_active;
        int                     is_active;
        atomic_t                refcount;
        struct task_struct      *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                     time;
        u64                     timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_counter_context *parent_ctx;
        u64                     parent_gen;
        u64                     generation;
        struct rcu_head         rcu_head;
};
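
/*
 * Schematically, per the rules above: a reader of counter_list takes
 * either ctx->mutex or ctx->lock; a writer must take both (a sketch,
 * ignoring the irq-safety of the spinlock):
 *
 *      mutex_lock(&ctx->mutex);
 *      spin_lock(&ctx->lock);
 *      list_add_tail(&counter->list_entry, &ctx->counter_list);
 *      spin_unlock(&ctx->lock);
 *      mutex_unlock(&ctx->mutex);
 */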

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
        struct perf_counter_context     ctx;
        struct perf_counter_context     *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
        int                             exclusive;

        /*
         * Recursion avoidance:
         *
         * task, softirq, irq, nmi context
         */
        int                     recursion[4];
};

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
                                        struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern int perf_counter_overflow(struct perf_counter *counter,
                                 int nmi, struct pt_regs *regs, u64 addr);
/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
        return !perf_event_raw(&counter->hw_event) &&
                perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

extern void perf_counter_mmap(unsigned long addr, unsigned long len,
                              unsigned long pgoff, struct file *file);

extern void perf_counter_munmap(unsigned long addr, unsigned long len,
                                unsigned long pgoff, struct file *file);

extern void perf_counter_comm(struct task_struct *tsk);

#define MAX_STACK_DEPTH         255

struct perf_callchain_entry {
        u16     nr, hv, kernel, user;
        u64     ip[MAX_STACK_DEPTH];
};

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_priv;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_limit;

extern void perf_counter_init(void);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_EVENT_MISC_USER : \
                                 PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
                            struct task_struct *next, int cpu)          { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)               { }
static inline int perf_counter_init_task(struct task_struct *child)     { return 0; }
static inline void perf_counter_exit_task(struct task_struct *child)    { }
static inline void perf_counter_free_task(struct task_struct *task)     { }
static inline void perf_counter_do_pending(void)                        { }
static inline void perf_counter_print_debug(void)                       { }
static inline void perf_disable(void)                                   { }
static inline void perf_enable(void)                                    { }
static inline int perf_counter_task_disable(void)       { return -EINVAL; }
static inline int perf_counter_task_enable(void)        { return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
                     struct pt_regs *regs, u64 addr)                    { }

static inline void
perf_counter_mmap(unsigned long addr, unsigned long len,
                  unsigned long pgoff, struct file *file)               { }

static inline void
perf_counter_munmap(unsigned long addr, unsigned long len,
                    unsigned long pgoff, struct file *file)             { }

static inline void perf_counter_comm(struct task_struct *tsk)           { }
static inline void perf_counter_init(void)                              { }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */