perf_counter: Add optional hw_perf_group_sched_in arch function
include/linux/perf_counter.h
/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_CPU_CYCLES           =  0,
        PERF_COUNT_INSTRUCTIONS         =  1,
        PERF_COUNT_CACHE_REFERENCES     =  2,
        PERF_COUNT_CACHE_MISSES         =  3,
        PERF_COUNT_BRANCH_INSTRUCTIONS  =  4,
        PERF_COUNT_BRANCH_MISSES        =  5,
        PERF_COUNT_BUS_CYCLES           =  6,

        PERF_HW_EVENTS_MAX              =  7,

        /*
         * Special "software" counters provided by the kernel, even if
         * the hardware does not support performance counters. These
         * counters measure various physical and sw events of the
         * kernel (and allow the profiling of them as well):
         */
        PERF_COUNT_CPU_CLOCK            = -1,
        PERF_COUNT_TASK_CLOCK           = -2,
        PERF_COUNT_PAGE_FAULTS          = -3,
        PERF_COUNT_CONTEXT_SWITCHES     = -4,
        PERF_COUNT_CPU_MIGRATIONS       = -5,

        PERF_SW_EVENTS_MIN              = -6,
};
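
/*
 * Illustrative sketch, not part of the original header: non-negative
 * hw_event.type values select the generalized hardware events above,
 * negative values select the kernel software counters, and the two
 * MAX/MIN constants only bound those ranges. A hypothetical validity
 * check for a non-raw type value could look like this:
 *
 *      static inline int perf_event_type_valid(s64 type)
 *      {
 *              return type > PERF_SW_EVENTS_MIN && type < PERF_HW_EVENTS_MAX;
 *      }
 */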

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
        PERF_RECORD_SIMPLE              =  0,
        PERF_RECORD_IRQ                 =  1,
        PERF_RECORD_GROUP               =  2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
        s64                     type;

        u64                     irq_period;
        u32                     record_type;

        u32                     disabled     :  1, /* off by default      */
                                nmi          :  1, /* NMI sampling        */
                                raw          :  1, /* raw event type      */
                                inherit      :  1, /* children inherit it */
                                __reserved_1 : 28;

        u64                     __reserved_2;
};
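
/*
 * A minimal user-space sketch, not part of this header, assuming the
 * four-argument sys_perf_counter_open(hw_event, pid, cpu, group_fd)
 * call of this patch series, with pid 0 = current task, cpu -1 = any
 * CPU and group_fd -1 = start a new group: count CPU cycles and take
 * an IRQ-driven sample every 100000 cycles, starting disabled:
 *
 *      struct perf_counter_hw_event hw_event = {
 *              .type           = PERF_COUNT_CPU_CYCLES,
 *              .irq_period     = 100000,
 *              .record_type    = PERF_RECORD_IRQ,
 *              .disabled       = 1,
 *      };
 *
 *      int fd = sys_perf_counter_open(&hw_event, 0, -1, -1);
 */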

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        u64                             config;
        unsigned long                   config_base;
        unsigned long                   counter_base;
        int                             nmi;
        unsigned int                    idx;
        atomic64_t                      prev_count;
        u64                             irq_period;
        atomic64_t                      period_left;
#endif
};
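
/*
 * A simplified sketch, modeled loosely on the x86 backend and not part
 * of this header, of how prev_count and period_left are meant to be
 * used: the arch code re-reads the raw hardware counter, folds the
 * delta since the last read into the generic counter->count and debits
 * period_left. read_raw_hw_count() is a hypothetical stand-in for the
 * arch-specific counter read:
 *
 *      u64 prev, now, delta;
 *
 *      do {
 *              prev = atomic64_read(&hwc->prev_count);
 *              now  = read_raw_hw_count(hwc->counter_base + hwc->idx);
 *      } while (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 *
 *      delta = now - prev;
 *      atomic64_add(delta, &counter->count);
 *      atomic64_sub(delta, &hwc->period_left);
 */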

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN                2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
        int                             len;
        int                             rd_idx;
        int                             overrun;
        u8                              data[PERF_DATA_BUFLEN];
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
        int (*enable)                   (struct perf_counter *counter);
        void (*disable)                 (struct perf_counter *counter);
        void (*read)                    (struct perf_counter *counter);
};
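
/*
 * A minimal sketch, not part of this header, of how an architecture is
 * expected to provide these ops: hw_perf_counter_init() (declared
 * further down) sets up counter->hw and returns a static ops table, or
 * NULL if the event cannot be counted. The arch_pmc_*() names are
 * hypothetical placeholders for the real arch-specific callbacks:
 *
 *      static const struct hw_perf_counter_ops arch_pmc_ops = {
 *              .enable         = arch_pmc_enable,
 *              .disable        = arch_pmc_disable,
 *              .read           = arch_pmc_read,
 *      };
 *
 *      const struct hw_perf_counter_ops *
 *      hw_perf_counter_init(struct perf_counter *counter)
 *      {
 *              if (arch_pmc_setup(counter))
 *                      return NULL;
 *              return &arch_pmc_ops;
 *      }
 */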

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        struct list_head                list_entry;
        struct list_head                sibling_list;
        struct perf_counter             *group_leader;
        const struct hw_perf_counter_ops *hw_ops;

        enum perf_counter_active_state  state;
        atomic64_t                      count;

        struct perf_counter_hw_event    hw_event;
        struct hw_perf_counter          hw;

        struct perf_counter_context     *ctx;
        struct task_struct              *task;
        struct file                     *filp;

        struct perf_counter             *parent;
        /*
         * Protect attach/detach:
         */
        struct mutex                    mutex;

        int                             oncpu;
        int                             cpu;

        /* read() / irq related data */
        wait_queue_head_t               waitq;
        /* optional: for NMIs */
        int                             wakeup_pending;
        struct perf_data                *irqdata;
        struct perf_data                *usrdata;
        struct perf_data                data[2];
#endif
};
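
/*
 * Illustrative sketch, not part of this header: sibling counters of a
 * group are chained on their leader's sibling_list via list_entry, so
 * group-wide operations (such as an arch hw_perf_group_sched_in()
 * implementation) typically handle the leader first and then walk the
 * siblings; arch_schedule_counter() is a hypothetical helper:
 *
 *      struct perf_counter *counter;
 *
 *      arch_schedule_counter(group_leader);
 *      list_for_each_entry(counter, &group_leader->sibling_list, list_entry)
 *              arch_schedule_counter(counter);
 */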

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
        /*
         * Protect the list of counters:
         */
        spinlock_t              lock;

        struct list_head        counter_list;
        int                     nr_counters;
        int                     nr_active;
        struct task_struct      *task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
        struct perf_counter_context     ctx;
        struct perf_counter_context     *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu);

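/*
 * A minimal sketch, not part of this header, of the expected default
 * for the optional arch hook above, assuming the generic code treats a
 * zero return as "fall back to scheduling the group counter by
 * counter", a positive return as "the arch scheduled the whole group
 * in" and a negative return as an error:
 *
 *      int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
 *                     struct perf_cpu_context *cpuctx,
 *                     struct perf_counter_context *ctx, int cpu)
 *      {
 *              return 0;
 *      }
 *
 * An architecture that can commit a whole counter group to the PMU
 * atomically overrides this to schedule the leader and all of its
 * siblings in one go, or to reject the group if it does not fit.
 */
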
#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)          { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)               { }
static inline void perf_counter_init_task(struct task_struct *child)    { }
static inline void perf_counter_exit_task(struct task_struct *child)    { }
static inline void perf_counter_notify(struct pt_regs *regs)            { }
static inline void perf_counter_print_debug(void)                       { }
static inline void hw_perf_restore(u64 ctrl)                             { }
static inline u64 hw_perf_save_disable(void)                  { return 0; }
static inline int perf_counter_task_disable(void)       { return -EINVAL; }
static inline int perf_counter_task_enable(void)        { return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */