tracing/events: Move TRACE_SYSTEM outside of include guard
[safe/jmp/linux-2.6] / include / trace / events / kmem.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM kmem
3
4 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_KMEM_H
6
7 #include <linux/types.h>
8 #include <linux/tracepoint.h>
9
10 /*
11  * The order of these masks is important. Matching masks will be seen
12  * first and the left over flags will end up showing by themselves.
13  *
14  * For example, if we have GFP_KERNEL before GFP_USER we wil get:
15  *
16  *  GFP_KERNEL|GFP_HARDWALL
17  *
18  * Thus most bits set go first.
19  */
20 #define show_gfp_flags(flags)                                           \
21         (flags) ? __print_flags(flags, "|",                             \
22         {(unsigned long)GFP_HIGHUSER_MOVABLE,   "GFP_HIGHUSER_MOVABLE"}, \
23         {(unsigned long)GFP_HIGHUSER,           "GFP_HIGHUSER"},        \
24         {(unsigned long)GFP_USER,               "GFP_USER"},            \
25         {(unsigned long)GFP_TEMPORARY,          "GFP_TEMPORARY"},       \
26         {(unsigned long)GFP_KERNEL,             "GFP_KERNEL"},          \
27         {(unsigned long)GFP_NOFS,               "GFP_NOFS"},            \
28         {(unsigned long)GFP_ATOMIC,             "GFP_ATOMIC"},          \
29         {(unsigned long)GFP_NOIO,               "GFP_NOIO"},            \
30         {(unsigned long)__GFP_HIGH,             "GFP_HIGH"},            \
31         {(unsigned long)__GFP_WAIT,             "GFP_WAIT"},            \
32         {(unsigned long)__GFP_IO,               "GFP_IO"},              \
33         {(unsigned long)__GFP_COLD,             "GFP_COLD"},            \
34         {(unsigned long)__GFP_NOWARN,           "GFP_NOWARN"},          \
35         {(unsigned long)__GFP_REPEAT,           "GFP_REPEAT"},          \
36         {(unsigned long)__GFP_NOFAIL,           "GFP_NOFAIL"},          \
37         {(unsigned long)__GFP_NORETRY,          "GFP_NORETRY"},         \
38         {(unsigned long)__GFP_COMP,             "GFP_COMP"},            \
39         {(unsigned long)__GFP_ZERO,             "GFP_ZERO"},            \
40         {(unsigned long)__GFP_NOMEMALLOC,       "GFP_NOMEMALLOC"},      \
41         {(unsigned long)__GFP_HARDWALL,         "GFP_HARDWALL"},        \
42         {(unsigned long)__GFP_THISNODE,         "GFP_THISNODE"},        \
43         {(unsigned long)__GFP_RECLAIMABLE,      "GFP_RECLAIMABLE"},     \
44         {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"}          \
45         ) : "GFP_NOWAIT"
46
/*
 * Tracepoint: kmalloc
 *
 * Records one heap allocation event: the caller's address (call_site),
 * the returned object pointer, the size the caller requested
 * (bytes_req), the size recorded as actually allocated (bytes_alloc --
 * presumably >= bytes_req after allocator rounding; confirm against the
 * emitting allocator), and the gfp flags, which are pretty-printed by
 * show_gfp_flags() in the trace output.
 */
TRACE_EVENT(kmalloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);
80
/*
 * Tracepoint: kmem_cache_alloc
 *
 * Same payload and output format as the kmalloc event (call_site, ptr,
 * bytes_req, bytes_alloc, gfp_flags), but emitted for slab-cache
 * allocations (kmem_cache_alloc() callers, going by the event name --
 * the emit site is outside this header).
 */
TRACE_EVENT(kmem_cache_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
);
114
/*
 * Tracepoint: kmalloc_node
 *
 * NUMA-aware variant of the kmalloc event: identical payload plus an
 * int "node" field (presumably the requested NUMA node id -- confirm
 * at the emit site), appended as "node=%d" in the trace output.
 */
TRACE_EVENT(kmalloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            node            )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
                __entry->node           = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);
152
/*
 * Tracepoint: kmem_cache_alloc_node
 *
 * NUMA-aware variant of the kmem_cache_alloc event: identical payload
 * plus an int "node" field, appended as "node=%d" in the trace output.
 */
TRACE_EVENT(kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            node            )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
                __entry->bytes_req      = bytes_req;
                __entry->bytes_alloc    = bytes_alloc;
                __entry->gfp_flags      = gfp_flags;
                __entry->node           = node;
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
);
190
/*
 * Tracepoint: kfree
 *
 * Records one free event: the caller's address (call_site) and the
 * pointer being released.  No size or flags are captured for frees.
 */
TRACE_EVENT(kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
209
/*
 * Tracepoint: kmem_cache_free
 *
 * Same payload and output format as the kfree event (call_site, ptr),
 * but emitted for slab-cache frees (kmem_cache_free() callers, going
 * by the event name -- the emit site is outside this header).
 */
TRACE_EVENT(kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field(        unsigned long,  call_site       )
                __field(        const void *,   ptr             )
        ),

        TP_fast_assign(
                __entry->call_site      = call_site;
                __entry->ptr            = ptr;
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);
228 #endif /* _TRACE_KMEM_H */
229
230 /* This part must be outside protection */
231 #include <trace/define_trace.h>