MIPS: BCM63xx: Add serial driver for bcm63xx integrated UART.
[safe/jmp/linux-2.6] include/linux/compiler.h
index dcae0c8..04fb513 100644
@@ -36,14 +36,12 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 
 #ifdef __KERNEL__
 
-#if __GNUC__ >= 4
-# include <linux/compiler-gcc4.h>
-#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2
-# include <linux/compiler-gcc3.h>
-#else
-# error Sorry, your compiler is too old/not recognized.
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
 #endif
 
+#define notrace __attribute__((no_instrument_function))
+
 /* Intel compiler defines __GNUC__. So we will overwrite implementations
  * coming from above header files here
  */
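
Side note (not part of the patch): notrace expands to GCC's no_instrument_function attribute, which keeps a function out of the enter/exit hooks emitted under -finstrument-functions. A minimal user-space sketch of that mechanism follows; the helper names are invented for illustration.

/*
 * Build with: gcc -finstrument-functions notrace_demo.c
 * Ordinary functions get enter/exit hook calls; functions carrying the
 * no_instrument_function attribute (the kernel's "notrace") do not.
 */
#include <stdio.h>

#define notrace __attribute__((no_instrument_function))

/* The hooks themselves must be notrace, or they would recurse. */
notrace void __cyg_profile_func_enter(void *this_fn, void *call_site)
{
	fprintf(stderr, "enter %p\n", this_fn);
}

notrace void __cyg_profile_func_exit(void *this_fn, void *call_site)
{
	fprintf(stderr, "exit  %p\n", this_fn);
}

static int traced_helper(int x)		/* instrumented: hooks fire */
{
	return x * 2;
}

static notrace int quiet_helper(int x)	/* skipped by the instrumentation */
{
	return x + 1;
}

int main(void)
{
	return traced_helper(1) + quiet_helper(2) == 5 ? 0 : 1;
}
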
@@ -57,8 +55,89 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * specific implementations come from the above header files
  */
 
-#define likely(x)      __builtin_expect(!!(x), 1)
-#define unlikely(x)    __builtin_expect(!!(x), 0)
+struct ftrace_branch_data {
+       const char *func;
+       const char *file;
+       unsigned line;
+       union {
+               struct {
+                       unsigned long correct;
+                       unsigned long incorrect;
+               };
+               struct {
+                       unsigned long miss;
+                       unsigned long hit;
+               };
+               unsigned long miss_hit[2];
+       };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x)      __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x)    __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({                                 \
+                       int ______r;                                    \
+                       static struct ftrace_branch_data                \
+                               __attribute__((__aligned__(4)))         \
+                               __attribute__((section("_ftrace_annotated_branch"))) \
+                               ______f = {                             \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+                       ______r = likely_notrace(x);                    \
+                       ftrace_likely_update(&______f, ______r, expect); \
+                       ______r;                                        \
+               })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same.  This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+#  define likely(x)    (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+#  define unlikely(x)  (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+       if (__builtin_constant_p((cond)) ? !!(cond) :                   \
+       ({                                                              \
+               int ______r;                                            \
+               static struct ftrace_branch_data                        \
+                       __attribute__((__aligned__(4)))                 \
+                       __attribute__((section("_ftrace_branch")))      \
+                       ______f = {                                     \
+                               .func = __func__,                       \
+                               .file = __FILE__,                       \
+                               .line = __LINE__,                       \
+                       };                                              \
+               ______r = !!(cond);                                     \
+               ______f.miss_hit[______r]++;                                    \
+               ______r;                                                \
+       }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x)     __builtin_expect(!!(x), 1)
+# define unlikely(x)   __builtin_expect(!!(x), 0)
+#endif
 
 /* Optimization barrier */
 #ifndef barrier
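
A note on the hunk above: with CONFIG_TRACE_BRANCH_PROFILING enabled, every likely()/unlikely() expansion gets its own static ftrace_branch_data, placed in the _ftrace_annotated_branch section so the tracer can later walk all records, and ftrace_likely_update() counts whether the annotation matched the actual outcome. Below is a stripped-down user-space sketch of the same per-call-site bookkeeping, assuming GCC statement expressions and GNU ld's automatic __start_/__stop_ symbols for sections named like C identifiers; every name in it is invented for the sketch.

/*
 * Per-call-site branch accounting in miniature.
 */
#include <stdio.h>

struct branch_site {
	const char *func;
	const char *file;
	unsigned line;
	unsigned long correct;		/* annotation matched the outcome */
	unsigned long incorrect;	/* annotation was wrong */
};

static void branch_update(struct branch_site *s, int val, int expect)
{
	if (val == expect)
		s->correct++;
	else
		s->incorrect++;
}

/* One static record per expansion, same shape as __branch_check__(). */
#define my_likely(x) ({							\
	static struct branch_site					\
		__attribute__((section("my_branch"), aligned(4)))	\
		___site = {						\
			.func = __func__,				\
			.file = __FILE__,				\
			.line = __LINE__,				\
		};							\
	int ___r = !!(x);						\
	branch_update(&___site, ___r, 1);				\
	__builtin_expect(___r, 1);					\
})

/* Provided by GNU ld for any section whose name is a C identifier. */
extern struct branch_site __start_my_branch[], __stop_my_branch[];

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (my_likely(i < 9))	/* the annotation is wrong once */
			continue;

	/* The kernel walks _ftrace_annotated_branch the same way and
	 * reports the counters through the tracing interface. */
	for (struct branch_site *s = __start_my_branch; s < __stop_my_branch; s++)
		printf("%s:%u correct=%lu incorrect=%lu\n",
		       s->file, s->line, s->correct, s->incorrect);
	return 0;
}
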
@@ -182,4 +261,23 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __section(S) __attribute__ ((__section__(#S)))
 #endif
 
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/*
+ * Prevent the compiler from merging or refetching accesses.  The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering.  One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * This macro does absolutely -nothing- to prevent the CPU from reordering,
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
+ */
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+
 #endif /* __LINUX_COMPILER_H */
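
For completeness, a user-space sketch of the two helpers added by the last hunk: ACCESS_ONCE() forces the compiler to reload or rewrite the variable at every use (it does nothing about CPU-level reordering), and __same_type() is a compile-time type predicate. The signal handler below only stands in for the irq/NMI case mentioned in the comment; the names are hypothetical.

/*
 * ACCESS_ONCE() and __same_type() outside the kernel, for illustration.
 */
#include <signal.h>
#include <unistd.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))

static int stop;		/* written by the handler, read by the loop */

static void handle_sigint(int sig)
{
	(void)sig;
	ACCESS_ONCE(stop) = 1;	/* the store cannot be elided or merged */
}

int main(void)
{
	signal(SIGINT, handle_sigint);

	/*
	 * Without ACCESS_ONCE() the compiler may load 'stop' once, cache it
	 * in a register and spin forever after the handler has run.  The
	 * volatile cast forces a fresh load on every iteration.
	 */
	while (!ACCESS_ONCE(stop))
		pause();

	/* __same_type() yields an integer constant, usable at compile time. */
	_Static_assert(__same_type(stop, int), "stop is an int");
	_Static_assert(!__same_type(stop, long), "int and long are distinct");
	return 0;
}
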