Merge branches 'x86/acpi', 'x86/asm', 'x86/cpudetect', 'x86/crashdump', 'x86/debug...
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 5dd5813..231bdd3 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
 
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/percpu.h>
+#include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/list.h>
 
-#define CALL_BACK              5
-
-#define JMPFWD                 0x03eb
-
-static unsigned short ftrace_jmp = JMPFWD;
-
-struct ftrace_record {
-       struct dyn_ftrace       rec;
-       int                     failed;
-} __attribute__((packed));
-
-struct ftrace_page {
-       struct ftrace_page      *next;
-       int                     index;
-       struct ftrace_record    records[];
-} __attribute__((packed));
-
-#define ENTRIES_PER_PAGE \
-  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
+#include <asm/ftrace.h>
+#include <asm/nops.h>
+#include <asm/nmi.h>
 
-/* estimate from running different kernels */
-#define NR_TO_INIT             10000
 
-#define MCOUNT_ADDR ((long)(&mcount))
+#ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
-       char code[5];
+       char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
 };
 
-static struct ftrace_page      *ftrace_pages_start;
-static struct ftrace_page      *ftrace_pages;
+static int ftrace_calc_offset(long ip, long addr)
+{
+       return (int)(addr - ip);
+}
 
-notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-       struct ftrace_record *rec;
-       unsigned short save;
+       static union ftrace_code_union calc;
+
+       calc.e8         = 0xe8;
+       calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
+
+       /*
+        * No locking needed, this must be called via kstop_machine
+        * which in essence is like running on a uniprocessor machine.
+        */
+       return calc.code;
+}
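
For reference (not part of the patch): 0xe8 is the x86 near-call opcode, and its 32-bit operand is a displacement relative to the first byte *after* the call instruction, which is why the offset is taken from ip + MCOUNT_INSN_SIZE. A standalone C sketch of the arithmetic, with made-up addresses:

        #include <assert.h>
        #include <stdio.h>

        #define MCOUNT_INSN_SIZE 5      /* size of an x86 call instruction */

        int main(void)
        {
                unsigned long ip   = 0xc0100000UL;  /* hypothetical call site */
                unsigned long addr = 0xc0200000UL;  /* hypothetical target    */
                int offset = (int)(addr - (ip + MCOUNT_INSN_SIZE));

                /* Decoding: target = address of the next insn + displacement */
                assert(ip + MCOUNT_INSN_SIZE + (unsigned long)offset == addr);
                printf("e8 %08x  ->  call 0x%lx\n", (unsigned int)offset, addr);
                return 0;
        }
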
+
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) Clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status;            /* holds return value of text write */
+static int mod_code_write;             /* set when NMI should do the write */
+static void *mod_code_ip;              /* holds the IP to write to */
+static void *mod_code_newcode;         /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+       int r;
+
+       r = snprintf(buf, size, "%u %u",
+                    nmi_wait_count,
+                    atomic_read(&nmi_update_count));
+       return r;
+}
+
+static void ftrace_mod_code(void)
+{
+       /*
+        * Yes, more than one CPU can be writing to mod_code_status
+        *    (and to the code itself).
+        * But if one were to fail, then they all should, and if one were
+        * to succeed, then they all should.
+        */
+       mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+                                            MCOUNT_INSN_SIZE);
+}
+
+void ftrace_nmi_enter(void)
+{
+       atomic_inc(&in_nmi);
+       /* Must have in_nmi seen before reading write flag */
+       smp_mb();
+       if (mod_code_write) {
+               ftrace_mod_code();
+               atomic_inc(&nmi_update_count);
+       }
+}
 
-       ip -= CALL_BACK;
-       save = *(short *)ip;
+void ftrace_nmi_exit(void)
+{
+       /* Finish all executions before clearing in_nmi */
+       smp_wmb();
+       atomic_dec(&in_nmi);
+}
 
-       /* If this was already converted, skip it */
-       if (save == JMPFWD)
-               return NULL;
+static void wait_for_nmi(void)
+{
+       int waited = 0;
 
-       if (ftrace_pages->index == ENTRIES_PER_PAGE) {
-               if (!ftrace_pages->next)
-                       return NULL;
-               ftrace_pages = ftrace_pages->next;
+       while (atomic_read(&in_nmi)) {
+               waited = 1;
+               cpu_relax();
        }
 
-       rec = &ftrace_pages->records[ftrace_pages->index++];
+       if (waited)
+               nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+       mod_code_ip = (void *)ip;
+       mod_code_newcode = new_code;
+
+       /* The buffers need to be visible before we let NMIs write them */
+       smp_wmb();
+
+       mod_code_write = 1;
+
+       /* Make sure write bit is visible before we wait on NMIs */
+       smp_mb();
 
-       return &rec->rec;
+       wait_for_nmi();
+
+       /* Make sure all running NMIs have finished before we write the code */
+       smp_mb();
+
+       ftrace_mod_code();
+
+       /* Make sure the write happens before clearing the bit */
+       smp_wmb();
+
+       mod_code_write = 0;
+
+       /* Make sure NMIs see the cleared bit */
+       smp_mb();
+
+       wait_for_nmi();
+
+       return mod_code_status;
 }
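
The same publish/flag/wait handshake, sketched with user-space C11 atomics standing in for the kernel barrier primitives (an illustrative analogue only; every name below is made up):

        #include <stdatomic.h>
        #include <stddef.h>
        #include <string.h>

        static _Atomic int in_handler;  /* plays the role of in_nmi         */
        static _Atomic int do_write;    /* plays the role of mod_code_write */
        static void *buf_ip;            /* plays the role of mod_code_ip    */
        static const void *buf_new;     /* plays mod_code_newcode           */

        static void modifier_side(void *ip, const void *newcode, size_t len)
        {
                buf_ip  = ip;
                buf_new = newcode;
                /* Publish the buffers before raising the flag (the smp_wmb()) */
                atomic_thread_fence(memory_order_release);

                atomic_store(&do_write, 1);         /* mod_code_write = 1 */
                while (atomic_load(&in_handler))    /* wait_for_nmi()     */
                        ;
                memcpy(buf_ip, buf_new, len);       /* ftrace_mod_code()  */

                atomic_store(&do_write, 0);         /* clear the flag     */
                while (atomic_load(&in_handler))    /* wait once more     */
                        ;
        }

        /* The handler side (not shown) increments in_handler on entry,
         * performs the same memcpy if do_write is set, and decrements
         * in_handler on exit -- mirroring ftrace_nmi_enter/exit above. */
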
 
-static int notrace
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+       return ftrace_nop;
+}
+
+static int
 ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
 {
-       unsigned short old = *(unsigned short *)old_code;
-       unsigned short new = *(unsigned short *)new_code;
-       unsigned short replaced;
-       int faulted = 0;
+       unsigned char replaced[MCOUNT_INSN_SIZE];
 
        /*
         * Note: Due to modules and __init, code can
         *  disappear and change, we need to protect against faulting
-        *  as well as code changing.
+        *  as well as code changing. We do this by using the
+        *  probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
-        * kstop_machine.
+        * kstop_machine, or before SMP starts.
         */
-       asm volatile (
-               "1: lock\n"
-               "   cmpxchg %w3, (%2)\n"
-               "2:\n"
-               ".section .fixup, \"ax\"\n"
-               "       movl $1, %0\n"
-               "3:     jmp 2b\n"
-               ".previous\n"
-               _ASM_EXTABLE(1b, 3b)
-               : "=r"(faulted), "=a"(replaced)
-               : "r"(ip), "r"(new), "0"(faulted), "a"(old)
-               : "memory");
+
+       /* read the text we want to modify */
+       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure it is what we expect it to be */
+       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+               return -EINVAL;
+
+       /* replace the text with the new text */
+       if (do_ftrace_mod_code(ip, new_code))
+               return -EPERM;
+
        sync_core();
 
-       if (replaced != old)
-               faulted = 2;
+       return 0;
+}
+
+int ftrace_make_nop(struct module *mod,
+                   struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned char *new, *old;
+       unsigned long ip = rec->ip;
 
-       return faulted;
+       old = ftrace_call_replace(ip, addr);
+       new = ftrace_nop_replace();
+
+       return ftrace_modify_code(rec->ip, old, new);
 }
 
-static int notrace ftrace_calc_offset(long ip)
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-       return (int)(MCOUNT_ADDR - ip);
+       unsigned char *new, *old;
+       unsigned long ip = rec->ip;
+
+       old = ftrace_nop_replace();
+       new = ftrace_call_replace(ip, addr);
+
+       return ftrace_modify_code(rec->ip, old, new);
 }
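
Concretely, enabling a site rewrites five bytes in place. Assuming the P6 nop was chosen at boot (see ftrace_dyn_arch_init() below), a hypothetical site at 0xc0100000 calling a handler at 0xc0200000 changes from:

        0f 1f 44 00 00          nopl   0x0(%eax,%eax,1)

to:

        e8 fb ff 0f 00          call   0xc0200000

where 0x000ffffb = 0xc0200000 - (0xc0100000 + 5), stored little-endian after the 0xe8 opcode.
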
 
-notrace void ftrace_code_disable(struct dyn_ftrace *rec)
+int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-       unsigned long ip;
-       union ftrace_code_union save;
-       struct ftrace_record *r =
-               container_of(rec, struct ftrace_record, rec);
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char old[MCOUNT_INSN_SIZE], *new;
+       int ret;
 
-       ip = rec->ip;
+       memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       ret = ftrace_modify_code(ip, old, new);
+
+       return ret;
+}
 
-       save.e8         = 0xe8;
-       save.offset     = ftrace_calc_offset(ip);
+int __init ftrace_dyn_arch_init(void *data)
+{
+       extern const unsigned char ftrace_test_p6nop[];
+       extern const unsigned char ftrace_test_nop5[];
+       extern const unsigned char ftrace_test_jmp[];
+       int faulted = 0;
+
+       /*
+        * There is no good nop for all x86 archs.
+        * We will default to using the P6_NOP5, but first we
+        * will test to make sure that the nop will actually
+        * work on this CPU. If it faults, we will then
+        * fall back to a less efficient 5 byte nop. If that fails
+        * we then just use a jmp as our nop. This isn't the most
+        * efficient nop, but we cannot use a multi-part nop,
+        * since we would then risk being preempted in the middle
+        * of that nop, and if tracing were enabled at that point
+        * it might cause a system crash.
+        *
+        * TODO: check the cpuid to determine the best nop.
+        */
+       asm volatile (
+               "ftrace_test_jmp:"
+               "jmp ftrace_test_p6nop\n"
+               "nop\n"
+               "nop\n"
+               "nop\n"  /* 2 byte jmp + 3 bytes */
+               "ftrace_test_p6nop:"
+               P6_NOP5
+               "jmp 1f\n"
+               "ftrace_test_nop5:"
+               ".byte 0x66,0x66,0x66,0x66,0x90\n"
+               "1:"
+               ".section .fixup, \"ax\"\n"
+               "2:     movl $1, %0\n"
+               "       jmp ftrace_test_nop5\n"
+               "3:     movl $2, %0\n"
+               "       jmp 1b\n"
+               ".previous\n"
+               _ASM_EXTABLE(ftrace_test_p6nop, 2b)
+               _ASM_EXTABLE(ftrace_test_nop5, 3b)
+               : "=r"(faulted) : "0" (faulted));
+
+       switch (faulted) {
+       case 0:
+               pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
+               memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
+               break;
+       case 1:
+               pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
+               memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
+               break;
+       case 2:
+               pr_info("ftrace: converting mcount calls to jmp . + 5\n");
+               memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
+               break;
+       }
 
-       /* move the IP back to the start of the call */
-       ip -= CALL_BACK;
+       /* The return code is returned via data */
+       *(unsigned long *)data = 0;
 
-       r->failed = ftrace_modify_code(ip, save.code, (char *)&ftrace_jmp);
+       return 0;
 }
+#endif
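
For reference, the three nop candidates probed above encode as follows (the byte strings echoed by the pr_info() calls; P6_NOP5 comes from asm/nops.h):

        P6_NOP5:        0f 1f 44 00 00      nopl   0x0(%eax,%eax,1)
        prefixed nop:   66 66 66 66 90      nop with four operand-size prefixes
        jmp fallback:   eb 03 90 90 90      jmp .+5 over three one-byte nops
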
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
 
-static void notrace ftrace_replace_code(int saved)
+static int ftrace_mod_jmp(unsigned long ip,
+                         int old_offset, int new_offset)
 {
-       unsigned char *new = NULL, *old = NULL;
-       struct ftrace_record *rec;
-       struct ftrace_page *pg;
-       unsigned long ip;
-       int i;
+       unsigned char code[MCOUNT_INSN_SIZE];
 
-       if (saved)
-               old = (char *)&ftrace_jmp;
-       else
-               new = (char *)&ftrace_jmp;
+       if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
 
-       for (pg = ftrace_pages_start; pg; pg = pg->next) {
-               for (i = 0; i < pg->index; i++) {
-                       union ftrace_code_union calc;
-                       rec = &pg->records[i];
+       if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
+               return -EINVAL;
 
-                       /* don't modify code that has already faulted */
-                       if (rec->failed)
-                               continue;
+       *(int *)(&code[1]) = new_offset;
 
-                       ip = rec->rec.ip;
+       if (do_ftrace_mod_code(ip, &code))
+               return -EPERM;
 
-                       calc.e8         = 0xe8;
-                       calc.offset     = ftrace_calc_offset(ip);
+       return 0;
+}
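
The buffer patched above holds a near jmp: opcode 0xe9 in code[0] and a little-endian rel32 in code[1..4]. A standalone sketch of that layout (the displacement value is made up):

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                unsigned char code[5] = { 0xe9 };   /* near-jmp opcode */
                int new_offset = 0x00001234;        /* made-up rel32   */

                /* Equivalent to *(int *)(&code[1]) = new_offset on
                 * little-endian x86. */
                memcpy(&code[1], &new_offset, sizeof(new_offset));
                printf("%02x %02x %02x %02x %02x\n",
                       code[0], code[1], code[2], code[3], code[4]);
                return 0;
        }
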
 
-                       if (saved)
-                               new = calc.code;
-                       else
-                               old = calc.code;
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       int old_offset, new_offset;
 
-                       ip -= CALL_BACK;
+       old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+       new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
 
-                       rec->failed = ftrace_modify_code(ip, old, new);
-               }
-       }
+       return ftrace_mod_jmp(ip, old_offset, new_offset);
+}
 
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned long ip = (unsigned long)(&ftrace_graph_call);
+       int old_offset, new_offset;
+
+       old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
+       new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
+
+       return ftrace_mod_jmp(ip, old_offset, new_offset);
 }
 
-notrace void ftrace_startup_code(void)
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * These are simplified versions of the functions used
+ * above for dynamic ftrace; they ignore all traces
+ * taken in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
 {
-       ftrace_replace_code(1);
+       atomic_inc(&in_nmi);
 }
 
-notrace void ftrace_shutdown_code(void)
+void ftrace_nmi_exit(void)
 {
-       ftrace_replace_code(0);
+       atomic_dec(&in_nmi);
 }
 
-notrace void ftrace_shutdown_replenish(void)
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+                               unsigned long func, int *depth)
 {
-       if (ftrace_pages->next)
+       int index;
+
+       if (!current->ret_stack)
+               return -EBUSY;
+
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+
+       index = ++current->curr_ret_stack;
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+
+       return 0;
+}
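
The entries written above live in a per-task array hung off task_struct; the fields used here correspond roughly to this layout (as declared in linux/ftrace.h of this era; later kernels added fields):

        struct ftrace_ret_stack {
                unsigned long           ret;        /* saved return address */
                unsigned long           func;       /* traced function      */
                unsigned long long      calltime;   /* entry timestamp      */
        };
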
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+       int index;
+
+       index = current->curr_ret_stack;
+
+       if (unlikely(index < 0)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic, otherwise we have nowhere to go */
+               *ret = (unsigned long)panic;
                return;
+       }
+
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
+       barrier();
+       current->curr_ret_stack--;
 
-       /* allocate another page */
-       ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
 }
 
-notrace int ftrace_shutdown_arch_init(void)
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+
+       pop_return_trace(&trace, &ret);
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_graph_return(&trace);
+
+       if (unlikely(!ret)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic. What else to do? */
+               ret = (unsigned long)panic;
+       }
+
+       return ret;
+}
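
ftrace_return_to_handler() is reached from the return_to_handler trampoline in the entry assembly, which must preserve the traced function's live return-value registers around the call. A rough sketch of the 32-bit shape (not the exact listing):

        return_to_handler:
                pushl   %eax            # save the real return values
                pushl   %edx
                call    ftrace_return_to_handler
                movl    %eax, %ecx      # the original return address
                popl    %edx
                popl    %eax
                jmp     *%ecx           # resume in the real caller
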
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
-       struct ftrace_page *pg;
-       int cnt;
-       int i;
+       unsigned long old;
+       unsigned long long calltime;
+       int faulted;
+       struct ftrace_graph_ent trace;
+       unsigned long return_hooker = (unsigned long)
+                               &return_to_handler;
+
+       /* NMIs are currently unsupported */
+       if (unlikely(atomic_read(&in_nmi)))
+               return;
 
-       /* allocate a few pages */
-       ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
-       if (!ftrace_pages_start)
-               return -1;
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
 
        /*
-        * Allocate a few more pages.
-        *
-        * TODO: have some parser search vmlinux before
-        *   final linking to find all calls to ftrace.
-        *   Then we can:
-        *    a) know how many pages to allocate.
-        *     and/or
-        *    b) set up the table then.
-        *
-        *  The dynamic code is still necessary for
-        *  modules.
+        * Protect against a fault, even if it shouldn't
+        * happen. This tool is too intrusive to
+        * do without such protection.
         */
+       asm volatile(
+               "1: " _ASM_MOV " (%[parent]), %[old]\n"
+               "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
+               "   movl $0, %[faulted]\n"
+               "3:\n"
 
-       pg = ftrace_pages = ftrace_pages_start;
+               ".section .fixup, \"ax\"\n"
+               "4: movl $1, %[faulted]\n"
+               "   jmp 3b\n"
+               ".previous\n"
 
-       cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+               _ASM_EXTABLE(1b, 4b)
+               _ASM_EXTABLE(2b, 4b)
 
-       for (i = 0; i < cnt; i++) {
-               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+               : [old] "=r" (old), [faulted] "=r" (faulted)
+               : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
+               : "memory"
+       );
 
-               /* If we fail, we'll try later anyway */
-               if (!pg->next)
-                       break;
+       if (unlikely(faulted)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               return;
+       }
 
-               pg = pg->next;
+       if (unlikely(!__kernel_text_address(old))) {
+               ftrace_graph_stop();
+               *parent = old;
+               WARN_ON(1);
+               return;
        }
 
-       return 0;
+       calltime = cpu_clock(raw_smp_processor_id());
+
+       if (push_return_trace(old, calltime,
+                               self_addr, &trace.depth) == -EBUSY) {
+               *parent = old;
+               return;
+       }
+
+       trace.func = self_addr;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace)) {
+               current->curr_ret_stack--;
+               *parent = old;
+       }
 }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
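
Finally, a conceptual picture of the parent argument above: the mcount entry code passes the address of the stack slot holding the traced function's return address, so the protected store redirects the eventual return into return_to_handler:

        caller's frame (conceptual)
        +----------------------------+
        |  return address            | <- parent points at this slot; it is
        +----------------------------+    overwritten with return_hooker
        |  traced function's frame   |
        +----------------------------+
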