Merge branch 'x86/core' into core/ipi

diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 02f0f61..f7bddc2 100644
@@ -1,11 +1,12 @@
 /*
  * Stack trace management functions
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/stacktrace.h>
 
 static void save_stack_warning(void *data, char *msg)
@@ -25,6 +26,8 @@ static int save_stack_stack(void *data, char *name)
 static void save_stack_address(void *data, unsigned long addr, int reliable)
 {
        struct stack_trace *trace = data;
+       if (!reliable)
+               return;
        if (trace->skip > 0) {
                trace->skip--;
                return;
@@ -37,6 +40,8 @@ static void
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
        struct stack_trace *trace = (struct stack_trace *)data;
+       if (!reliable)
+               return;
        if (in_sched_functions(addr))
                return;
        if (trace->skip > 0) {
@@ -70,6 +75,7 @@ void save_stack_trace(struct stack_trace *trace)
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
@@ -77,3 +83,67 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
+
+struct stack_frame {
+       const void __user       *next_fp;
+       unsigned long           ret_addr;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+       int ret;
+
+       if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+               return 0;
+
+       ret = 1;
+       pagefault_disable();
+       if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+               ret = 0;
+       pagefault_enable();
+
+       return ret;
+}
+
+static inline void __save_stack_trace_user(struct stack_trace *trace)
+{
+       const struct pt_regs *regs = task_pt_regs(current);
+       const void __user *fp = (const void __user *)regs->bp;
+
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = regs->ip;
+
+       while (trace->nr_entries < trace->max_entries) {
+               struct stack_frame frame;
+
+               frame.next_fp = NULL;
+               frame.ret_addr = 0;
+               if (!copy_stack_frame(fp, &frame))
+                       break;
+               if ((unsigned long)fp < regs->sp)
+                       break;
+               if (frame.ret_addr) {
+                       trace->entries[trace->nr_entries++] =
+                               frame.ret_addr;
+               }
+               if (fp == frame.next_fp)
+                       break;
+               fp = frame.next_fp;
+       }
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+       /*
+        * Trace user stack if we are not a kernel thread
+        */
+       if (current->mm) {
+               __save_stack_trace_user(trace);
+       }
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
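
With save_stack_trace() and save_stack_trace_tsk() now carrying EXPORT_SYMBOL_GPL, GPL modules can capture kernel backtraces through the stack_trace API. As a minimal sketch only (not part of the commit above; the module name and symbols are illustrative), a module could fill a fixed-size entry buffer and print it with the %pS printk format:

/*
 * stacktrace_demo.c - hypothetical example, not from this commit.
 * Captures the current kernel stack via the newly exported
 * save_stack_trace() and prints each entry symbolically.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define DEMO_DEPTH	16

static unsigned long demo_entries[DEMO_DEPTH];

static int __init stacktrace_demo_init(void)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= DEMO_DEPTH,
		.entries	= demo_entries,
		.skip		= 0,		/* keep every frame */
	};
	int i;

	save_stack_trace(&trace);

	/* A ULONG_MAX entry terminates the trace when space remained. */
	for (i = 0; i < trace.nr_entries; i++) {
		if (demo_entries[i] == ULONG_MAX)
			break;
		printk(KERN_INFO "demo: %pS\n", (void *)demo_entries[i]);
	}
	return 0;
}

static void __exit stacktrace_demo_exit(void)
{
}

module_init(stacktrace_demo_init);
module_exit(stacktrace_demo_exit);
MODULE_LICENSE("GPL");	/* required: both exports are GPL-only */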
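
The user-mode walker above depends on the frame-pointer chain: with frame pointers enabled, each frame begins with the caller's saved frame pointer followed by the return address, which is exactly the two-word struct stack_frame that copy_stack_frame() fetches. For illustration only, the same layout can be walked from plain userspace on x86-64 when the program is built with -fno-omit-frame-pointer (a sketch under that assumption, not kernel code):

/*
 * walk_fp.c - hypothetical userspace demo of frame-pointer walking.
 * Build: gcc -O0 -fno-omit-frame-pointer walk_fp.c -o walk_fp
 */
#include <stdio.h>

struct stack_frame {
	const struct stack_frame *next_fp;	/* saved caller frame pointer */
	unsigned long ret_addr;			/* return address */
};

static void __attribute__((noinline)) walk(void)
{
	/* %rbp of this function: start of its two-word frame record */
	const struct stack_frame *fp = __builtin_frame_address(0);

	while (fp && fp->ret_addr) {
		printf("ret addr: %#lx\n", fp->ret_addr);
		/* like the kernel's sanity checks: the chain must move upward */
		if (fp->next_fp <= fp)
			break;
		fp = fp->next_fp;
	}
}

static void __attribute__((noinline)) leaf(void)
{
	walk();
}

int main(void)
{
	leaf();
	return 0;
}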