perf events, x86/stacktrace: Fix performance/softlockup by providing a special frame...
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 5b75afa..c56bc28 100644
@@ -109,6 +109,33 @@ print_context_stack(struct thread_info *tinfo,
        }
        return bp;
 }
+EXPORT_SYMBOL_GPL(print_context_stack);
+
+unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+                      unsigned long *stack, unsigned long bp,
+                      const struct stacktrace_ops *ops, void *data,
+                      unsigned long *end, int *graph)
+{
+       struct stack_frame *frame = (struct stack_frame *)bp;
+       unsigned long *ret_addr = &frame->return_address;
+
+       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+               unsigned long addr = *ret_addr;
+
+               /* Stop on a non-text address: nothing below would advance
+                * the frame pointer, so the walk would never terminate. */
+               if (!__kernel_text_address(addr))
+                       break;
+
+               ops->address(data, addr, 1);
+               frame = frame->next_frame;
+               ret_addr = &frame->return_address;
+               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+       }
+       return (unsigned long)frame;
+}
+EXPORT_SYMBOL_GPL(print_context_stack_bp);
 
 
 static void
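
The new walker above is the heart of the fix: print_context_stack() scans every word on the stack and tests each one with __kernel_text_address(), which is expensive enough under perf's NMI-rate sampling to cause the reported softlockups, whereas print_context_stack_bp() hops straight along the saved frame-pointer chain, so its cost scales with call depth rather than with stack size. A minimal user-space sketch of the same walk, assuming x86-64 and a build with gcc -O0 -fno-omit-frame-pointer; the struct layout mirrors the kernel's struct stack_frame, everything else is illustrative:

#include <stdio.h>

/* Mirrors the kernel's struct stack_frame: saved %rbp, then saved %rip. */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void walk_frames(void)
{
	/* __builtin_frame_address(0) yields this function's frame base. */
	struct stack_frame *frame = __builtin_frame_address(0);
	int depth;

	/* No valid_stack_ptr() in user space, so bound the walk explicitly. */
	for (depth = 0; frame && depth < 16; depth++) {
		struct stack_frame *next = frame->next_frame;

		if (!frame->return_address)
			break;		/* deepest frame: %rbp was zeroed */
		printf("#%-2d return address %#lx\n",
		       depth, frame->return_address);

		/* Caller frames live at higher addresses; anything else
		 * means the chain is broken (the rough analogue of the
		 * kernel's valid_stack_ptr() check). */
		if (next <= frame)
			break;
		frame = next;	/* one hop per call frame */
	}
}

static void __attribute__((noinline)) baz(void) { walk_frames(); }
static void __attribute__((noinline)) bar(void) { baz(); }
static void __attribute__((noinline)) foo(void) { bar(); }

int main(void)
{
	foo();		/* prints frames for baz, bar, foo, main, ... */
	return 0;
}

The trade-off is fidelity: a frame-pointer walk only sees functions that actually set up a frame, which is fine for perf callchains but not for oops dumps, hence both walkers coexist and callers choose via the new walk_stack hook below.
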
@@ -141,10 +165,11 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops print_trace_ops = {
-       .warning = print_trace_warning,
-       .warning_symbol = print_trace_warning_symbol,
-       .stack = print_trace_stack,
-       .address = print_trace_address,
+       .warning                = print_trace_warning,
+       .warning_symbol         = print_trace_warning_symbol,
+       .stack                  = print_trace_stack,
+       .address                = print_trace_address,
+       .walk_stack             = print_context_stack,
 };
 
 void
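
With walk_stack in stacktrace_ops, generic code such as dump_trace() can dispatch through the table instead of hard-coding print_context_stack(), and perf's callchain code can plug in the cheap walker. A hedged sketch of the perf side, modeled on the backtrace_ops instance in arch/x86/kernel/cpu/perf_event.c (the backtrace_* callback names follow its style but stand in for real implementations here):

/* Inside dump_trace(), the walk is now dispatched per caller, roughly:
 *	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
 */
static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	/* Frame-pointer-only walk: cheap enough for NMI-context sampling. */
	.walk_stack		= print_context_stack_bp,
};

print_trace_ops above keeps print_context_stack, so regular oops and WARN backtraces still do the exhaustive scan.
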
@@ -207,11 +232,11 @@ unsigned __kprobes long oops_begin(void)
        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
-       if (!__raw_spin_trylock(&die_lock)) {
+       if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
-                       __raw_spin_lock(&die_lock);
+                       arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
@@ -231,7 +256,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
-               __raw_spin_unlock(&die_lock);
+               arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();
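
The __raw_spin_* to arch_spin_* renames track the spinlock namespace cleanup in which the lowest-level lock type became arch_spinlock_t; the logic is unchanged. That logic has a deliberate shape: try the lock first, and if the attempt fails because this CPU already owns die_lock, fall through instead of blocking, so an oops that occurs while printing an oops cannot deadlock against itself. A generic sketch of the pattern using C11 atomics (the atomic_flag lock and all names are stand-ins, not the kernel's types):

#include <stdatomic.h>

static atomic_flag die_lock = ATOMIC_FLAG_INIT;
static int die_owner = -1;	/* CPU currently printing an oops */
static int die_nest_count;

static void oops_begin_sketch(int cpu)
{
	/* test_and_set returns the previous value: true means trylock failed. */
	if (atomic_flag_test_and_set(&die_lock)) {
		if (cpu == die_owner) {
			/* Nested oops on this CPU: the outer oops already
			 * holds the lock, so keep going rather than spin
			 * on ourselves forever. */
		} else {
			while (atomic_flag_test_and_set(&die_lock))
				;	/* another CPU owns it: wait */
		}
	}
	die_nest_count++;
	die_owner = cpu;
}

oops_end() then mirrors this: die_nest_count is decremented on every exit and the lock is dropped only when it reaches zero, matching the nesting tracked above.
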