From: David S. Miller
Date: Wed, 21 Apr 2010 10:08:11 +0000 (-0700)
Subject: sparc64: Fix stack dumping and tracing when function graph is enabled.
X-Git-Tag: v2.6.35-rc1~498^2~2
X-Git-Url: http://ftp.safe.ca/?p=safe%2Fjmp%2Flinux-2.6;a=commitdiff_plain;h=667f0cee3e0321151aa7a1a5222afe67ca4be0ea

sparc64: Fix stack dumping and tracing when function graph is enabled.

Like x86, when the function graph tracer is enabled, emit the ftrace
stub as well as the program counter it will be transformed back into.

We duplicate a lot of similar stack walking logic in 3 or 4 spots, so
eventually we should consolidate things like x86 does.

Thanks to Frederic Weisbecker for pointing this out.

Signed-off-by: David S. Miller
---

diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e277193..34ce49f 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -14,6 +14,7 @@
 #include
 #include
+#include <linux/ftrace.h>
 #include

 #include
 #include
@@ -1276,6 +1277,9 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 				      struct perf_callchain_entry *entry)
 {
 	unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph = 0;
+#endif

 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->tpc);
@@ -1303,6 +1307,16 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 			fp = (unsigned long)sf->fp + STACK_BIAS;
 		}
 		callchain_store(entry, pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+			int index = current->curr_ret_stack;
+			if (current->ret_stack && index >= graph) {
+				pc = current->ret_stack[index - graph].ret;
+				callchain_store(entry, pc);
+				graph++;
+			}
+		}
+#endif
 	} while (entry->nr < PERF_MAX_STACK_DEPTH);
 }

diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c
index acb12f6..3e08153 100644
--- a/arch/sparc/kernel/stacktrace.c
+++ b/arch/sparc/kernel/stacktrace.c
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include <linux/ftrace.h>
 #include
 #include
 #include
@@ -12,6 +13,10 @@ static void __save_stack_trace(struct thread_info *tp,
 			       bool skip_sched)
 {
 	unsigned long ksp, fp;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	struct task_struct *t;
+	int graph = 0;
+#endif

 	if (tp == current_thread_info()) {
 		stack_trace_flush();
@@ -21,6 +26,9 @@ static void __save_stack_trace(struct thread_info *tp,
 	}
 	fp = ksp + STACK_BIAS;

+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	t = tp->task;
+#endif
 	do {
 		struct sparc_stackf *sf;
 		struct pt_regs *regs;
@@ -44,8 +52,21 @@ static void __save_stack_trace(struct thread_info *tp,

 		if (trace->skip > 0)
 			trace->skip--;
-		else if (!skip_sched || !in_sched_functions(pc))
+		else if (!skip_sched || !in_sched_functions(pc)) {
 			trace->entries[trace->nr_entries++] = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+				int index = t->curr_ret_stack;
+				if (t->ret_stack && index >= graph) {
+					pc = t->ret_stack[index - graph].ret;
+					if (trace->nr_entries <
+					    trace->max_entries)
+						trace->entries[trace->nr_entries++] = pc;
+					graph++;
+				}
+			}
+#endif
+		}
 	} while (trace->nr_entries < trace->max_entries);
 }

diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 9da57f0..42ad2ba 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/ftrace.h>

 #include
 #include
@@ -2154,6 +2155,9 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 	unsigned long fp, thread_base, ksp;
 	struct thread_info *tp;
 	int count = 0;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph = 0;
+#endif

 	ksp = (unsigned long) _ksp;
 	if (!tsk)
@@ -2193,6 +2197,16 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 		}

 		printk(" [%016lx] %pS\n", pc, (void *) pc);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+			int index = tsk->curr_ret_stack;
+			if (tsk->ret_stack && index >= graph) {
+				pc = tsk->ret_stack[index - graph].ret;
+				printk(" [%016lx] %pS\n", pc, (void *) pc);
+				graph++;
+			}
+		}
+#endif
 	} while (++count < 16);
 }
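
The same translate-through-ret_stack step now appears in perf_callchain_kernel(),
__save_stack_trace() and show_stack().  As a rough sketch of the consolidation the
commit message mentions, the repeated check could be hoisted into one helper along
the following lines.  The helper name graph_unwind_pc() and its placement are
hypothetical illustrations, not part of this patch:

#include <linux/ftrace.h>
#include <linux/sched.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Sketch only.  If the saved call-site PC returns into the function graph
 * tracer's return_to_handler stub (on sparc64 the return address is the
 * call instruction plus 8), fetch the original return address the tracer
 * stashed on the task's ret_stack.  "*graph" counts how many stub frames
 * have already been unwound for this task.
 */
static unsigned long graph_unwind_pc(struct task_struct *t,
				     unsigned long pc, int *graph)
{
	if ((pc + 8UL) == (unsigned long) &return_to_handler) {
		int index = t->curr_ret_stack;

		if (t->ret_stack && index >= *graph) {
			pc = t->ret_stack[index - *graph].ret;
			(*graph)++;
		}
	}
	return pc;
}
#endif

Each walker would still record the PC it pulled off the stack and then, whenever
graph_unwind_pc() maps it to a different address, record that as well, giving the
"stub plus real program counter" output this patch adds in three places.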