ftrace: core support for ARM
author    Abhishek Sagar <sagar.abhishek@gmail.com>
          Sat, 31 May 2008 08:53:50 +0000 (14:23 +0530)
committer Ingo Molnar <mingo@elte.hu>
          Mon, 2 Jun 2008 09:32:20 +0000 (11:32 +0200)
Core ftrace support for the ARM architecture, including dynamic
function tracing.
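
For context: building with -pg makes gcc emit a call to mcount() at the
entry of every instrumented function, and ftrace hooks those calls. A rough
illustration of what the compiler effectively generates (the function name
below is hypothetical):

	extern void mcount(void);

	void some_traced_function(void)		/* hypothetical */
	{
		mcount();	/* emitted by gcc; lr identifies the call site */
		/* ... original function body ... */
	}

With CONFIG_DYNAMIC_FTRACE these call sites are discovered at boot and
overwritten with NOPs, then patched back into branches only while tracing
is active; the new arch/arm/kernel/ftrace.c below supplies the patching
primitives.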

Signed-off-by: Abhishek Sagar <sagar.abhishek@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/kernel/Makefile
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-common.S
arch/arm/kernel/ftrace.c [new file with mode: 0644]

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b786e68..3845e5c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,8 @@ config ARM
        select HAVE_OPROFILE
        select HAVE_KPROBES if (!XIP_KERNEL)
        select HAVE_KRETPROBES if (HAVE_KPROBES)
+       select HAVE_FTRACE if (!XIP_KERNEL)
+       select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE)
        help
          The ARM series is a line of low-power-consumption RISC chip designs
          licensed by ARM Ltd and targeted at embedded applications and
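
The (!XIP_KERNEL) guard mirrors the HAVE_KPROBES line above it: an
execute-in-place kernel runs its text directly from ROM or flash, which
cannot be patched at runtime.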
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index de9d9ee..95baac4 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -69,6 +69,12 @@ SEDFLAGS     = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
 
 targets       := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
                 head.o misc.o $(OBJS)
+
+ifeq ($(CONFIG_FTRACE),y)
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+endif
+
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  :=
 
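The decompressor is a freestanding binary with no mcount implementation,
so any -pg inherited from KBUILD_CFLAGS would leave its objects with
unresolvable profiling calls; the subst above strips the flag for this
directory.
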
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad455ff..eb9092c 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -4,6 +4,10 @@
 
 AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
+ifdef CONFIG_DYNAMIC_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
 # Object file lists.
 
 obj-y          := compat.o entry-armv.o entry-common.o irq.o \
@@ -18,6 +22,7 @@ obj-$(CONFIG_ARTHUR)          += arthur.o
 obj-$(CONFIG_ISA_DMA)          += dma-isa.o
 obj-$(CONFIG_PCI)              += bios32.o isa.o
 obj-$(CONFIG_SMP)              += smp.o
+obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)          += kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)       += atags.o
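
ftrace.o itself must be built without -pg: if the code that patches mcount
call sites were itself instrumented, the tracer would recurse into itself.
CFLAGS_REMOVE_ftrace.o is the kbuild idiom for dropping a flag from a
single object file.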
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 688b7b1..3b13221 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -48,6 +48,11 @@ extern void __aeabi_ulcmp(void);
 extern void fpundefinstr(void);
 extern void fp_enter(void);
 
+#ifdef CONFIG_FTRACE
+extern void mcount(void);
+EXPORT_SYMBOL(mcount);
+#endif
+
 /*
  * This has a special calling convention; it doesn't
  * modify any of the usual registers, except for LR.
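
mcount is exported so that modules built with -pg can resolve their
compiler-generated profiling calls at load time.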
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 597ed00..8f79a47 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -99,6 +99,53 @@ ENTRY(ret_from_fork)
 #undef CALL
 #define CALL(x) .long x
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+       stmdb sp!, {r0-r3, lr}
+       mov r0, lr
+
+       .globl mcount_call
+mcount_call:
+       bl ftrace_stub
+       ldmia sp!, {r0-r3, pc}
+
+ENTRY(ftrace_caller)
+       stmdb sp!, {r0-r3, lr}
+       ldr r1, [fp, #-4]
+       mov r0, lr
+
+       .globl ftrace_call
+ftrace_call:
+       bl ftrace_stub
+       ldmia sp!, {r0-r3, pc}
+
+#else
+
+ENTRY(mcount)
+       stmdb sp!, {r0-r3, lr}
+       ldr r0, =ftrace_trace_function
+       ldr r2, [r0]
+       adr r0, ftrace_stub
+       cmp r0, r2
+       bne trace
+       ldmia sp!, {r0-r3, pc}
+
+trace:
+       ldr r1, [fp, #-4]
+       mov r0, lr
+       mov lr, pc
+       mov pc, r2
+       ldmia sp!, {r0-r3, pc}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+       .globl ftrace_stub
+ftrace_stub:
+       mov pc, lr
+
+#endif /* CONFIG_FTRACE */
+
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
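
In the static (non-dynamic) mcount above, lr is the address inside the
instrumented function and [fp, #-4] recovers that function's own saved lr,
i.e. its caller, from the APCS frame. Rendered in C, a minimal sketch
(mcount_sketch is hypothetical; the signature assumes the ftrace_func_t
of this era):

	extern void (*ftrace_trace_function)(unsigned long ip,
					     unsigned long parent_ip);
	extern void ftrace_stub(void);

	static void mcount_sketch(unsigned long self_addr, unsigned long parent)
	{
		/* Tracing is off while the handler still points at the stub. */
		if (ftrace_trace_function ==
		    (void (*)(unsigned long, unsigned long))ftrace_stub)
			return;

		ftrace_trace_function(self_addr, parent);
	}

The dynamic variant avoids the load-and-compare entirely:
ftrace_update_ftrace_func() in ftrace.c below rewrites the bl ftrace_stub
at the ftrace_call label into a direct branch to the tracer.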
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
new file mode 100644
index 0000000..f4cb4cc
--- /dev/null
+++ b/arch/arm/kernel/ftrace.c
@@ -0,0 +1,128 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ *
+ * For licensing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. With dynamic ftrace, the mcount
+ * call sites are patched lazily to NOPs until they are enabled.
+ * All code-patching routines here take effect atomically.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/cacheflush.h>
+
+#define INSN_SIZE      4
+#define PC_OFFSET      8
+#define BL_OPCODE      0xeb000000
+#define BL_OFFSET_MASK 0x00ffffff
+
+static unsigned long bl_insn;
+static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+
+/* return true if mcount call site is already patched/no-op'ed */
+int ftrace_ip_converted(unsigned long pc)
+{
+       unsigned long save;
+
+       pc -= INSN_SIZE;
+       save = *(unsigned long *)pc;
+       return save == NOP;
+}
+
+unsigned char *ftrace_nop_replace(void)
+{
+       return (char *)&NOP;
+}
+
+/* construct a branch (BL) instruction to addr */
+unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+       long offset;
+
+       offset = (long)addr - (long)(pc - INSN_SIZE + PC_OFFSET);
+       if (unlikely(offset < -33554432 || offset > 33554428)) {
+               /* Can't generate branches that far: BL's 24-bit signed
+                * word offset limits the range to +/-32MB (per the ARM
+                * ARM). Ftrace doesn't branch outside core kernel text. */
+               WARN_ON_ONCE(1);
+               return NULL;
+       }
+       offset = (offset >> 2) & BL_OFFSET_MASK;
+       bl_insn = BL_OPCODE | offset;
+       return (unsigned char *)&bl_insn;
+}
+
+int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
+                      unsigned char *new_code)
+{
+       unsigned long err = 0, replaced = 0, old, new;
+
+       old = *(unsigned long *)old_code;
+       new = *(unsigned long *)new_code;
+       pc -= INSN_SIZE;
+
+       __asm__ __volatile__ (
+               "1:  ldr    %1, [%2]  \n"
+               "    cmp    %1, %4    \n"
+               "2:  streq  %3, [%2]  \n"
+               "    cmpne  %1, %3    \n"
+               "    movne  %0, #2    \n"
+               "3:\n"
+
+               ".section .fixup, \"ax\"\n"
+               "4:  mov  %0, #1  \n"
+               "    b    3b      \n"
+               ".previous\n"
+
+               ".section __ex_table, \"a\"\n"
+               "    .long 1b, 4b \n"
+               "    .long 2b, 4b \n"
+               ".previous\n"
+
+               : "=r"(err), "=r"(replaced)
+               : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
+               : "memory");
+
+       if (!err && (replaced == old))
+               flush_icache_range(pc, pc + INSN_SIZE);
+
+       return err;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       int ret;
+       unsigned long pc, old;
+       unsigned char *new;
+
+       pc = (unsigned long)&ftrace_call;
+       pc += INSN_SIZE;
+       memcpy(&old, &ftrace_call, INSN_SIZE);
+       new = ftrace_call_replace(pc, (unsigned long)func);
+       ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+       return ret;
+}
+
+int ftrace_mcount_set(unsigned long *data)
+{
+       unsigned long pc, old;
+       unsigned long *addr = data;
+       unsigned char *new;
+
+       pc = (unsigned long)&mcount_call;
+       pc += INSN_SIZE;
+       memcpy(&old, &mcount_call, INSN_SIZE);
+       new = ftrace_call_replace(pc, *addr);
+       *addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
+       return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+       ftrace_mcount_set(data);
+       return 0;
+}
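
To make the BL encoding concrete: ARM reads pc as the instruction's
address plus 8, and BL carries a 24-bit signed word offset, which is where
the +/-32MB limit checked above comes from. A worked example with
hypothetical addresses, reusing the constants defined in this file
(encode_bl_example is not part of the patch):

	static unsigned long encode_bl_example(void)
	{
		/* Encode "bl 0xc0100000" placed at address 0xc0008000. */
		unsigned long insn_addr = 0xc0008000, target = 0xc0100000;
		long offset = (long)target - (long)(insn_addr + PC_OFFSET);

		/* offset == 0x000f7ff8; result == 0xeb03dffe */
		return BL_OPCODE | ((offset >> 2) & BL_OFFSET_MASK);
	}

And a sketch of how the generic dynamic-ftrace code of this era drives
these hooks to disable one call site. Here ip is assumed to be the
recorded address, which on this port is the instruction after the mcount
branch (nop_out_site is hypothetical):

	extern void mcount(void);

	static int nop_out_site(unsigned long ip)
	{
		unsigned char *call = ftrace_call_replace(ip, (unsigned long)mcount);

		if (!call)
			return -EINVAL;
		/* Succeeds only if the site still holds the mcount branch. */
		return ftrace_modify_code(ip, call, ftrace_nop_replace());
	}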