perf, x86: Pass enable bit mask to __x86_pmu_enable_event()

diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 848f73f..44a8e0d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -48,7 +48,6 @@
 #include <asm/segment.h>
 #include <asm/smp.h>
 #include <asm/page_types.h>
-#include <asm/desc.h>
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include <asm/processor-flags.h>
@@ -335,6 +334,10 @@ ENTRY(ret_from_fork)
 END(ret_from_fork)
 
 /*
+ * Interrupt exit functions should be protected against kprobes
+ */
+       .pushsection .kprobes.text, "ax"
+/*
  * Return to user mode is not as complex as all this looks,
  * but we want the default path for a system call return to
  * go as quickly as possible which is why some of this is
@@ -384,6 +387,10 @@ need_resched:
 END(resume_kernel)
 #endif
        CFI_ENDPROC
+/*
+ * End of kprobes section
+ */
+       .popsection
 
 /* SYSENTER_RETURN points to after the "sysenter" instruction in
    the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
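
The .pushsection/.popsection pairs added above place the interrupt-return
paths in the .kprobes.text section. The kprobes core refuses to arm a probe
at any address inside that section, which keeps a probe from firing
recursively on the very code that exits from the trap. A minimal C-side
sketch of the same mechanism, assuming the section symbols the linker script
provides (the helper name is illustrative; the real blacklist walk in
kernel/kprobes.c is more involved):

        /* the kernel's annotation for C functions, from <linux/kprobes.h> */
        #define __kprobes __attribute__((__section__(".kprobes.text")))

        /* simplified, illustrative form of the .kprobes.text range check */
        static int in_kprobes_section(unsigned long addr)
        {
                extern char __kprobes_text_start[], __kprobes_text_end[];

                return addr >= (unsigned long)__kprobes_text_start &&
                       addr <  (unsigned long)__kprobes_text_end;
        }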
@@ -514,6 +521,10 @@ sysexit_audit:
        PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
+/*
+ * syscall stub including irq exit should be protected against kprobes
+ */
+       .pushsection .kprobes.text, "ax"
        # system call handler stub
 ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
@@ -706,26 +717,69 @@ syscall_badsys:
        jmp resume_userspace
 END(syscall_badsys)
        CFI_ENDPROC
+/*
+ * End of kprobes section
+ */
+       .popsection
 
 /*
  * System calls that need a pt_regs pointer.
  */
-#define PTREGSCALL(name) \
+#define PTREGSCALL0(name) \
        ALIGN; \
 ptregs_##name: \
        leal 4(%esp),%eax; \
        jmp sys_##name;
 
-PTREGSCALL(iopl)
-PTREGSCALL(fork)
-PTREGSCALL(clone)
-PTREGSCALL(vfork)
-PTREGSCALL(execve)
-PTREGSCALL(sigaltstack)
-PTREGSCALL(sigreturn)
-PTREGSCALL(rt_sigreturn)
-PTREGSCALL(vm86)
-PTREGSCALL(vm86old)
+#define PTREGSCALL1(name) \
+       ALIGN; \
+ptregs_##name: \
+       leal 4(%esp),%edx; \
+       movl (PT_EBX+4)(%esp),%eax; \
+       jmp sys_##name;
+
+#define PTREGSCALL2(name) \
+       ALIGN; \
+ptregs_##name: \
+       leal 4(%esp),%ecx; \
+       movl (PT_ECX+4)(%esp),%edx; \
+       movl (PT_EBX+4)(%esp),%eax; \
+       jmp sys_##name;
+
+#define PTREGSCALL3(name) \
+       ALIGN; \
+ptregs_##name: \
+       leal 4(%esp),%eax; \
+       pushl %eax; \
+       movl PT_EDX(%eax),%ecx; \
+       movl PT_ECX(%eax),%edx; \
+       movl PT_EBX(%eax),%eax; \
+       call sys_##name; \
+       addl $4,%esp; \
+       ret
+
+PTREGSCALL1(iopl)
+PTREGSCALL0(fork)
+PTREGSCALL0(vfork)
+PTREGSCALL3(execve)
+PTREGSCALL2(sigaltstack)
+PTREGSCALL0(sigreturn)
+PTREGSCALL0(rt_sigreturn)
+PTREGSCALL2(vm86)
+PTREGSCALL1(vm86old)
+
+/* Clone is an oddball.  The 4th arg is in %edi */
+       ALIGN;
+ptregs_clone:
+       leal 4(%esp),%eax
+       pushl %eax
+       pushl PT_EDI(%eax)
+       movl PT_EDX(%eax),%ecx
+       movl PT_ECX(%eax),%edx
+       movl PT_EBX(%eax),%eax
+       call sys_clone
+       addl $8,%esp
+       ret
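
These stubs rely on the 32-bit kernel's -mregparm=3 calling convention: the
first three C arguments travel in %eax, %edx and %ecx. Each PTREGSCALLn stub
reloads the saved user registers into the right argument slots and hands the
pt_regs pointer over as the final argument; with three or more register
arguments that pointer no longer fits in a register, so it is pushed on the
stack and the stub cleans up after the call (ptregs_clone pushes %edi as
well, hence the addl $8). A hedged sketch of the C prototypes such stubs
pair with (illustrative, not copied from the tree):

        /* PTREGSCALL1: one argument in %eax, pt_regs pointer in %edx */
        int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs);

        /* PTREGSCALL3: three register arguments, pt_regs pointer on the stack */
        long sys_execve(char __user *name,
                        char __user * __user *argv,
                        char __user * __user *envp,
                        struct pt_regs *regs);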
 
 .macro FIXUP_ESPFIX_STACK
 /*
@@ -815,6 +869,10 @@ common_interrupt:
 ENDPROC(common_interrupt)
        CFI_ENDPROC
 
+/*
+ *  Irq entries should be protected against kprobes
+ */
+       .pushsection .kprobes.text, "ax"
 #define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name)                            \
        RING0_INT_FRAME;                \
@@ -981,16 +1039,16 @@ ENTRY(spurious_interrupt_bug)
        jmp error_code
        CFI_ENDPROC
 END(spurious_interrupt_bug)
+/*
+ * End of kprobes section
+ */
+       .popsection
 
 ENTRY(kernel_thread_helper)
        pushl $0                # fake return address for unwinder
        CFI_STARTPROC
-       movl %edx,%eax
-       push %edx
-       CFI_ADJUST_CFA_OFFSET 4
-       call *%ebx
-       push %eax
-       CFI_ADJUST_CFA_OFFSET 4
+       movl %edi,%eax
+       call *%esi
        call do_exit
        ud2                     # padding for call trace
        CFI_ENDPROC
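
kernel_thread_helper now expects the thread function in %esi and its
argument in %edi instead of %ebx/%edx, matching a kernel_thread() that seeds
the child's registers directly. A sketch of that setup, wrapped in a
hypothetical helper for illustration (field names follow struct pt_regs on
x86):

        /* illustrative: how kernel_thread() hands fn/arg to the helper */
        static void seed_thread_regs(struct pt_regs *regs,
                                     int (*fn)(void *), void *arg)
        {
                memset(regs, 0, sizeof(*regs));
                regs->si = (unsigned long)fn;   /* consumed by "call *%esi" */
                regs->di = (unsigned long)arg;  /* consumed by "movl %edi,%eax" */
        }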
@@ -1175,6 +1233,7 @@ ENTRY(ftrace_graph_caller)
        pushl %edx
        movl 0xc(%esp), %edx
        lea 0x4(%ebp), %eax
+       movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %edx
        call prepare_ftrace_return
        popl %edx
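
The added movl (%ebp), %ecx dereferences the saved frame pointer so the
caller's frame can be handed to prepare_ftrace_return() as a third argument;
the function graph tracer uses it to detect a corrupted return stack. With
-mregparm=3 the three arguments arrive in %eax, %edx and %ecx, which fits a
prototype along these lines (hedged from the tracing code of this era):

        void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                                   unsigned long frame_pointer);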
@@ -1185,16 +1244,14 @@ END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:
-       pushl $0
        pushl %eax
-       pushl %ecx
        pushl %edx
+       movl %ebp, %eax
        call ftrace_return_to_handler
-       movl %eax, 0xc(%esp)
+       movl %eax, %ecx
        popl %edx
-       popl %ecx
        popl %eax
-       ret
+       jmp *%ecx
 #endif
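
return_to_handler no longer reserves a fake slot on the stack and patches
the return address into it. Instead, ftrace_return_to_handler() is given the
frame pointer (passed via %eax) and returns the original return address,
which is parked in %ecx while %eax/%edx, the 32-bit return-value pair of the
traced function, are restored; control then leaves through an indirect jmp.
The C-side contract this assumes (a hedged sketch):

        /* returns the original return address recorded at function entry */
        unsigned long ftrace_return_to_handler(unsigned long frame_pointer);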
 
 .section .rodata,"a"