/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: the architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
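 *
 * For reference, the hardware interrupt frame layout (highest address first)
 * is: SS, RSP, RFLAGS, CS, RIP - the CPU pushes these on a privilege change,
 * so RIP is what 0(%rsp) points to on kernel entry.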
 *
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
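/*
 * Note how every pushq/popq below is paired with a CFI_ADJUST_CFA_OFFSET
 * of +8/-8 so the dwarf2 CFA keeps tracking %rsp, e.g.:
 *
 *	pushq %rdi
 *	CFI_ADJUST_CFA_OFFSET 8
 *	call  schedule
 *	popq  %rdi
 *	CFI_ADJUST_CFA_OFFSET -8
 */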
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? (bit 9 = IF) */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
	.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
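/*
 * Concretely: FIXUP_TOP_OF_STACK below fills in the SS/CS/RSP/EFLAGS slots
 * that SYSCALL never saved - the user RSP comes from %gs:pda_oldrsp and the
 * user RFLAGS from the R11 slot, where SYSCALL stashed them.
 */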
	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm
	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax,%eax
	pushq	%rax		/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm
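	/*
	 * The frame built above looks exactly like a hardware interrupt
	 * frame plus the orig_rax slot, so the child can leave through the
	 * normal iret-based return paths.
	 */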
	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro CFI_DEFAULT_STACK start=1
	CFI_DEF_CFA_OFFSET SS+8
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET: it deals with
 * non-canonical addresses better. SYSRET has trouble with them due to
 * bugs in both AMD and Intel CPUs.
 */
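/*
 * Illustrative only - a user-space write(2) following the convention above
 * (assuming __NR_write == 1 on x86-64; msg/len are placeholders):
 *
 *	movq $1,%rax		# system call number
 *	movq $1,%rdi		# arg0: fd (stdout)
 *	leaq msg(%rip),%rsi	# arg1: buf
 *	movq $len,%rdx		# arg2: count
 *	syscall			# rcx <- return RIP, r11 <- rflags
 */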
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx		/* arg3 -> C calling convention */
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incomplete stack frame and undefined top of stack.
 */
	.globl ret_from_sys_call
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja  1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
	jmp int_ret_from_sys_call
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET	r8,R8-ARGOFFSET
	CFI_REL_OFFSET	r9,R9-ARGOFFSET
	CFI_REL_OFFSET	r10,R10-ARGOFFSET
	CFI_REL_OFFSET	r11,R11-ARGOFFSET
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,threadinfo_status(%rcx)
	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp int_with_check
	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
int_signal:
	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	jmp int_with_check
END(int_ret_from_sys_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
	.endm
	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
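/*
 * For example, "PTREGSCALL stub_clone, sys_clone, %r8" expands to:
 *
 *	stub_clone:
 *		leaq sys_clone(%rip),%rax
 *		leaq -ARGOFFSET+8(%rsp),%r8	# &pt_regs as extra argument
 *		jmp  ptregscall_common
 */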
ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
END(ptregscall_common)
ENTRY(stub_execve)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	jmp int_ret_from_sys_call
END(stub_execve)
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme: this could be done at a higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
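/*
 * So handlers without a hardware error code open with INTR_FRAME (RIP is at
 * the top of the frame), while errorentry-style handlers open with
 * XCPT_FRAME because the error code slot (ORIG_RAX) is already on the stack.
 */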
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
1:	incl	%gs:pda_irqcount	# RED-PEN should check preempt count
	cmoveq %gs:pda_irqstackptr,%rsp
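	/*
	 * pda_irqcount is initialized to -1, so the incl above sets ZF only
	 * on the first nesting level and the cmoveq switches %rsp to the
	 * per-CPU irq stack; nested interrupts stay on the stack they're on.
	 */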
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	jmp do_exit
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    retint_swapgs
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check
#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif
END(common_interrupt)
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	.endm
ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm
ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm
	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	.endm
757 * "Paranoid" exit path from exception stack.
758 * Paranoid because this is used by NMIs and cannot take
759 * any kernel state for granted.
760 * We don't do kernel preemption checks here, because only
761 * NMI should be common and it does not enable IRQs and
762 * cannot get reschedule ticks.
764 * "trace" is 0 for the NMI handler only, because irq-tracing
765 * is fundamentally NMI-unsafe. (we cannot change the soft and
766 * hard flags at once, atomically)
768 .macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)		/* returning to user space? */
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	call schedule
	jmp paranoid_userspace\trace
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
	GET_THREAD_INFO(%rcx)
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz   retint_careful
	/*
	 * The iret might restore flags:
	 */
	TRACE_IRQS_IRETQ
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after iret run
	   with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:  new selector */
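	/* C prototype (for reference): void load_gs_index(unsigned gs); */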
ENTRY(load_gs_index)
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.quad gs_change,bad_gs
	.previous
	/* running with kernel gs */
bad_gs:
	swapgs			/* switch back to user gs */
	jmp  2b
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
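/*
 * Illustrative call from C (worker_fn and the flags are placeholders):
 *
 *	pid = kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * A negative return value is a -errno; otherwise it is the child's pid.
 */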
ENTRY(kernel_thread)
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL
	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq %rsp, %rdx
	call do_fork
	movq %rax,RAX(%rsp)
	/*
	 * It isn't worth checking for a reschedule here, so internally to
	 * the x86_64 port you can rely on kernel_thread() not rescheduling
	 * the child before returning; this avoids the need for hacks, for
	 * example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_thread)
child_rip:
	pushq $0		# fake return address
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
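/*
 * sys_execve() takes the full pt_regs by value here, so the stub below must
 * first build a complete (fake) stack frame; on success it returns through
 * int_ret_from_sys_call so IRET loads the fresh register state.
 */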
	call sys_execve
	movq %rax, RAX(%rsp)
	testq %rax,%rax
	je int_ret_from_sys_call
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)
	/* runs on exception stack */
KPROBE_ENTRY(debug)
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
KPROBE_END(int3)
ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	paranoidentry do_double_fault
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	paranoidentry do_stack_segment
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
END(machine_check)
#endif
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
ENDPROC(call_softirq)
#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	movq	%r15, R15(%rdi)
	movq	%r14, R14(%rdi)
	xchgq	%rsi, %rdx
	movq	%r13, R13(%rdi)
	movq	%r12, R12(%rdi)
	xorl	%eax, %eax
	movq	%rbp, RBP(%rdi)
	movq	%rbx, RBX(%rdi)
	movq	(%rsp), %rcx		# return address
	movq	%rax, R11(%rdi)
	movq	%rax, R10(%rdi)
	movq	%rax, R9(%rdi)
	movq	%rax, R8(%rdi)
	movq	%rax, RAX(%rdi)
	movq	%rax, RCX(%rdi)
	movq	%rax, RDX(%rdi)
	movq	%rax, RSI(%rdi)
	movq	%rax, RDI(%rdi)
	movq	%rax, ORIG_RAX(%rdi)
	movq	%rcx, RIP(%rdi)
	leaq	8(%rsp), %rcx
	movq	$__KERNEL_CS, CS(%rdi)
	movq	%rax, EFLAGS(%rdi)
	movq	%rcx, RSP(%rdi)
	movq	$__KERNEL_DS, SS(%rdi)
	jmpq	*%rdx
ENDPROC(arch_unwind_init_running)
#endif