2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/linkage.h>
32 #include <asm/segment.h>
34 #include <asm/cache.h>
35 #include <asm/errno.h>
36 #include <asm/dwarf2.h>
37 #include <asm/calling.h>
38 #include <asm/asm-offsets.h>
40 #include <asm/unistd.h>
41 #include <asm/thread_info.h>
42 #include <asm/hw_irq.h>
47 #ifndef CONFIG_PREEMPT
48 #define retint_kernel retint_restore_args
52 * C code is not supposed to know about undefined top of stack. Every time
53 * a C function with a pt_regs argument is called from the SYSCALL based
54 * fast path FIXUP_TOP_OF_STACK is needed.
55 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
59 /* %rsp:at FRAMEEND */
60 .macro FIXUP_TOP_OF_STACK tmp
61 movq %gs:pda_oldrsp,\tmp
63 movq $__USER_DS,SS(%rsp)
64 movq $__USER_CS,CS(%rsp)
66 movq R11(%rsp),\tmp /* get eflags */
67 movq \tmp,EFLAGS(%rsp)
70 .macro RESTORE_TOP_OF_STACK tmp,offset=0
71 movq RSP-\offset(%rsp),\tmp
72 movq \tmp,%gs:pda_oldrsp
73 movq EFLAGS-\offset(%rsp),\tmp
74 movq \tmp,R11-\offset(%rsp)
77 .macro FAKE_STACK_FRAME child_rip
78 /* push in order ss, rsp, eflags, cs, rip */
81 CFI_ADJUST_CFA_OFFSET 8
82 /*CFI_REL_OFFSET ss,0*/
84 CFI_ADJUST_CFA_OFFSET 8
86 pushq $(1<<9) /* eflags - interrupts on */
87 CFI_ADJUST_CFA_OFFSET 8
88 /*CFI_REL_OFFSET rflags,0*/
89 pushq $__KERNEL_CS /* cs */
90 CFI_ADJUST_CFA_OFFSET 8
91 /*CFI_REL_OFFSET cs,0*/
92 pushq \child_rip /* rip */
93 CFI_ADJUST_CFA_OFFSET 8
95 pushq %rax /* orig rax */
96 CFI_ADJUST_CFA_OFFSET 8
99 .macro UNFAKE_STACK_FRAME
101 CFI_ADJUST_CFA_OFFSET -(6*8)
104 .macro CFI_DEFAULT_STACK start=1
109 CFI_DEF_CFA_OFFSET SS+8
111 CFI_REL_OFFSET r15,R15
112 CFI_REL_OFFSET r14,R14
113 CFI_REL_OFFSET r13,R13
114 CFI_REL_OFFSET r12,R12
115 CFI_REL_OFFSET rbp,RBP
116 CFI_REL_OFFSET rbx,RBX
117 CFI_REL_OFFSET r11,R11
118 CFI_REL_OFFSET r10,R10
121 CFI_REL_OFFSET rax,RAX
122 CFI_REL_OFFSET rcx,RCX
123 CFI_REL_OFFSET rdx,RDX
124 CFI_REL_OFFSET rsi,RSI
125 CFI_REL_OFFSET rdi,RDI
126 CFI_REL_OFFSET rip,RIP
127 /*CFI_REL_OFFSET cs,CS*/
128 /*CFI_REL_OFFSET rflags,EFLAGS*/
129 CFI_REL_OFFSET rsp,RSP
130 /*CFI_REL_OFFSET ss,SS*/
133 * A newly forked process directly context switches into this.
139 GET_THREAD_INFO(%rcx)
140 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
144 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
145 je int_ret_from_sys_call
146 testl $_TIF_IA32,threadinfo_flags(%rcx)
147 jnz int_ret_from_sys_call
148 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
149 jmp ret_from_sys_call
152 call syscall_trace_leave
153 GET_THREAD_INFO(%rcx)
159 * System call entry. Up to 6 arguments in registers are supported.
161 * SYSCALL does not save anything on the stack and does not change the
167 * rax system call number
169 * rcx return address for syscall/sysret, C arg3
172 * r10 arg3 (--> moved to rcx for C)
175 * r11 eflags for syscall/sysret, temporary for C
176 * r12-r15,rbp,rbx saved by C code, not touched.
178 * Interrupts are off on entry.
179 * Only called from user space.
181 * XXX if we had a free scratch register we could save the RSP into the stack frame
182 * and report it properly in ps. Unfortunately we haven't.
184 * When the user can change the frames, always force IRET. That is because
185 * it deals with uncanonical addresses better. SYSRET has trouble
186 * with them due to bugs in both AMD and Intel CPUs.
191 CFI_DEF_CFA rsp,PDA_STACKOFFSET
193 /*CFI_REGISTER rflags,r11*/
195 movq %rsp,%gs:pda_oldrsp
196 movq %gs:pda_kernelstack,%rsp
199 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
200 movq %rcx,RIP-ARGOFFSET(%rsp)
201 CFI_REL_OFFSET rip,RIP-ARGOFFSET
202 GET_THREAD_INFO(%rcx)
203 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
206 cmpq $__NR_syscall_max,%rax
209 call *sys_call_table(,%rax,8) # XXX: rip relative
210 movq %rax,RAX-ARGOFFSET(%rsp)
212 * Syscall return path ending with SYSRET (fast path)
213 * Has incomplete stack frame and undefined top of stack.
215 .globl ret_from_sys_call
217 movl $_TIF_ALLWORK_MASK,%edi
220 GET_THREAD_INFO(%rcx)
222 movl threadinfo_flags(%rcx),%edx
226 movq RIP-ARGOFFSET(%rsp),%rcx
228 RESTORE_ARGS 0,-ARG_SKIP,1
229 /*CFI_REGISTER rflags,r11*/
230 movq %gs:pda_oldrsp,%rsp
234 /* Handle reschedules */
235 /* edx: work, edi: workmask */
238 bt $TIF_NEED_RESCHED,%edx
242 CFI_ADJUST_CFA_OFFSET 8
245 CFI_ADJUST_CFA_OFFSET -8
248 /* Handle a signal */
251 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
254 /* Really a signal */
255 /* edx: work flags (arg3) */
256 leaq do_notify_resume(%rip),%rax
257 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
258 xorl %esi,%esi # oldset -> arg2
259 call ptregscall_common
260 1: movl $_TIF_NEED_RESCHED,%edi
261 /* Use IRET because user could have changed frame. This
262 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
267 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
268 jmp ret_from_sys_call
270 /* Do syscall tracing */
274 movq $-ENOSYS,RAX(%rsp)
275 FIXUP_TOP_OF_STACK %rdi
277 call syscall_trace_enter
278 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
280 cmpq $__NR_syscall_max,%rax
282 movq %r10,%rcx /* fixup for C */
283 call *sys_call_table(,%rax,8)
284 1: movq %rax,RAX-ARGOFFSET(%rsp)
285 /* Use IRET because user could have changed frame */
286 jmp int_ret_from_sys_call
291 * Syscall return path ending with IRET.
292 * Has correct top of stack, but partial stack frame.
294 ENTRY(int_ret_from_sys_call)
296 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
297 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
298 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
299 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
300 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
301 CFI_REL_OFFSET rip,RIP-ARGOFFSET
302 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
303 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
304 CFI_REL_OFFSET rax,RAX-ARGOFFSET
305 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
306 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
307 CFI_REL_OFFSET r8,R8-ARGOFFSET
308 CFI_REL_OFFSET r9,R9-ARGOFFSET
309 CFI_REL_OFFSET r10,R10-ARGOFFSET
310 CFI_REL_OFFSET r11,R11-ARGOFFSET
312 testl $3,CS-ARGOFFSET(%rsp)
313 je retint_restore_args
314 movl $_TIF_ALLWORK_MASK,%edi
315 /* edi: mask to check */
317 GET_THREAD_INFO(%rcx)
318 movl threadinfo_flags(%rcx),%edx
321 andl $~TS_COMPAT,threadinfo_status(%rcx)
324 /* Either reschedule or signal or syscall exit tracking needed. */
325 /* First do a reschedule test. */
326 /* edx: work, edi: workmask */
328 bt $TIF_NEED_RESCHED,%edx
332 CFI_ADJUST_CFA_OFFSET 8
335 CFI_ADJUST_CFA_OFFSET -8
339 /* handle signals and tracing -- both require a full stack frame */
343 /* Check for syscall exit trace */
344 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
347 CFI_ADJUST_CFA_OFFSET 8
348 leaq 8(%rsp),%rdi # &ptregs -> arg1
349 call syscall_trace_leave
351 CFI_ADJUST_CFA_OFFSET -8
352 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
357 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
359 movq %rsp,%rdi # &ptregs -> arg1
360 xorl %esi,%esi # oldset -> arg2
361 call do_notify_resume
362 1: movl $_TIF_NEED_RESCHED,%edi
368 END(int_ret_from_sys_call)
371 * Certain special system calls that need to save a complete full stack frame.
374 .macro PTREGSCALL label,func,arg
377 leaq \func(%rip),%rax
378 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
379 jmp ptregscall_common
/*
 * Stubs for the special syscalls that need a complete pt_regs frame.
 * Each PTREGSCALL expansion loads the C handler address and a pointer
 * to the frame (into the named argument register) and jumps to
 * ptregscall_common, which saves/restores the full register set.
 */
385 PTREGSCALL stub_clone, sys_clone, %r8
386 PTREGSCALL stub_fork, sys_fork, %rdi
387 PTREGSCALL stub_vfork, sys_vfork, %rdi
388 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
389 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
390 PTREGSCALL stub_iopl, sys_iopl, %rsi
392 ENTRY(ptregscall_common)
394 CFI_ADJUST_CFA_OFFSET -8
395 CFI_REGISTER rip, r11
398 CFI_REGISTER rip, r15
399 FIXUP_TOP_OF_STACK %r11
401 RESTORE_TOP_OF_STACK %r11
403 CFI_REGISTER rip, r11
406 CFI_ADJUST_CFA_OFFSET 8
407 CFI_REL_OFFSET rip, 0
410 END(ptregscall_common)
415 CFI_ADJUST_CFA_OFFSET -8
416 CFI_REGISTER rip, r11
418 FIXUP_TOP_OF_STACK %r11
420 RESTORE_TOP_OF_STACK %r11
423 jmp int_ret_from_sys_call
428 * sigreturn is special because it needs to restore all registers on return.
429 * This cannot be done with SYSRET, so use the IRET return path instead.
/*
 * rt_sigreturn must restore every register on return, which SYSRET
 * cannot do, so it leaves through the IRET path
 * (int_ret_from_sys_call) instead of the SYSRET fast path.
 */
431 ENTRY(stub_rt_sigreturn)
434 CFI_ADJUST_CFA_OFFSET -8
437 FIXUP_TOP_OF_STACK %r11	/* make SS/CS/EFLAGS in the frame valid before calling C */
438 call sys_rt_sigreturn
439 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
441 jmp int_ret_from_sys_call
443 END(stub_rt_sigreturn)
446 * initial frame state for interrupts and exceptions
450 CFI_DEF_CFA rsp,SS+8-\ref
451 /*CFI_REL_OFFSET ss,SS-\ref*/
452 CFI_REL_OFFSET rsp,RSP-\ref
453 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
454 /*CFI_REL_OFFSET cs,CS-\ref*/
455 CFI_REL_OFFSET rip,RIP-\ref
458 /* initial frame state for interrupts (and exceptions without error code) */
459 #define INTR_FRAME _frame RIP
460 /* initial frame state for exceptions with error code (and interrupts with
461 vector already pushed) */
462 #define XCPT_FRAME _frame ORIG_RAX
465 * Interrupt entry/exit.
467 * Interrupt entry points save only callee clobbered registers in fast path.
469 * Entry runs with interrupts off.
472 /* 0(%rsp): interrupt number */
473 .macro interrupt func
476 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
478 CFI_ADJUST_CFA_OFFSET 8
479 CFI_REL_OFFSET rbp, 0
481 CFI_DEF_CFA_REGISTER rbp
485 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
486 cmoveq %gs:pda_irqstackptr,%rsp
490 ENTRY(common_interrupt)
493 /* 0(%rsp): oldrsp-ARGOFFSET */
496 decl %gs:pda_irqcount
498 CFI_DEF_CFA_REGISTER rsp
499 CFI_ADJUST_CFA_OFFSET -8
501 GET_THREAD_INFO(%rcx)
502 testl $3,CS-ARGOFFSET(%rsp)
505 /* Interrupt came from user space */
507 * Has a correct top of stack, but a partial stack frame
508 * %rcx: thread info. Interrupts off.
510 retint_with_reschedule:
511 movl $_TIF_WORK_MASK,%edi
513 movl threadinfo_flags(%rcx),%edx
525 .section __ex_table,"a"
526 .quad iret_label,bad_iret
529 /* force a signal here? this matches i386 behaviour */
530 /* running with kernel gs */
532 movq $11,%rdi /* SIGSEGV */
537 /* edi: workmask, edx: work */
540 bt $TIF_NEED_RESCHED,%edx
544 CFI_ADJUST_CFA_OFFSET 8
547 CFI_ADJUST_CFA_OFFSET -8
548 GET_THREAD_INFO(%rcx)
553 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
557 movq $-1,ORIG_RAX(%rsp)
558 xorl %esi,%esi # oldset
559 movq %rsp,%rdi # &pt_regs
560 call do_notify_resume
563 movl $_TIF_NEED_RESCHED,%edi
564 GET_THREAD_INFO(%rcx)
567 #ifdef CONFIG_PREEMPT
568 /* Returning to kernel space. Check if we need preemption */
569 /* rcx: threadinfo. interrupts off. */
572 cmpl $0,threadinfo_preempt_count(%rcx)
573 jnz retint_restore_args
574 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
575 jnc retint_restore_args
576 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
577 jnc retint_restore_args
578 call preempt_schedule_irq
583 END(common_interrupt)
588 .macro apicinterrupt num,func
591 CFI_ADJUST_CFA_OFFSET 8
/* APIC thermal-monitor interrupt: dispatch to smp_thermal_interrupt
 * via the apicinterrupt stub macro. */
597 ENTRY(thermal_interrupt)
598 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
599 END(thermal_interrupt)
/* MCE threshold interrupt: dispatch to mce_threshold_interrupt
 * via the apicinterrupt stub macro. */
601 ENTRY(threshold_interrupt)
602 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
603 END(threshold_interrupt)
/* SMP cross-CPU reschedule IPI: dispatch to smp_reschedule_interrupt
 * via the apicinterrupt stub macro. */
606 ENTRY(reschedule_interrupt)
607 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
608 END(reschedule_interrupt)
610 .macro INVALIDATE_ENTRY num
611 ENTRY(invalidate_interrupt\num)
612 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
613 END(invalidate_interrupt\num)
/* SMP call-function IPI: dispatch to smp_call_function_interrupt
 * via the apicinterrupt stub macro. */
625 ENTRY(call_function_interrupt)
626 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
627 END(call_function_interrupt)
630 #ifdef CONFIG_X86_LOCAL_APIC
/* Local APIC timer tick: dispatch to smp_apic_timer_interrupt
 * via the apicinterrupt stub macro. */
631 ENTRY(apic_timer_interrupt)
632 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
633 END(apic_timer_interrupt)
635 ENTRY(error_interrupt)
636 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
/* Local APIC spurious interrupt: dispatch to smp_spurious_interrupt
 * via the apicinterrupt stub macro. */
639 ENTRY(spurious_interrupt)
640 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
641 END(spurious_interrupt)
645 * Exception entry points.
649 pushq $0 /* push error code/oldrax */
650 CFI_ADJUST_CFA_OFFSET 8
651 pushq %rax /* push real oldrax to the rdi slot */
652 CFI_ADJUST_CFA_OFFSET 8
658 .macro errorentry sym
661 CFI_ADJUST_CFA_OFFSET 8
667 /* error code is on the stack already */
668 /* handle NMI like exceptions that can happen everywhere */
669 .macro paranoidentry sym, ist=0
673 movl $MSR_GS_BASE,%ecx
681 movq %gs:pda_data_offset, %rbp
684 movq ORIG_RAX(%rsp),%rsi
685 movq $-1,ORIG_RAX(%rsp)
687 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
691 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
697 * Exception entry point. This expects an error code/orig_rax on the stack
698 * and the exception handler in %rax.
702 /* rdi slot contains rax, oldrax contains error code */
705 CFI_ADJUST_CFA_OFFSET (14*8)
707 CFI_REL_OFFSET rsi,RSI
708 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
710 CFI_REL_OFFSET rdx,RDX
712 CFI_REL_OFFSET rcx,RCX
713 movq %rsi,10*8(%rsp) /* store rax */
714 CFI_REL_OFFSET rax,RAX
720 CFI_REL_OFFSET r10,R10
722 CFI_REL_OFFSET r11,R11
724 CFI_REL_OFFSET rbx,RBX
726 CFI_REL_OFFSET rbp,RBP
728 CFI_REL_OFFSET r12,R12
730 CFI_REL_OFFSET r13,R13
732 CFI_REL_OFFSET r14,R14
734 CFI_REL_OFFSET r15,R15
743 movq ORIG_RAX(%rsp),%rsi /* get error code */
744 movq $-1,ORIG_RAX(%rsp)
746 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
751 GET_THREAD_INFO(%rcx)
754 movl threadinfo_flags(%rcx),%edx
755 movl $_TIF_WORK_MASK,%edi
765 /* There are two places in the kernel that can potentially fault with
766 usergs. Handle them here. The exception handlers after
767 iret run with kernel gs again, so don't set the user space flag.
768 B stepping K8s sometimes report a truncated RIP for IRET
769 exceptions returning to compat mode. Check for these here too. */
770 leaq iret_label(%rip),%rbp
773 movl %ebp,%ebp /* zero extend */
776 cmpq $gs_change,RIP(%rsp)
781 /* Reload gs selector with exception handling */
782 /* edi: new selector */
786 CFI_ADJUST_CFA_OFFSET 8
791 2: mfence /* workaround */
794 CFI_ADJUST_CFA_OFFSET -8
797 ENDPROC(load_gs_index)
799 .section __ex_table,"a"
801 .quad gs_change,bad_gs
804 /* running with kernelgs */
806 swapgs /* switch back to user gs */
813 * Create a kernel thread.
815 * C extern interface:
816 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
818 * asm input arguments:
819 * rdi: fn, rsi: arg, rdx: flags
823 FAKE_STACK_FRAME $child_rip
826 # rdi: flags, rsi: usp, rdx: will be &pt_regs
828 orq kernel_thread_flags(%rip),%rdi
841 * It isn't worth to check for reschedule here,
842 * so internally to the x86_64 port you can rely on kernel_thread()
843 * not to reschedule the child before returning, this avoids the need
844 * of hacks for example to fork off the per-CPU idle tasks.
845 * [Hopefully no generic code relies on the reschedule -AK]
851 ENDPROC(kernel_thread)
855 * Here we are in the child and the registers are set as they were
856 * at kernel_thread() invocation in the parent.
867 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
869 * C extern interface:
870 * extern long execve(char *name, char **argv, char **envp)
872 * asm input arguments:
873 * rdi: name, rsi: argv, rdx: envp
875 * We want to fallback into:
876 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
878 * do_sys_execve asm fallback arguments:
879 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
889 je int_ret_from_sys_call
896 KPROBE_ENTRY(page_fault)
897 errorentry do_page_fault
/* #MF x87 FPU error exception: no hardware error code, so zeroentry
 * pushes a zero one before calling do_coprocessor_error. */
901 ENTRY(coprocessor_error)
902 zeroentry do_coprocessor_error
903 END(coprocessor_error)
/* #XF SIMD floating-point exception: no hardware error code, so
 * zeroentry pushes a zero one before calling do_simd_coprocessor_error. */
905 ENTRY(simd_coprocessor_error)
906 zeroentry do_simd_coprocessor_error
907 END(simd_coprocessor_error)
/* #NM device-not-available exception: no hardware error code, so
 * zeroentry pushes a zero one and dispatches to math_state_restore. */
909 ENTRY(device_not_available)
910 zeroentry math_state_restore
911 END(device_not_available)
913 /* runs on exception stack */
917 CFI_ADJUST_CFA_OFFSET 8
918 paranoidentry do_debug, DEBUG_STACK
924 /* runs on exception stack */
928 CFI_ADJUST_CFA_OFFSET 8
931 * "Paranoid" exit path from exception stack.
932 * Paranoid because this is used by NMIs and cannot take
933 * any kernel state for granted.
934 * We don't do kernel preemption checks here, because only
935 * NMI should be common and it does not enable IRQs and
936 * cannot get reschedule ticks.
938 /* ebx: no swapgs flag */
940 testl %ebx,%ebx /* swapgs needed? */
943 jnz paranoid_userspace
950 GET_THREAD_INFO(%rcx)
951 movl threadinfo_flags(%rcx),%ebx
952 andl $_TIF_WORK_MASK,%ebx
954 movq %rsp,%rdi /* &pt_regs */
956 movq %rax,%rsp /* switch stack for scheduling */
957 testl $_TIF_NEED_RESCHED,%ebx
958 jnz paranoid_schedule
959 movl %ebx,%edx /* arg3: thread flags */
961 xorl %esi,%esi /* arg2: oldset */
962 movq %rsp,%rdi /* arg1: &pt_regs */
963 call do_notify_resume
965 jmp paranoid_userspace
970 jmp paranoid_userspace
978 CFI_ADJUST_CFA_OFFSET 8
979 paranoidentry do_int3, DEBUG_STACK
986 zeroentry do_overflow
994 zeroentry do_invalid_op
/* #MF-era coprocessor segment overrun (vector 9): no hardware error
 * code, zeroentry supplies a zero one before the C handler. */
997 ENTRY(coprocessor_segment_overrun)
998 zeroentry do_coprocessor_segment_overrun
999 END(coprocessor_segment_overrun)
1002 zeroentry do_reserved
1005 /* runs on exception stack */
1008 paranoidentry do_double_fault
1014 errorentry do_invalid_TSS
/* #NP segment-not-present: the CPU pushes an error code, so the
 * errorentry path is used before calling do_segment_not_present. */
1017 ENTRY(segment_not_present)
1018 errorentry do_segment_not_present
1019 END(segment_not_present)
1021 /* runs on exception stack */
1022 ENTRY(stack_segment)
1024 paranoidentry do_stack_segment
/* #GP general protection fault: error code already on the stack,
 * so errorentry dispatches to do_general_protection.  KPROBE_ENTRY
 * places the stub in the kprobes section. */
1029 KPROBE_ENTRY(general_protection)
1030 errorentry do_general_protection
1031 END(general_protection)
/* #AC alignment check: the CPU pushes an error code, so the
 * errorentry path is used before calling do_alignment_check. */
1034 ENTRY(alignment_check)
1035 errorentry do_alignment_check
1036 END(alignment_check)
1039 zeroentry do_divide_error
/* Vector 15 "spurious interrupt bug": no hardware error code, so
 * zeroentry supplies a zero one before do_spurious_interrupt_bug. */
1042 ENTRY(spurious_interrupt_bug)
1043 zeroentry do_spurious_interrupt_bug
1044 END(spurious_interrupt_bug)
1046 #ifdef CONFIG_X86_MCE
1047 /* runs on exception stack */
1048 ENTRY(machine_check)
1051 CFI_ADJUST_CFA_OFFSET 8
1052 paranoidentry do_machine_check
1060 movq %gs:pda_irqstackptr,%rax
1062 CFI_DEF_CFA_REGISTER rdx
1063 incl %gs:pda_irqcount
1066 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1069 CFI_DEF_CFA_REGISTER rsp
1070 decl %gs:pda_irqcount
1073 ENDPROC(call_softirq)
1075 #ifdef CONFIG_STACK_UNWIND
/*
 * Initialize a pt_regs-style frame at (%rdi) for the stack unwinder,
 * capturing the current callee-saved registers directly.  The
 * remaining slots are filled from %rax/%rcx (set up by code not
 * shown here — presumably %rax is zeroed and %rcx holds the caller
 * context; verify against the full source).  CS and SS are set to
 * the kernel selectors.
 */
1076 ENTRY(arch_unwind_init_running)
1078 movq %r15, R15(%rdi)
1079 movq %r14, R14(%rdi)
1081 movq %r13, R13(%rdi)
1082 movq %r12, R12(%rdi)
1084 movq %rbp, RBP(%rdi)
1085 movq %rbx, RBX(%rdi)
1087 movq %rax, R11(%rdi)
1088 movq %rax, R10(%rdi)
1091 movq %rax, RAX(%rdi)
1092 movq %rax, RCX(%rdi)
1093 movq %rax, RDX(%rdi)
1094 movq %rax, RSI(%rdi)
1095 movq %rax, RDI(%rdi)
1096 movq %rax, ORIG_RAX(%rdi)
1097 movq %rcx, RIP(%rdi)
1099 movq $__KERNEL_CS, CS(%rdi)
1100 movq %rax, EFLAGS(%rdi)
1101 movq %rcx, RSP(%rdi)
1102 movq $__KERNEL_DS, SS(%rdi)
1105 ENDPROC(arch_unwind_init_running)