2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
10 * entry.S contains the system-call and fault low-level handling routines.
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after an interrupt and after each system call.
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals, or fork/exec et al.
18 * A note on terminology:
19 * - top of stack: Architecture-defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like partial stack frame, but all registers saved.
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers are
30 * not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END - Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
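 *
 * Concretely, with the pt_regs layout assumed here, SAVE_ARGS fills the
 * R11..RDI slots (r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi) to form the
 * partial stack frame, and SAVE_REST then fills the remaining RBX, RBP and
 * R12-R15 slots to complete the full stack frame.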
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
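/*
 * For reference: with EM_X86_64 == 62 (0x3e), AUDIT_ARCH_X86_64 evaluates to
 * 0x3e | 0x80000000 | 0x40000000 == 0xc000003e, the arch value reported to
 * the audit subsystem for 64-bit syscalls.
 */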
64 #ifdef CONFIG_FUNCTION_TRACER
65 #ifdef CONFIG_DYNAMIC_FTRACE
71 cmpl $0, function_trace_stop
74 /* taken from glibc */
86 subq $MCOUNT_INSN_SIZE, %rdi
101 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
102 .globl ftrace_graph_call
112 #else /* ! CONFIG_DYNAMIC_FTRACE */
114 cmpl $0, function_trace_stop
117 cmpq $ftrace_stub, ftrace_trace_function
120 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
121 cmpq $ftrace_stub, ftrace_graph_return
122 jnz ftrace_graph_caller
130 /* taken from glibc */
140 movq 0x38(%rsp), %rdi
142 subq $MCOUNT_INSN_SIZE, %rdi
144 call *ftrace_trace_function
157 #endif /* CONFIG_DYNAMIC_FTRACE */
158 #endif /* CONFIG_FUNCTION_TRACER */
160 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
161 ENTRY(ftrace_graph_caller)
162 cmpl $0, function_trace_stop
175 movq 0x38(%rsp), %rsi
176 subq $MCOUNT_INSN_SIZE, %rsi
178 call prepare_ftrace_return
189 END(ftrace_graph_caller)
192 .globl return_to_handler
206 call ftrace_return_to_handler
223 #ifndef CONFIG_PREEMPT
224 #define retint_kernel retint_restore_args
227 #ifdef CONFIG_PARAVIRT
228 ENTRY(native_usergs_sysret64)
231 #endif /* CONFIG_PARAVIRT */
234 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
235 #ifdef CONFIG_TRACE_IRQFLAGS
236 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
244 * C code is not supposed to know about the undefined top of stack. Every time
245 * a C function with a pt_regs argument is called from the SYSCALL-based
246 * fast path, FIXUP_TOP_OF_STACK is needed.
247 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs changes.
251 /* %rsp:at FRAMEEND */
252 .macro FIXUP_TOP_OF_STACK tmp
253 movq %gs:pda_oldrsp,\tmp
255 movq $__USER_DS,SS(%rsp)
256 movq $__USER_CS,CS(%rsp)
258 movq R11(%rsp),\tmp /* get eflags */
259 movq \tmp,EFLAGS(%rsp)
262 .macro RESTORE_TOP_OF_STACK tmp,offset=0
263 movq RSP-\offset(%rsp),\tmp
264 movq \tmp,%gs:pda_oldrsp
265 movq EFLAGS-\offset(%rsp),\tmp
266 movq \tmp,R11-\offset(%rsp)
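/*
 * Taken together: FIXUP_TOP_OF_STACK synthesizes the RSP/SS/CS/EFLAGS slots
 * that SYSCALL never saved (user RSP from pda_oldrsp, eflags from the R11
 * slot), and RESTORE_TOP_OF_STACK copies a possibly-updated RSP back into
 * pda_oldrsp and EFLAGS back into the R11 slot for the SYSRET fast path.
 */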
269 .macro FAKE_STACK_FRAME child_rip
270 /* push in order ss, rsp, eflags, cs, rip */
272 pushq $__KERNEL_DS /* ss */
273 CFI_ADJUST_CFA_OFFSET 8
274 /*CFI_REL_OFFSET ss,0*/
276 CFI_ADJUST_CFA_OFFSET 8
278 pushq $(1<<9) /* eflags - interrupts on */
279 CFI_ADJUST_CFA_OFFSET 8
280 /*CFI_REL_OFFSET rflags,0*/
281 pushq $__KERNEL_CS /* cs */
282 CFI_ADJUST_CFA_OFFSET 8
283 /*CFI_REL_OFFSET cs,0*/
284 pushq \child_rip /* rip */
285 CFI_ADJUST_CFA_OFFSET 8
287 pushq %rax /* orig rax */
288 CFI_ADJUST_CFA_OFFSET 8
291 .macro UNFAKE_STACK_FRAME
293 CFI_ADJUST_CFA_OFFSET -(6*8)
296 .macro CFI_DEFAULT_STACK start=1
302 CFI_DEF_CFA_OFFSET SS+8
304 CFI_REL_OFFSET r15,R15
305 CFI_REL_OFFSET r14,R14
306 CFI_REL_OFFSET r13,R13
307 CFI_REL_OFFSET r12,R12
308 CFI_REL_OFFSET rbp,RBP
309 CFI_REL_OFFSET rbx,RBX
310 CFI_REL_OFFSET r11,R11
311 CFI_REL_OFFSET r10,R10
314 CFI_REL_OFFSET rax,RAX
315 CFI_REL_OFFSET rcx,RCX
316 CFI_REL_OFFSET rdx,RDX
317 CFI_REL_OFFSET rsi,RSI
318 CFI_REL_OFFSET rdi,RDI
319 CFI_REL_OFFSET rip,RIP
320 /*CFI_REL_OFFSET cs,CS*/
321 /*CFI_REL_OFFSET rflags,EFLAGS*/
322 CFI_REL_OFFSET rsp,RSP
323 /*CFI_REL_OFFSET ss,SS*/
326 * A newly forked process directly context switches into this.
331 push kernel_eflags(%rip)
332 CFI_ADJUST_CFA_OFFSET 8
333 popf # reset kernel eflags
334 CFI_ADJUST_CFA_OFFSET -8
336 GET_THREAD_INFO(%rcx)
337 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
341 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
342 je int_ret_from_sys_call
343 testl $_TIF_IA32,TI_flags(%rcx)
344 jnz int_ret_from_sys_call
345 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
346 jmp ret_from_sys_call
349 call syscall_trace_leave
350 GET_THREAD_INFO(%rcx)
356 * System call entry. Up to 6 arguments in registers are supported.
358 * SYSCALL does not save anything on the stack and does not change the stack pointer.
364 * rax system call number
366 * rcx return address for syscall/sysret, C arg3
369 * r10 arg3 (--> moved to rcx for C)
372 * r11 eflags for syscall/sysret, temporary for C
373 * r12-r15,rbp,rbx saved by C code, not touched.
375 * Interrupts are off on entry.
376 * Only called from user space.
378 * XXX if we had a free scratch register we could save the RSP into the stack frame
379 * and report it properly in ps. Unfortunately we don't have one.
381 * When the user can change the frames, always force IRET. That is because
382 * it deals with non-canonical addresses better. SYSRET has trouble
383 * with them due to bugs in both AMD and Intel CPUs.
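 *
 * For illustration only (the user-space side of this ABI, not part of this
 * file): a call such as write(1, buf, len) arrives here roughly as
 *
 *	movl	$1,%edi			# fd
 *	movq	$buf,%rsi		# buf (made-up symbol)
 *	movq	$len,%rdx		# count (made-up symbol)
 *	movl	$__NR_write,%eax	# system call number, 1 on x86-64
 *	syscall				# rcx <- return rip, r11 <- rflags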
389 CFI_DEF_CFA rsp,PDA_STACKOFFSET
391 /*CFI_REGISTER rflags,r11*/
394 * A hypervisor implementation might want to use a label
395 * after the swapgs, so that it can do the swapgs
396 * for the guest and jump here on syscall.
398 ENTRY(system_call_after_swapgs)
400 movq %rsp,%gs:pda_oldrsp
401 movq %gs:pda_kernelstack,%rsp
403 * No need to follow this irqs off/on section - it's straight and short:
406 ENABLE_INTERRUPTS(CLBR_NONE)
408 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
409 movq %rcx,RIP-ARGOFFSET(%rsp)
410 CFI_REL_OFFSET rip,RIP-ARGOFFSET
411 GET_THREAD_INFO(%rcx)
412 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
414 system_call_fastpath:
415 cmpq $__NR_syscall_max,%rax
418 call *sys_call_table(,%rax,8) # XXX: rip relative
419 movq %rax,RAX-ARGOFFSET(%rsp)
421 * Syscall return path ending with SYSRET (fast path)
422 * Has incomplete stack frame and undefined top of stack.
425 movl $_TIF_ALLWORK_MASK,%edi
429 GET_THREAD_INFO(%rcx)
430 DISABLE_INTERRUPTS(CLBR_NONE)
432 movl TI_flags(%rcx),%edx
437 * sysretq will re-enable interrupts:
440 movq RIP-ARGOFFSET(%rsp),%rcx
442 RESTORE_ARGS 0,-ARG_SKIP,1
443 /*CFI_REGISTER rflags,r11*/
444 movq %gs:pda_oldrsp, %rsp
448 /* Handle reschedules */
449 /* edx: work, edi: workmask */
451 bt $TIF_NEED_RESCHED,%edx
454 ENABLE_INTERRUPTS(CLBR_NONE)
456 CFI_ADJUST_CFA_OFFSET 8
459 CFI_ADJUST_CFA_OFFSET -8
462 /* Handle a signal */
465 ENABLE_INTERRUPTS(CLBR_NONE)
466 #ifdef CONFIG_AUDITSYSCALL
467 bt $TIF_SYSCALL_AUDIT,%edx
470 /* edx: work flags (arg3) */
471 leaq do_notify_resume(%rip),%rax
472 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
473 xorl %esi,%esi # oldset -> arg2
474 call ptregscall_common
475 movl $_TIF_WORK_MASK,%edi
476 /* Use IRET because user could have changed frame. This
477 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
478 DISABLE_INTERRUPTS(CLBR_NONE)
483 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
484 jmp ret_from_sys_call
486 #ifdef CONFIG_AUDITSYSCALL
488 * Fast path for syscall audit without full syscall trace.
489 * We just call audit_syscall_entry() directly, and then
490 * jump back to the normal fast path.
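 *
 * The register shuffle below matches a C prototype along the lines of
 *	audit_syscall_entry(int arch, int major,
 *			    unsigned long a1, unsigned long a2,
 *			    unsigned long a3, unsigned long a4);
 * so only the first four syscall arguments are reported on this path.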
493 movq %r10,%r9 /* 6th arg: 4th syscall arg */
494 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
495 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
496 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
497 movq %rax,%rsi /* 2nd arg: syscall number */
498 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
499 call audit_syscall_entry
500 LOAD_ARGS 0 /* reload call-clobbered registers */
501 jmp system_call_fastpath
504 * Return fast path for syscall audit. Call audit_syscall_exit()
505 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT masked off.
509 movq %rax,%rsi /* second arg, syscall return value */
510 cmpq $0,%rax /* is it < 0? */
511 setl %al /* 1 if so, 0 if not */
512 movzbl %al,%edi /* zero-extend that into %edi */
513 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
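	/*
	 * Worked example: a syscall returning -EBADF gives %al = 1 and thus
	 * %edi = 2 (AUDITSC_FAILURE); a return of 0 or a positive value gives
	 * %al = 0 and %edi = 1 (AUDITSC_SUCCESS).
	 */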
514 call audit_syscall_exit
515 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
517 #endif /* CONFIG_AUDITSYSCALL */
519 /* Do syscall tracing */
521 #ifdef CONFIG_AUDITSYSCALL
522 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
526 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
527 FIXUP_TOP_OF_STACK %rdi
529 call syscall_trace_enter
531 * Reload arg registers from stack in case ptrace changed them.
532 * We don't reload %rax because syscall_trace_enter() returned
533 * the value it wants us to use in the table lookup.
535 LOAD_ARGS ARGOFFSET, 1
537 cmpq $__NR_syscall_max,%rax
538 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
539 movq %r10,%rcx /* fixup for C */
540 call *sys_call_table(,%rax,8)
541 movq %rax,RAX-ARGOFFSET(%rsp)
542 /* Use IRET because user could have changed frame */
545 * Syscall return path ending with IRET.
546 * Has correct top of stack, but partial stack frame.
548 .globl int_ret_from_sys_call
549 .globl int_with_check
550 int_ret_from_sys_call:
551 DISABLE_INTERRUPTS(CLBR_NONE)
553 testl $3,CS-ARGOFFSET(%rsp)
554 je retint_restore_args
555 movl $_TIF_ALLWORK_MASK,%edi
556 /* edi: mask to check */
559 GET_THREAD_INFO(%rcx)
560 movl TI_flags(%rcx),%edx
563 andl $~TS_COMPAT,TI_status(%rcx)
566 /* Either reschedule or signal or syscall exit tracking needed. */
567 /* First do a reschedule test. */
568 /* edx: work, edi: workmask */
570 bt $TIF_NEED_RESCHED,%edx
573 ENABLE_INTERRUPTS(CLBR_NONE)
575 CFI_ADJUST_CFA_OFFSET 8
578 CFI_ADJUST_CFA_OFFSET -8
579 DISABLE_INTERRUPTS(CLBR_NONE)
583 /* handle signals and tracing -- both require a full stack frame */
586 ENABLE_INTERRUPTS(CLBR_NONE)
588 /* Check for syscall exit trace */
589 testl $_TIF_WORK_SYSCALL_EXIT,%edx
592 CFI_ADJUST_CFA_OFFSET 8
593 leaq 8(%rsp),%rdi # &ptregs -> arg1
594 call syscall_trace_leave
596 CFI_ADJUST_CFA_OFFSET -8
597 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
601 testl $_TIF_DO_NOTIFY_MASK,%edx
603 movq %rsp,%rdi # &ptregs -> arg1
604 xorl %esi,%esi # oldset -> arg2
605 call do_notify_resume
606 1: movl $_TIF_WORK_MASK,%edi
609 DISABLE_INTERRUPTS(CLBR_NONE)
616 * Certain special system calls need to save a complete full stack frame.
619 .macro PTREGSCALL label,func,arg
622 leaq \func(%rip),%rax
623 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
624 jmp ptregscall_common
630 PTREGSCALL stub_clone, sys_clone, %r8
631 PTREGSCALL stub_fork, sys_fork, %rdi
632 PTREGSCALL stub_vfork, sys_vfork, %rdi
633 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
634 PTREGSCALL stub_iopl, sys_iopl, %rsi
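/*
 * For illustration, "PTREGSCALL stub_clone, sys_clone, %r8" expands to
 * roughly
 *
 *	stub_clone:
 *		leaq	sys_clone(%rip),%rax
 *		leaq	-ARGOFFSET+8(%rsp),%r8	# pt_regs pointer, 8 for return address
 *		jmp	ptregscall_common
 *
 * which then continues in ptregscall_common below.
 */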
636 ENTRY(ptregscall_common)
638 CFI_ADJUST_CFA_OFFSET -8
639 CFI_REGISTER rip, r11
642 CFI_REGISTER rip, r15
643 FIXUP_TOP_OF_STACK %r11
645 RESTORE_TOP_OF_STACK %r11
647 CFI_REGISTER rip, r11
650 CFI_ADJUST_CFA_OFFSET 8
651 CFI_REL_OFFSET rip, 0
654 END(ptregscall_common)
659 CFI_ADJUST_CFA_OFFSET -8
660 CFI_REGISTER rip, r11
662 FIXUP_TOP_OF_STACK %r11
665 RESTORE_TOP_OF_STACK %r11
668 jmp int_ret_from_sys_call
673 * sigreturn is special because it needs to restore all registers on return.
674 * This cannot be done with SYSRET, so use the IRET return path instead.
676 ENTRY(stub_rt_sigreturn)
679 CFI_ADJUST_CFA_OFFSET -8
682 FIXUP_TOP_OF_STACK %r11
683 call sys_rt_sigreturn
684 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
686 jmp int_ret_from_sys_call
688 END(stub_rt_sigreturn)
691 * initial frame state for interrupts and exceptions
696 CFI_DEF_CFA rsp,SS+8-\ref
697 /*CFI_REL_OFFSET ss,SS-\ref*/
698 CFI_REL_OFFSET rsp,RSP-\ref
699 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
700 /*CFI_REL_OFFSET cs,CS-\ref*/
701 CFI_REL_OFFSET rip,RIP-\ref
704 /* initial frame state for interrupts (and exceptions without error code) */
705 #define INTR_FRAME _frame RIP
706 /* initial frame state for exceptions with error code (and interrupts with
707 vector already pushed) */
708 #define XCPT_FRAME _frame ORIG_RAX
711 * Interrupt entry/exit.
713 * Interrupt entry points save only the call-clobbered registers in the fast path.
715 * Entry runs with interrupts off.
718 /* 0(%rsp): interrupt number */
719 .macro interrupt func
722 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
725 * Save rbp twice: one is for marking the stack frame, as usual, and the
726 * other is to fill pt_regs properly. This is because bx comes right
727 * before the last saved register in that structure, and not bp. If the
728 * base pointer were in the place bx is today, this would not be needed.
731 CFI_ADJUST_CFA_OFFSET 8
732 CFI_REL_OFFSET rbp, 0
734 CFI_DEF_CFA_REGISTER rbp
738 /* irqcount is used to check if a CPU is already on an interrupt
739 stack or not. While this is essentially redundant with preempt_count,
740 it is a little cheaper to use a separate counter in the PDA
741 (short of moving irq_enter into assembly, which would be too much work)
743 1: incl %gs:pda_irqcount
744 cmoveq %gs:pda_irqstackptr,%rsp
745 push %rbp # backlink for old unwinder
747 * We entered an interrupt context - irqs are off:
753 ENTRY(common_interrupt)
756 /* 0(%rsp): oldrsp-ARGOFFSET */
758 DISABLE_INTERRUPTS(CLBR_NONE)
760 decl %gs:pda_irqcount
762 CFI_DEF_CFA_REGISTER rsp
763 CFI_ADJUST_CFA_OFFSET -8
765 GET_THREAD_INFO(%rcx)
766 testl $3,CS-ARGOFFSET(%rsp)
769 /* Interrupt came from user space */
771 * Has a correct top of stack, but a partial stack frame
772 * %rcx: thread info. Interrupts off.
774 retint_with_reschedule:
775 movl $_TIF_WORK_MASK,%edi
778 movl TI_flags(%rcx),%edx
783 retint_swapgs: /* return to user-space */
785 * The iretq could re-enable interrupts:
787 DISABLE_INTERRUPTS(CLBR_ANY)
792 retint_restore_args: /* return to kernel space */
793 DISABLE_INTERRUPTS(CLBR_ANY)
795 * The iretq could re-enable interrupts:
804 .section __ex_table, "a"
805 .quad irq_return, bad_iret
808 #ifdef CONFIG_PARAVIRT
812 .section __ex_table,"a"
813 .quad native_iret, bad_iret
820 * The iret traps when the %cs or %ss being restored is bogus.
821 * We've lost the original trap vector and error code.
822 * #GPF is the most likely one to get for an invalid selector.
823 * So pretend we completed the iret and took the #GPF in user mode.
825 * We are now running with the kernel GS after exception recovery.
826 * But error_entry expects us to have user GS to match the user %cs, so swap back.
832 jmp general_protection
836 /* edi: workmask, edx: work */
839 bt $TIF_NEED_RESCHED,%edx
842 ENABLE_INTERRUPTS(CLBR_NONE)
844 CFI_ADJUST_CFA_OFFSET 8
847 CFI_ADJUST_CFA_OFFSET -8
848 GET_THREAD_INFO(%rcx)
849 DISABLE_INTERRUPTS(CLBR_NONE)
854 testl $_TIF_DO_NOTIFY_MASK,%edx
857 ENABLE_INTERRUPTS(CLBR_NONE)
859 movq $-1,ORIG_RAX(%rsp)
860 xorl %esi,%esi # oldset
861 movq %rsp,%rdi # &pt_regs
862 call do_notify_resume
864 DISABLE_INTERRUPTS(CLBR_NONE)
866 GET_THREAD_INFO(%rcx)
867 jmp retint_with_reschedule
869 #ifdef CONFIG_PREEMPT
870 /* Returning to kernel space. Check if we need preemption */
871 /* rcx: threadinfo. interrupts off. */
873 cmpl $0,TI_preempt_count(%rcx)
874 jnz retint_restore_args
875 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
876 jnc retint_restore_args
877 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
878 jnc retint_restore_args
879 call preempt_schedule_irq
884 END(common_interrupt)
889 .macro apicinterrupt num,func
892 CFI_ADJUST_CFA_OFFSET 8
898 ENTRY(thermal_interrupt)
899 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
900 END(thermal_interrupt)
902 ENTRY(threshold_interrupt)
903 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
904 END(threshold_interrupt)
907 ENTRY(reschedule_interrupt)
908 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
909 END(reschedule_interrupt)
911 .macro INVALIDATE_ENTRY num
912 ENTRY(invalidate_interrupt\num)
913 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
914 END(invalidate_interrupt\num)
926 ENTRY(call_function_interrupt)
927 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
928 END(call_function_interrupt)
929 ENTRY(call_function_single_interrupt)
930 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
931 END(call_function_single_interrupt)
932 ENTRY(irq_move_cleanup_interrupt)
933 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
934 END(irq_move_cleanup_interrupt)
937 ENTRY(apic_timer_interrupt)
938 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
939 END(apic_timer_interrupt)
941 ENTRY(uv_bau_message_intr1)
942 apicinterrupt 220,uv_bau_message_interrupt
943 END(uv_bau_message_intr1)
945 ENTRY(error_interrupt)
946 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
949 ENTRY(spurious_interrupt)
950 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
951 END(spurious_interrupt)
954 * Exception entry points.
958 PARAVIRT_ADJUST_EXCEPTION_FRAME
959 pushq $0 /* push error code/oldrax */
960 CFI_ADJUST_CFA_OFFSET 8
961 pushq %rax /* push real oldrax to the rdi slot */
962 CFI_ADJUST_CFA_OFFSET 8
969 .macro errorentry sym
971 PARAVIRT_ADJUST_EXCEPTION_FRAME
973 CFI_ADJUST_CFA_OFFSET 8
980 /* error code is on the stack already */
981 /* handle NMI-like exceptions that can happen everywhere */
982 .macro paranoidentry sym, ist=0, irqtrace=1
986 movl $MSR_GS_BASE,%ecx
994 movq %gs:pda_data_offset, %rbp
1000 movq ORIG_RAX(%rsp),%rsi
1001 movq $-1,ORIG_RAX(%rsp)
1003 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1007 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
1009 DISABLE_INTERRUPTS(CLBR_NONE)
1016 * "Paranoid" exit path from exception stack.
1017 * Paranoid because this is used by NMIs and cannot take
1018 * any kernel state for granted.
1019 * We don't do kernel preemption checks here, because the only
1020 * common user should be NMI, which does not enable IRQs and
1021 * cannot get reschedule ticks.
1023 * "trace" is 0 for the NMI handler only, because irq-tracing
1024 * is fundamentally NMI-unsafe. (we cannot change the soft and
1025 * hard flags at once, atomically)
1027 .macro paranoidexit trace=1
1028 /* ebx: no swapgs flag */
1029 paranoid_exit\trace:
1030 testl %ebx,%ebx /* swapgs needed? */
1031 jnz paranoid_restore\trace
1033 jnz paranoid_userspace\trace
1034 paranoid_swapgs\trace:
1039 paranoid_restore\trace:
1042 paranoid_userspace\trace:
1043 GET_THREAD_INFO(%rcx)
1044 movl TI_flags(%rcx),%ebx
1045 andl $_TIF_WORK_MASK,%ebx
1046 jz paranoid_swapgs\trace
1047 movq %rsp,%rdi /* &pt_regs */
1049 movq %rax,%rsp /* switch stack for scheduling */
1050 testl $_TIF_NEED_RESCHED,%ebx
1051 jnz paranoid_schedule\trace
1052 movl %ebx,%edx /* arg3: thread flags */
1056 ENABLE_INTERRUPTS(CLBR_NONE)
1057 xorl %esi,%esi /* arg2: oldset */
1058 movq %rsp,%rdi /* arg1: &pt_regs */
1059 call do_notify_resume
1060 DISABLE_INTERRUPTS(CLBR_NONE)
1064 jmp paranoid_userspace\trace
1065 paranoid_schedule\trace:
1069 ENABLE_INTERRUPTS(CLBR_ANY)
1071 DISABLE_INTERRUPTS(CLBR_ANY)
1075 jmp paranoid_userspace\trace
1080 * Exception entry point. This expects an error code/orig_rax on the stack
1081 * and the exception handler in %rax.
1083 KPROBE_ENTRY(error_entry)
1085 CFI_REL_OFFSET rax,0
1086 /* rdi slot contains rax, oldrax contains error code */
1089 CFI_ADJUST_CFA_OFFSET (14*8)
1090 movq %rsi,13*8(%rsp)
1091 CFI_REL_OFFSET rsi,RSI
1092 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
1093 CFI_REGISTER rax,rsi
1094 movq %rdx,12*8(%rsp)
1095 CFI_REL_OFFSET rdx,RDX
1096 movq %rcx,11*8(%rsp)
1097 CFI_REL_OFFSET rcx,RCX
1098 movq %rsi,10*8(%rsp) /* store rax */
1099 CFI_REL_OFFSET rax,RAX
1101 CFI_REL_OFFSET r8,R8
1103 CFI_REL_OFFSET r9,R9
1105 CFI_REL_OFFSET r10,R10
1107 CFI_REL_OFFSET r11,R11
1109 CFI_REL_OFFSET rbx,RBX
1111 CFI_REL_OFFSET rbp,RBP
1113 CFI_REL_OFFSET r12,R12
1115 CFI_REL_OFFSET r13,R13
1117 CFI_REL_OFFSET r14,R14
1119 CFI_REL_OFFSET r15,R15
1122 je error_kernelspace
1128 CFI_REL_OFFSET rdi,RDI
1130 movq ORIG_RAX(%rsp),%rsi /* get error code */
1131 movq $-1,ORIG_RAX(%rsp)
1133 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1137 DISABLE_INTERRUPTS(CLBR_NONE)
1139 GET_THREAD_INFO(%rcx)
1142 LOCKDEP_SYS_EXIT_IRQ
1143 movl TI_flags(%rcx),%edx
1144 movl $_TIF_WORK_MASK,%edi
1152 /* There are two places in the kernel that can potentially fault with
1153 usergs. Handle them here. The exception handlers after
1154 iret run with kernel gs again, so don't set the user space flag.
1155 B-stepping K8s sometimes report a truncated RIP for IRET
1156 exceptions returning to compat mode. Check for these here too. */
1157 leaq irq_return(%rip),%rcx
1160 movl %ecx,%ecx /* zero extend */
1163 cmpq $gs_change,RIP(%rsp)
1166 KPROBE_END(error_entry)
1168 /* Reload gs selector with exception handling */
1169 /* edi: new selector */
1170 ENTRY(native_load_gs_index)
1173 CFI_ADJUST_CFA_OFFSET 8
1174 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1178 2: mfence /* workaround */
1181 CFI_ADJUST_CFA_OFFSET -8
1184 ENDPROC(native_load_gs_index)
1186 .section __ex_table,"a"
1188 .quad gs_change,bad_gs
1190 .section .fixup,"ax"
1191 /* running with kernelgs */
1193 SWAPGS /* switch back to user gs */
1200 * Create a kernel thread.
1202 * C extern interface:
1203 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1205 * asm input arguments:
1206 * rdi: fn, rsi: arg, rdx: flags
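 *
 * Illustrative call site (my_thread_fn and my_data are made-up names):
 *	kernel_thread(my_thread_fn, &my_data, CLONE_FS | CLONE_FILES | SIGCHLD);
 * The return value is the new thread's pid, or a negative errno on failure.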
1208 ENTRY(kernel_thread)
1210 FAKE_STACK_FRAME $child_rip
1213 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1215 orq kernel_thread_flags(%rip),%rdi
1228 * It isn't worth checking for a reschedule here,
1229 * so internally to the x86_64 port you can rely on kernel_thread()
1230 * not to reschedule the child before returning; this avoids the need
1231 * for hacks, for example to fork off the per-CPU idle tasks.
1232 * [Hopefully no generic code relies on the reschedule -AK]
1238 ENDPROC(kernel_thread)
1241 pushq $0 # fake return address
1244 * Here we are in the child and the registers are set as they were
1245 * at kernel_thread() invocation in the parent.
1257 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1259 * C extern interface:
1260 * extern long execve(char *name, char **argv, char **envp)
1262 * asm input arguments:
1263 * rdi: name, rsi: argv, rdx: envp
1265 * We want to fall back into:
1266 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
1268 * do_sys_execve asm fallback arguments:
1269 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
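 *
 * For example, the early boot code starts user space through this path
 * with a call along the lines of
 *	kernel_execve("/sbin/init", argv_init, envp_init);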
1271 ENTRY(kernel_execve)
1277 movq %rax, RAX(%rsp)
1280 je int_ret_from_sys_call
1285 ENDPROC(kernel_execve)
1287 KPROBE_ENTRY(page_fault)
1288 errorentry do_page_fault
1289 KPROBE_END(page_fault)
1291 ENTRY(coprocessor_error)
1292 zeroentry do_coprocessor_error
1293 END(coprocessor_error)
1295 ENTRY(simd_coprocessor_error)
1296 zeroentry do_simd_coprocessor_error
1297 END(simd_coprocessor_error)
1299 ENTRY(device_not_available)
1300 zeroentry do_device_not_available
1301 END(device_not_available)
1303 /* runs on exception stack */
1306 PARAVIRT_ADJUST_EXCEPTION_FRAME
1308 CFI_ADJUST_CFA_OFFSET 8
1309 paranoidentry do_debug, DEBUG_STACK
1313 /* runs on exception stack */
1316 PARAVIRT_ADJUST_EXCEPTION_FRAME
1318 CFI_ADJUST_CFA_OFFSET 8
1319 paranoidentry do_nmi, 0, 0
1320 #ifdef CONFIG_TRACE_IRQFLAGS
1330 PARAVIRT_ADJUST_EXCEPTION_FRAME
1332 CFI_ADJUST_CFA_OFFSET 8
1333 paranoidentry do_int3, DEBUG_STACK
1339 zeroentry do_overflow
1347 zeroentry do_invalid_op
1350 ENTRY(coprocessor_segment_overrun)
1351 zeroentry do_coprocessor_segment_overrun
1352 END(coprocessor_segment_overrun)
1354 /* runs on exception stack */
1357 PARAVIRT_ADJUST_EXCEPTION_FRAME
1358 paranoidentry do_double_fault
1364 errorentry do_invalid_TSS
1367 ENTRY(segment_not_present)
1368 errorentry do_segment_not_present
1369 END(segment_not_present)
1371 /* runs on exception stack */
1372 ENTRY(stack_segment)
1374 PARAVIRT_ADJUST_EXCEPTION_FRAME
1375 paranoidentry do_stack_segment
1380 KPROBE_ENTRY(general_protection)
1381 errorentry do_general_protection
1382 KPROBE_END(general_protection)
1384 ENTRY(alignment_check)
1385 errorentry do_alignment_check
1386 END(alignment_check)
1389 zeroentry do_divide_error
1392 ENTRY(spurious_interrupt_bug)
1393 zeroentry do_spurious_interrupt_bug
1394 END(spurious_interrupt_bug)
1396 #ifdef CONFIG_X86_MCE
1397 /* runs on exception stack */
1398 ENTRY(machine_check)
1400 PARAVIRT_ADJUST_EXCEPTION_FRAME
1402 CFI_ADJUST_CFA_OFFSET 8
1403 paranoidentry do_machine_check
1409 /* Call softirq on interrupt stack. Interrupts are off. */
1413 CFI_ADJUST_CFA_OFFSET 8
1414 CFI_REL_OFFSET rbp,0
1416 CFI_DEF_CFA_REGISTER rbp
1417 incl %gs:pda_irqcount
1418 cmove %gs:pda_irqstackptr,%rsp
1419 push %rbp # backlink for old unwinder
1422 CFI_DEF_CFA_REGISTER rsp
1423 CFI_ADJUST_CFA_OFFSET -8
1424 decl %gs:pda_irqcount
1427 ENDPROC(call_softirq)
1429 KPROBE_ENTRY(ignore_sysret)
1434 ENDPROC(ignore_sysret)
1437 ENTRY(xen_hypervisor_callback)
1438 zeroentry xen_do_hypervisor_callback
1439 END(xen_hypervisor_callback)
1442 # A note on the "critical region" in our callback handler.
1443 # We want to avoid stacking callback handlers due to events occurring
1444 # during handling of the last event. To do this, we keep events disabled
1445 # until we've done all processing. HOWEVER, we must enable events before
1446 # popping the stack frame (can't be done atomically) and so it would still
1447 # be possible to get enough handler activations to overflow the stack.
1448 # Although unlikely, bugs of that kind are hard to track down, so we'd
1449 # like to avoid the possibility.
1450 # So, on entry to the handler we detect whether we interrupted an
1451 # existing activation in its critical region -- if so, we pop the current
1452 # activation and restart the handler using the previous one.
1454 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
1456 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1457 see the correct pointer to the pt_regs */
1458 movq %rdi, %rsp # we don't return, adjust the stack frame
1461 11: incl %gs:pda_irqcount
1463 CFI_DEF_CFA_REGISTER rbp
1464 cmovzq %gs:pda_irqstackptr,%rsp
1465 pushq %rbp # backlink for old unwinder
1466 call xen_evtchn_do_upcall
1468 CFI_DEF_CFA_REGISTER rsp
1469 decl %gs:pda_irqcount
1472 END(xen_do_hypervisor_callback)
1475 # Hypervisor uses this for application faults while it executes.
1476 # We get here for two reasons:
1477 # 1. Fault while reloading DS, ES, FS or GS
1478 # 2. Fault while executing IRET
1479 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1480 # registers that could be reloaded and zeroed the others.
1481 # Category 2 we fix up by killing the current process. We cannot use the
1482 # normal Linux return path in this case because if we use the IRET hypercall
1483 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1484 # We distinguish between categories by comparing each saved segment register
1485 # with its current contents: any discrepancy means we are in category 1.
1487 ENTRY(xen_failsafe_callback)
1488 framesz = (RIP-0x30) /* workaround buggy gas */
1490 CFI_REL_OFFSET rcx, 0
1491 CFI_REL_OFFSET r11, 8
1505 /* All segments match their saved values => Category 2 (Bad IRET). */
1511 CFI_ADJUST_CFA_OFFSET -0x30
1513 CFI_ADJUST_CFA_OFFSET 8
1515 CFI_ADJUST_CFA_OFFSET 8
1517 CFI_ADJUST_CFA_OFFSET 8
1518 jmp general_protection
1520 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1526 CFI_ADJUST_CFA_OFFSET -0x30
1528 CFI_ADJUST_CFA_OFFSET 8
1532 END(xen_failsafe_callback)
1534 #endif /* CONFIG_XEN */