/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>

#include <asm/mach_traps.h>

#ifdef CONFIG_X86_64
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/traps.h>

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
gate_desc idt_table[256]
	__attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
#endif
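
/*
 * (The F0 0F erratum: early Pentiums hang on a locked cmpxchg8b with a
 * register operand.  The workaround routes exception delivery through a
 * read-only alias of the IDT and fixes the resulting page fault up into
 * do_invalid_op(), which only works if the table is page-aligned.)
 */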

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static int ignore_nmis;
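
/*
 * The helpers below mirror the interrupted context's IF flag: interrupts
 * are re-enabled in the handler only if the faulting code had them
 * enabled.  The preempt_conditional_* variants additionally hold the
 * preempt count across the window, so handlers running on an IST stack
 * cannot be preempted while they borrow that per-CPU stack.
 */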

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	inc_preempt_count();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	dec_preempt_count();
}

#ifdef CONFIG_X86_32
static inline void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode_vm(regs))
		die(str, regs, err);
}
#endif

static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < 6)
			goto vm86_trap;
		goto trap_signal;
	}
#endif

	if (!user_mode(regs))
		goto kernel_trap;

#ifdef CONFIG_X86_32
trap_signal:
#endif
	/*
	 * We want error_code and trap_no set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		printk(KERN_INFO
		       "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
		       tsk->comm, tsk->pid, str,
		       regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}
#endif

	if (info)
		force_sig_info(signr, info, tsk);
	else
		force_sig(signr, tsk);
	return;

kernel_trap:
	if (!fixup_exception(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		die(str, regs, error_code);
	}
	return;

#ifdef CONFIG_X86_32
vm86_trap:
	if (handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
		goto trap_signal;
	return;
#endif
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, NULL);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr)		\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	siginfo_t info;							\
	info.si_signo = signr;						\
	info.si_errno = 0;						\
	info.si_code = sicode;						\
	info.si_addr = (void __user *)siaddr;				\
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr)	\
							== NOTIFY_STOP)	\
		return;							\
	conditional_sti(regs);						\
	do_trap(trapnr, signr, str, regs, error_code, &info);		\
}
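
/*
 * For example, DO_ERROR(4, SIGSEGV, "overflow", overflow) below expands to:
 *
 *	dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code, 4,
 *				SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */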

DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
DO_ERROR(4, SIGSEGV, "overflow", overflow)
DO_ERROR(5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
#endif
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto gp_in_vm86;
#endif

	tsk = current;
	if (!user_mode(regs))
		goto gp_in_kernel;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		printk(KERN_INFO
			"%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		printk("\n");
	}

	force_sig(SIGSEGV, tsk);
	return;

#ifdef CONFIG_X86_32
gp_in_vm86:
	local_irq_enable();
	handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
	return;
#endif

gp_in_kernel:
	if (fixup_exception(regs))
		return;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;
	if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
		return;
	die("general protection fault", regs, error_code);
}
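
/*
 * NMI reason decoding (ISA port 0x61): on a read, bit 7 reports a memory
 * parity/SERR error and bit 6 an I/O channel check.  The handlers below
 * acknowledge the condition by writing the corresponding disable bit
 * (bit 2 or bit 3) back to the same port.
 */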

static notrace __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG
		"You have some hardware problem, likely on the PCI bus.\n");

#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);

	i = 2000;
	while (--i)
		udelay(1000);

	reason &= ~8;
	outb(reason, 0x61);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
			NOTIFY_STOP)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	printk(KERN_EMERG
		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
			reason, smp_processor_id());

	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);
#else
		unknown_nmi_error(reason, regs);
#endif

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
#ifdef CONFIG_X86_32
	/*
	 * Reassert NMI in case it became active meanwhile
	 * as it's edge-triggered:
	 */
	reassert_nmi();
#endif
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	acpi_nmi_disable();
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
	acpi_nmi_enable();
}

/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_KPROBES
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#else
	if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
			== NOTIFY_STOP)
		return;
#endif
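
	/*
	 * Only unclaimed breakpoints reach this point: kprobes (and other
	 * die-chain users such as kgdb) consume their own int3s via the
	 * notifier above, so whatever is left is delivered as a SIGTRAP.
	 */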
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif
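
/*
 * The IST entry stubs in entry.S are expected to switch %rsp to whatever
 * frame sync_regs() returns before calling the C handler proper.
 */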

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long condition;
	int si_code;

	get_debugreg(condition, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
	tsk->thread.debugctlmsr = 0;

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK)
		goto debug_vm86;
#endif

	/* Save debug status register where ptrace can see it */
	tsk->thread.debugreg6 = condition;

	/*
	 * Single-stepping through TF: make sure we ignore any events in
	 * kernel space (but re-enable TF when returning to user mode).
	 */
	if (condition & DR_STEP) {
		if (!user_mode(regs))
			goto clear_TF_reenable;
	}

	si_code = get_si_code(condition);
	/* Ok, finally something we can handle */
	send_sigtrap(tsk, regs, error_code, si_code);

	/*
	 * Disable additional traps. They'll be re-enabled when
	 * the signal is delivered.
	 */
clear_dr7:
	set_debugreg(0, 7);
	preempt_conditional_cli(regs);
	return;

#ifdef CONFIG_X86_32
debug_vm86:
	/* reenable preemption: handle_vm86_trap() might sleep */
	dec_preempt_count();
	handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
	conditional_cli(regs);
	return;
#endif

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->flags &= ~X86_EFLAGS_TF;
	preempt_conditional_cli(regs);
	return;
}

#ifdef CONFIG_X86_64
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	if (fixup_exception(regs))
		return 1;

	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd, err;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = ip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);

	err = swd & ~cwd;
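
	/*
	 * Worked example: if the program left precision exceptions masked
	 * (cwd bit 5 set) and only the PE status bit (swd bit 5) is set,
	 * err ends up 0 and we take the spurious-trap exit below.
	 */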

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap 16
		 * implementations, it's possible we get a spurious trap...
		 */
		return;		/* Spurious trap, no error */
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	ignore_fpu_irq = 1;
#else
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;
#endif

	math_error((void __user *)regs->ip);
}

static void simd_math_error(void __user *ip)
{
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = ip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
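	/*
	 * MXCSR mask bits 7-12 line up one-for-one with status bits 0-5,
	 * so shifting the mask field down by 7 aligns it with the status
	 * bits for the unmasked-exception test below.
	 */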
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (cpu_has_xmm) {
		/* Handle SIMD FPU exceptions on PIII+ processors. */
		ignore_fpu_irq = 1;
		simd_math_error((void __user *)regs->ip);
		return;
	}
	/*
	 * Handle strange cache flush from user space exception
	 * in all other cases.  This is undocumented behaviour.
	 */
	if (regs->flags & X86_VM_MASK) {
		handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
		return;
	}
	current->thread.trap_no = 19;
	current->thread.error_code = error_code;
	die_if_kernel("cache flush denied", regs, error_code);
	force_sig(SIGSEGV, current);
#else
	if (!user_mode(regs) &&
			kernel_math_error(regs, "kernel simd math error", 19))
		return;
	simd_math_error((void __user *)regs->ip);
#endif
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
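
/*
 * Espfix: an iret to a 16-bit stack segment restores only the low word
 * of %esp.  patch_espfix_desc() rewrites the per-CPU GDT_ENTRY_ESPFIX_SS
 * descriptor so that the segment base plus the truncated %esp still
 * resolves to the correct kernel stack address on the 16-bit return
 * path in entry_32.S.
 */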

#ifdef CONFIG_X86_32
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}
#else
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
#endif

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}

	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

#ifndef CONFIG_MATH_EMULATION
void math_emulate(struct math_emu_info *info)
{
	printk(KERN_EMERG
		"math-emulation not enabled and no coprocessor found.\n");
	printk(KERN_EMERG "killing %s.\n", current->comm);
	force_sig(SIGFPE, current);
	schedule();
}
#endif /* CONFIG_MATH_EMULATION */
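
/*
 * #NM, trap 7: raised on the first FPU instruction when CR0.TS is set
 * after a context switch (lazy FPU restore), or whenever CR0.EM requests
 * software emulation.  Either emulate the instruction or restore this
 * task's FPU state.
 */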

dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
	} else {
		math_state_restore(); /* interrupts still off */
		conditional_sti(regs);
	}
#else
	math_state_restore();
#endif
}

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception",
			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
		return;
	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
}
#endif

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}

	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

#ifdef CONFIG_X86_32
	x86_quirk_trap_init();
#endif
}