diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8476df..3ba155d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -37,6 +37,8 @@
 #include <linux/tick.h>
 #include <linux/percpu.h>
 #include <linux/prctl.h>
+#include <linux/dmi.h>
+#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
 #include <asm/kdebug.h>
+#include <asm/idle.h>
+#include <asm/syscalls.h>
+#include <asm/smp.h>
+#include <asm/ds.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-static int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
@@ -77,80 +78,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
        return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-void disable_hlt(void)
-{
-       hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-       hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we
-                * test NEED_RESCHED:
-                */
-               smp_mb();
-
-               if (!need_resched())
-                       safe_halt();    /* enables interrupts racelessly */
-               else
-                       local_irq_enable();
-               current_thread_info()->status |= TS_POLLING;
-       } else {
-               local_irq_enable();
-               /* loop is done by the caller */
-               cpu_relax();
-       }
-}
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
-{
-       /* This must be done before dead CPU ack */
-       cpu_exit_clear();
-       wbinvd();
-       mb();
-       /* Ack it */
-       __get_cpu_var(cpu_state) = CPU_DEAD;
-
-       /*
-        * With physical CPU hotplug, we should halt the cpu
-        */
-       local_irq_disable();
-       while (1)
-               halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
        BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
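
The idle plumbing removed above (hlt_counter, disable_hlt()/enable_hlt(), pm_idle and default_idle()) was not dropped from the kernel; it moved into arch/x86/kernel/process.c, shared by the 32- and 64-bit builds. The subtle part of default_idle() is the wakeup race it avoids: clear TS_POLLING, force a full barrier, and only then test need_resched(), so a remote waker either still sees the polling flag or must send a real interrupt. A rough user-space analogue of that ordering (polling and need_work are illustrative stand-ins, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool polling = true;   /* stand-in for TS_POLLING */
static atomic_bool need_work;        /* stand-in for TIF_NEED_RESCHED */

static void idle_once(void)
{
        atomic_store(&polling, false);             /* stop claiming we poll */
        atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() above */
        if (!atomic_load(&need_work)) {
                /* sleep here (the kernel's safe_halt()); a waker that
                 * saw polling == false must send an explicit wakeup */
        }
        atomic_store(&polling, true);
}
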
 
 /*
  * The idle thread. There's no useful work to be
@@ -166,26 +99,24 @@ void cpu_idle(void)
 
        /* endless idle loop with no priority at all */
        while (1) {
-               tick_nohz_stop_sched_tick();
+               tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {
-                       void (*idle)(void);
 
                        check_pgt_cache();
                        rmb();
-                       idle = pm_idle;
 
                        if (rcu_pending(cpu))
                                rcu_check_callbacks(cpu, 0);
 
-                       if (!idle)
-                               idle = default_idle;
-
                        if (cpu_is_offline(cpu))
                                play_dead();
 
                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
-                       idle();
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
+                       pm_idle();
+                       start_critical_timings();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
@@ -194,12 +125,13 @@ void cpu_idle(void)
        }
 }
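
Two things change in the loop body: tick_nohz_stop_sched_tick() now takes an inidle flag (the 1 asserts the call really comes from the idle task), and the per-iteration NULL check of pm_idle is gone, with stop_critical_timings()/start_critical_timings() bracketing the call so the irqsoff latency tracer ignores time spent idle. The pointer can be called unconditionally because the idle routine is now picked once at boot in the shared code; a sketch of that assumption (the real select_idle_routine() in arch/x86/kernel/process.c is more involved):

void (*pm_idle)(void);

static void default_idle(void)  { /* hlt-based fallback */ }
static void mwait_idle(void)    { /* mwait-based variant */ }
static int cpu_has_mwait(void)  { return 0; /* placeholder probe */ }

/* Runs once per CPU at boot, not on every pass through cpu_idle(). */
static void select_idle_routine(void)
{
        if (!pm_idle)
                pm_idle = cpu_has_mwait() ? mwait_idle : default_idle;
}
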
 
-void __show_registers(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, int all)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;
+       const char *board;
 
        if (user_mode_vm(regs)) {
                sp = regs->sp;
@@ -212,11 +144,15 @@ void __show_registers(struct pt_regs *regs, int all)
        }
 
        printk("\n");
-       printk("Pid: %d, comm: %s %s (%s %.*s)\n",
+
+       board = dmi_get_system_info(DMI_PRODUCT_NAME);
+       if (!board)
+               board = "";
+       printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
                        task_pid_nr(current), current->comm,
                        print_tainted(), init_utsname()->release,
                        (int)strcspn(init_utsname()->version, " "),
-                       init_utsname()->version);
+                       init_utsname()->version, board);
 
        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        (u16)regs->cs, regs->ip, regs->flags,
@@ -255,7 +191,7 @@ void __show_registers(struct pt_regs *regs, int all)
 
 void show_regs(struct pt_regs *regs)
 {
-       __show_registers(regs, 1);
+       __show_regs(regs, 1);
        show_trace(NULL, regs, &regs->sp, regs->bp);
 }
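
Besides the rename to __show_regs(), the hunk guards against dmi_get_system_info() returning NULL before the string reaches printk, and it reuses the %.*s/strcspn() idiom to print the version string only up to its first space. The idiom in a standalone, compilable form (the sample string is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *version = "#1 SMP Fri Oct 10 10:00:00 UTC 2008";

        /* %.*s caps the printed length; strcspn() finds the first space */
        printf("%.*s\n", (int)strcspn(version, " "), version); /* -> "#1" */
        return 0;
}
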
 
@@ -316,6 +252,8 @@ void exit_thread(void)
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
+
+       ds_exit_thread(current);
 }
 
 void flush_thread(void)
@@ -333,6 +271,7 @@ void flush_thread(void)
        /*
         * Forget coprocessor state..
         */
+       tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
 }
@@ -396,6 +335,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
+
+       ds_copy_thread(p, current);
+
+       clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
+       p->thread.debugctlmsr = 0;
+
        return err;
 }
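
Since copy_thread() duplicates the parent's thread_struct wholesale, state the child must not inherit has to be cleared explicitly afterwards: ds_copy_thread() takes care of the Debug Store context, and the DEBUGCTL image plus its TIF flag are zeroed so branch tracing never leaks into a new task. The copy-then-reset shape, reduced to a sketch with illustrative types:

struct thread_state {
        unsigned long debugctlmsr;  /* per-task image of the DEBUGCTL MSR */
        /* ... registers, FPU state, I/O bitmap pointer, ... */
};

static void copy_thread_state(struct thread_state *child,
                              const struct thread_state *parent)
{
        *child = *parent;           /* bulk copy, as copy_thread() does */
        child->debugctlmsr = 0;     /* hardware tracing is never inherited */
}
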
 
@@ -481,21 +426,14 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
 {
        struct thread_struct *prev, *next;
-       unsigned long debugctl;
 
        prev = &prev_p->thread;
        next = &next_p->thread;
 
-       debugctl = prev->debugctlmsr;
-       if (next->ds_area_msr != prev->ds_area_msr) {
-               /* we clear debugctl to make sure DS
-                * is not in use when we change it */
-               debugctl = 0;
-               update_debugctlmsr(0);
-               wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
-       }
-
-       if (next->debugctlmsr != debugctl)
+       if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
+           test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
+               ds_switch_to(prev_p, next_p);
+       else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);
 
        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
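
The open-coded MSR_IA32_DS_AREA/DEBUGCTL juggling becomes a flag-gated dispatch: only when either task has TIF_DS_AREA_MSR set does the heavyweight ds_switch_to() run; otherwise a plain compare of the cached debugctlmsr values decides whether any MSR write is needed at all. The fast/slow split as a self-contained sketch (all names here are stand-ins, not kernel API):

#include <stdbool.h>

struct task {
        bool uses_ds;               /* stand-in for TIF_DS_AREA_MSR */
        unsigned long debugctlmsr;
};

static void full_ds_switch(struct task *prev, struct task *next)
{
        /* heavyweight Debug Store context switch (stub) */
}

static void write_debugctl(unsigned long val)
{
        /* MSR-style register update (stub) */
}

static void switch_debug_state(struct task *prev, struct task *next)
{
        if (prev->uses_ds || next->uses_ds)
                full_ds_switch(prev, next);          /* slow path, rare */
        else if (next->debugctlmsr != prev->debugctlmsr)
                write_debugctl(next->debugctlmsr);   /* cheap path, common */
}
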
@@ -517,15 +455,6 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                        hard_enable_TSC();
        }
 
-#ifdef X86_BTS
-       if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
-               ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
-
-       if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
-               ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
-#endif
-
-
        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
@@ -583,7 +512,8 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
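
__switch_to() gains the __notrace_funcgraph annotation: the function-graph tracer records entries and exits against the current task, and instrumenting the context switch itself, where current is in flux, would corrupt that bookkeeping. When the tracer is not configured the annotation compiles away to nothing; roughly what it expands to (cf. linux/ftrace.h):

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define __notrace_funcgraph __attribute__((no_instrument_function))
#else
# define __notrace_funcgraph
#endif
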
@@ -649,8 +579,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
+        *
+        * tsk_used_math() checks prevent calling math_state_restore(),
+        * which can sleep in the case of !tsk_used_math()
         */
-       if (next_p->fpu_counter > 5)
+       if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
                math_state_restore();
 
        /*
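
This last hunk pairs with the fpu_counter reset added to flush_thread() earlier in the patch: the eager-restore heuristic only fires for a task that both owns FPU state (tsk_used_math()) and used it in recent timeslices. Without the tsk_used_math() check, math_state_restore() could be asked to materialize state that does not exist, a path that can sleep and is therefore illegal this deep in the context switch. The heuristic, reduced to a compilable sketch (types and helpers are stand-ins):

#include <stdbool.h>

struct task {
        bool used_math;            /* stand-in for tsk_used_math() */
        unsigned char fpu_counter; /* timeslices with recent FPU use */
};

static void fpu_restore(struct task *t)
{
        /* load the task's saved FPU state (stub) */
}

static void maybe_restore_fpu_eagerly(struct task *next)
{
        /* Restore up front only when state exists and is hot; otherwise
         * leave the lazy #NM-trap path to do it on first FPU use. */
        if (next->used_math && next->fpu_counter > 5)
                fpu_restore(next);
}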