Merge commit 'v2.6.34-rc6' into perf/core
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3e3d503..50cc84a 100644
@@ -14,8 +14,7 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <stdarg.h>
-
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/interrupt.h>
-#include <linux/utsname.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
-#include <linux/random.h>
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/tick.h>
 #include <linux/prctl.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
-#include <asm/pda.h>
 #include <asm/prctl.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/ia32.h>
 #include <asm/idle.h>
+#include <asm/syscalls.h>
+#include <asm/debugreg.h>
 
 asmlinkage extern void ret_from_fork(void);
 
-unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
+DEFINE_PER_CPU(unsigned long, old_rsp);
+static DEFINE_PER_CPU(unsigned char, is_idle);
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
@@ -72,13 +72,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 
 void enter_idle(void)
 {
-       write_pda(isidle, 1);
+       percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
-       if (test_and_clear_bit_pda(0, isidle) == 0)
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
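
The pda isidle flag becomes an ordinary per-CPU variable here; the notifier
chain around it is untouched. As a hedged sketch (callback name and body are
illustrative, not part of this diff), a consumer of these events would look
roughly like:

    #include <linux/notifier.h>
    #include <asm/idle.h>          /* IDLE_START, IDLE_END */

    /* Hypothetical callback; runs from the atomic notifier chain above. */
    static int my_idle_notify(struct notifier_block *nb,
                              unsigned long action, void *unused)
    {
            if (action == IDLE_START)
                    ;               /* this CPU is entering idle */
            else if (action == IDLE_END)
                    ;               /* this CPU is leaving idle */
            return NOTIFY_OK;
    }

    static struct notifier_block my_idle_nb = {
            .notifier_call = my_idle_notify,
    };

    /* registered from init code via idle_notifier_register(&my_idle_nb) */
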
@@ -92,30 +92,12 @@ void exit_idle(void)
        __exit_idle();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-DECLARE_PER_CPU(int, cpu_state);
-
-#include <asm/nmi.h>
-/* We halt the CPU with physical CPU hotplug */
-static inline void play_dead(void)
-{
-       idle_task_exit();
-       c1e_remove_cpu(raw_smp_processor_id());
-
-       mb();
-       /* Ack it */
-       __get_cpu_var(cpu_state) = CPU_DEAD;
-
-       local_irq_disable();
-       /* mask all interrupts, flush any and all caches, and halt */
-       wbinvd_halt();
-}
-#else
+#ifndef CONFIG_SMP
 static inline void play_dead(void)
 {
        BUG();
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif
 
 /*
  * The idle thread. There's no useful work to be
@@ -126,6 +108,16 @@ static inline void play_dead(void)
 void cpu_idle(void)
 {
        current_thread_info()->status |= TS_POLLING;
+
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+
        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
@@ -160,123 +152,62 @@ void cpu_idle(void)
 }
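
For context on boot_init_stack_canary() above: with CONFIG_CC_STACKPROTECTOR,
gcc on x86-64 expects the canary at %gs:40, which after the pda removal is the
per-CPU irq_stack_union.stack_canary slot. A simplified sketch of what the
helper does (condensed from arch/x86/include/asm/stackprotector.h of this
era; treat the details as approximate):

    static __always_inline void boot_init_stack_canary(void)
    {
            u64 canary, tsc;

            /* gcc's ABI hardcodes the canary at offset 40 from %gs */
            BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);

            get_random_bytes(&canary, sizeof(canary));
            tsc = __native_read_tsc();
            canary += tsc + (tsc << 32UL);  /* mix in some per-boot entropy */

            current->stack_canary = canary;
            percpu_write(irq_stack_union.stack_canary, canary);
    }
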
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs, int all)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;
 
-       printk("\n");
-       print_modules();
-       printk("Pid: %d, comm: %.20s %s %s %.*s\n",
-               current->pid, current->comm, print_tainted(),
-               init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
+       show_regs_common();
+       printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
-       printk("RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss, regs->sp,
-               regs->flags);
-       printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+       printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
+                       regs->sp, regs->flags);
+       printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
-       printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+       printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
-       printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+       printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
-       printk("R10: %016lx R11: %016lx R12: %016lx\n",
-              regs->r10, regs->r11, regs->r12); 
-       printk("R13: %016lx R14: %016lx R15: %016lx\n",
-              regs->r13, regs->r14, regs->r15); 
-
-       asm("movl %%ds,%0" : "=r" (ds)); 
-       asm("movl %%cs,%0" : "=r" (cs)); 
-       asm("movl %%es,%0" : "=r" (es)); 
+       printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
+              regs->r10, regs->r11, regs->r12);
+       printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
+              regs->r13, regs->r14, regs->r15);
+
+       asm("movl %%ds,%0" : "=r" (ds));
+       asm("movl %%cs,%0" : "=r" (cs));
+       asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));
 
        rdmsrl(MSR_FS_BASE, fs);
-       rdmsrl(MSR_GS_BASE, gs); 
-       rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); 
+       rdmsrl(MSR_GS_BASE, gs);
+       rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+
+       if (!all)
+               return;
 
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();
 
-       printk("FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", 
-              fs,fsindex,gs,gsindex,shadowgs); 
-       printk("CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); 
-       printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
+       printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+              fs, fsindex, gs, gsindex, shadowgs);
+       printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+                       es, cr0);
+       printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
+                       cr4);
 
        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
-       printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
+       printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
-       printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
-}
-
-void show_regs(struct pt_regs *regs)
-{
-       printk("CPU %d:", smp_processor_id());
-       __show_regs(regs);
-       show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-       struct task_struct *me = current;
-       struct thread_struct *t = &me->thread;
-
-       if (me->thread.io_bitmap_ptr) {
-               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-
-               kfree(t->io_bitmap_ptr);
-               t->io_bitmap_ptr = NULL;
-               clear_thread_flag(TIF_IO_BITMAP);
-               /*
-                * Careful, clear this in the TSS too:
-                */
-               memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-               t->io_bitmap_max = 0;
-               put_cpu();
-       }
-}
-
-void flush_thread(void)
-{
-       struct task_struct *tsk = current;
-
-       if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
-               clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
-               if (test_tsk_thread_flag(tsk, TIF_IA32)) {
-                       clear_tsk_thread_flag(tsk, TIF_IA32);
-               } else {
-                       set_tsk_thread_flag(tsk, TIF_IA32);
-                       current_thread_info()->status |= TS_COMPAT;
-               }
-       }
-       clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
-       tsk->thread.debugreg0 = 0;
-       tsk->thread.debugreg1 = 0;
-       tsk->thread.debugreg2 = 0;
-       tsk->thread.debugreg3 = 0;
-       tsk->thread.debugreg6 = 0;
-       tsk->thread.debugreg7 = 0;
-       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-       /*
-        * Forget coprocessor state..
-        */
-       tsk->fpu_counter = 0;
-       clear_fpu(tsk);
-       clear_used_math();
+       printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
 }
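
The new all parameter lets callers skip the control- and debug-register dump,
which is the part of __show_regs() that touches hardware state beyond pt_regs.
A hedged sketch of the two kinds of call sites this signature implies (the
exact callers live outside this diff):

    __show_regs(regs, 1);   /* full dump: GPRs, segments, CRx, DRx */
    __show_regs(regs, 0);   /* cheap dump: only what pt_regs holds */
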
 
 void release_thread(struct task_struct *dead_task)
@@ -320,12 +251,12 @@ void prepare_to_copy(struct task_struct *tsk)
        unlazy_fpu(tsk);
 }
 
-int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
-       struct task_struct * p, struct pt_regs * regs)
+       struct task_struct *p, struct pt_regs *regs)
 {
        int err;
-       struct pt_regs * childregs;
+       struct pt_regs *childregs;
        struct task_struct *me = current;
 
        childregs = ((struct pt_regs *)
@@ -333,8 +264,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        *childregs = *regs;
 
        childregs->ax = 0;
-       childregs->sp = sp;
-       if (sp == ~0UL)
+       if (user_mode(regs))
+               childregs->sp = sp;
+       else
                childregs->sp = (unsigned long)childregs;
 
        p->thread.sp = (unsigned long) childregs;
@@ -343,14 +275,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 
        set_tsk_thread_flag(p, TIF_FORK);
 
-       p->thread.fs = me->thread.fs;
-       p->thread.gs = me->thread.gs;
+       p->thread.io_bitmap_ptr = NULL;
 
        savesegment(gs, p->thread.gsindex);
+       p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
+       p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
 
+       err = -ENOMEM;
+       memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
+
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
@@ -370,10 +306,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
-               else                    
-#endif  
-                       err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); 
-               if (err) 
+               else
+#endif
+                       err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+               if (err)
                        goto out;
        }
        err = 0;
@@ -382,156 +318,46 @@ out:
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
+
        return err;
 }
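
The old convention of passing sp == ~0UL to mark a kernel thread is replaced
by testing the forking register frame directly: only a fork from user mode
gets a user stack pointer; kernel threads start on their new kernel stack at
childregs. For reference, the predicate itself is roughly (x86-64 version,
paraphrased from asm/ptrace.h of this era):

    static inline int user_mode(struct pt_regs *regs)
    {
            return !!(regs->cs & 3);  /* CPL != 0: frame came from user space */
    }
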
 
-void
-start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+static void
+start_thread_common(struct pt_regs *regs, unsigned long new_ip,
+                   unsigned long new_sp,
+                   unsigned int _cs, unsigned int _ss, unsigned int _ds)
 {
        loadsegment(fs, 0);
-       loadsegment(es, 0);
-       loadsegment(ds, 0);
+       loadsegment(es, _ds);
+       loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
-       write_pda(oldrsp, new_sp);
-       regs->cs                = __USER_CS;
-       regs->ss                = __USER_DS;
-       regs->flags             = 0x200;
+       percpu_write(old_rsp, new_sp);
+       regs->cs                = _cs;
+       regs->ss                = _ss;
+       regs->flags             = X86_EFLAGS_IF;
        set_fs(USER_DS);
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
 }
-EXPORT_SYMBOL_GPL(start_thread);
-
-static void hard_disable_TSC(void)
-{
-       write_cr4(read_cr4() | X86_CR4_TSD);
-}
-
-void disable_TSC(void)
-{
-       preempt_disable();
-       if (!test_and_set_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_disable_TSC();
-       preempt_enable();
-}
 
-static void hard_enable_TSC(void)
-{
-       write_cr4(read_cr4() & ~X86_CR4_TSD);
-}
-
-static void enable_TSC(void)
-{
-       preempt_disable();
-       if (test_and_clear_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_enable_TSC();
-       preempt_enable();
-}
-
-int get_tsc_mode(unsigned long adr)
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       unsigned int val;
-
-       if (test_thread_flag(TIF_NOTSC))
-               val = PR_TSC_SIGSEGV;
-       else
-               val = PR_TSC_ENABLE;
-
-       return put_user(val, (unsigned int __user *)adr);
+       start_thread_common(regs, new_ip, new_sp,
+                           __USER_CS, __USER_DS, 0);
 }
 
-int set_tsc_mode(unsigned int val)
+#ifdef CONFIG_IA32_EMULATION
+void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
-       if (val == PR_TSC_SIGSEGV)
-               disable_TSC();
-       else if (val == PR_TSC_ENABLE)
-               enable_TSC();
-       else
-               return -EINVAL;
-
-       return 0;
+       start_thread_common(regs, new_ip, new_sp,
+                           __USER32_CS, __USER32_DS, __USER32_DS);
 }
-
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
-
-static inline void __switch_to_xtra(struct task_struct *prev_p,
-                                   struct task_struct *next_p,
-                                   struct tss_struct *tss)
-{
-       struct thread_struct *prev, *next;
-       unsigned long debugctl;
-
-       prev = &prev_p->thread,
-       next = &next_p->thread;
-
-       debugctl = prev->debugctlmsr;
-       if (next->ds_area_msr != prev->ds_area_msr) {
-               /* we clear debugctl to make sure DS
-                * is not in use when we change it */
-               debugctl = 0;
-               update_debugctlmsr(0);
-               wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
-       }
-
-       if (next->debugctlmsr != debugctl)
-               update_debugctlmsr(next->debugctlmsr);
-
-       if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-               loaddebug(next, 0);
-               loaddebug(next, 1);
-               loaddebug(next, 2);
-               loaddebug(next, 3);
-               /* no 4 and 5 */
-               loaddebug(next, 6);
-               loaddebug(next, 7);
-       }
-
-       if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-           test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-               /* prev and next are different */
-               if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-                       hard_disable_TSC();
-               else
-                       hard_enable_TSC();
-       }
-
-       if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-               /*
-                * Copy the relevant range of the IO bitmap.
-                * Normally this is 128 bytes or less:
-                */
-               memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                      max(prev->io_bitmap_max, next->io_bitmap_max));
-       } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-               /*
-                * Clear any possible leftover bits:
-                */
-               memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-       }
-
-#ifdef X86_BTS
-       if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
-               ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
-
-       if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
-               ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
 #endif
-}
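
start_thread_common() factors the 64-bit and compat entry paths: same ip/sp
setup, different segment selectors. A hedged sketch of how a binfmt loader
hands control to the fresh image through it (mirroring what fs/binfmt_elf.c
does; the variable names are illustrative):

    /* end of a load_elf_binary()-style loader, roughly: */
    start_thread(regs, elf_entry, bprm->p);   /* ip = entry point,
                                                 sp = top of the arg stack */
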
 
 /*
  *     switch_to(x,y) should switch tasks from x to y.
@@ -541,8 +367,9 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
  * - could test fs/gs bitsliced
  *
  * Kprobes not supported here. Set the probe on schedule instead.
+ * The function graph tracer is not supported here either.
  */
-struct task_struct *
+__notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread;
@@ -550,9 +377,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
+       bool preload_fpu;
+
+       /*
+        * If the task has used the FPU in the last 5 timeslices, just do
+        * a full restore of the math state immediately to avoid the trap;
+        * the chances of needing the FPU again soon are obviously high now.
+        */
+       preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
        /* we're going to use this soon, after a few expensive things */
-       if (next_p->fpu_counter>5)
+       if (preload_fpu)
                prefetch(next->xstate);
 
        /*
@@ -560,13 +395,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        load_sp0(tss, next);
 
-       /* 
+       /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
-               loadsegment(es, next->es); 
+               loadsegment(es, next->es);
 
        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
@@ -583,6 +418,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        load_TLS(next, cpu);
 
+       /* Must be after DS reload */
+       unlazy_fpu(prev_p);
+
+       /* Make sure cpu is ready for new context */
+       if (preload_fpu)
+               clts();
+
        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
@@ -590,9 +432,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
-       arch_leave_lazy_cpu_mode();
+       arch_end_context_switch(next_p);
 
-       /* 
+       /*
         * Switch FS and GS.
         *
         * Segment register != 0 always requires a reload.  Also
@@ -601,13 +443,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
-               /* 
+               /*
                 * Check if the user used a selector != 0; if yes
                 *  clear 64bit base, since overloaded base is always
                 *  mapped to the Null selector
                 */
                if (fsindex)
-                       prev->fs = 0;                           
+                       prev->fs = 0;
        }
        /* when next process has a 64bit base use it */
        if (next->fs)
@@ -617,33 +459,22 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
                if (gsindex)
-                       prev->gs = 0;                           
+                       prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;
 
-       /* Must be after DS reload */
-       unlazy_fpu(prev_p);
-
-       /* 
+       /*
         * Switch the PDA and FPU contexts.
         */
-       prev->usersp = read_pda(oldrsp);
-       write_pda(oldrsp, next->usersp);
-       write_pda(pcurrent, next_p); 
+       prev->usersp = percpu_read(old_rsp);
+       percpu_write(old_rsp, next->usersp);
+       percpu_write(current_task, next_p);
 
-       write_pda(kernelstack,
+       percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - PDA_STACKOFFSET);
-#ifdef CONFIG_CC_STACKPROTECTOR
-       write_pda(stack_canary, next_p->stack_canary);
-       /*
-        * Build time only check to make sure the stack_canary is at
-        * offset 40 in the pda; this is a gcc ABI requirement
-        */
-       BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
-#endif
+                 THREAD_SIZE - KERNEL_STACK_OFFSET);
 
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
@@ -652,35 +483,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);
 
-       /* If the task has used fpu the last 5 timeslices, just do a full
-        * restore of the math state immediately to avoid the trap; the
-        * chances of needing FPU soon are obviously high now
-        *
-        * tsk_used_math() checks prevent calling math_state_restore(),
-        * which can sleep in the case of !tsk_used_math()
+       /*
+        * Preload the FPU context, now that we've determined that the
+        * task is likely to be using it.
         */
-       if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-               math_state_restore();
-       return prev_p;
-}
+       if (preload_fpu)
+               __math_state_restore();
 
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage
-long sys_execve(char __user *name, char __user * __user *argv,
-               char __user * __user *envp, struct pt_regs *regs)
-{
-       long error;
-       char * filename;
-
-       filename = getname(name);
-       error = PTR_ERR(filename);
-       if (IS_ERR(filename))
-               return error;
-       error = do_execve(filename, argv, envp, regs);
-       putname(filename);
-       return error;
+       return prev_p;
 }
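
The FPU preload decision is now made once, up front, and drives three steps
in order. A condensed sketch of the sequencing this hunk establishes (not a
literal excerpt):

    /*
     * preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
     *   - the tsk_used_math() check guards against __math_state_restore()
     *     running with no valid state to restore.
     *
     * prefetch(next->xstate);       early, to hide the cache miss
     * unlazy_fpu(prev_p);           after the DS reload: save prev's state
     * clts();                       clear CR0.TS so the restore can't trap
     * ...stack/percpu switch...
     * __math_state_restore();       restore next's state directly
     */
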
 
 void set_personality_64bit(void)
@@ -697,88 +507,70 @@ void set_personality_64bit(void)
        current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-asmlinkage long sys_fork(struct pt_regs *regs)
+void set_personality_ia32(void)
 {
-       return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
+       /* inherit personality from parent */
 
-asmlinkage long
-sys_clone(unsigned long clone_flags, unsigned long newsp,
-         void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-{
-       if (!newsp)
-               newsp = regs->sp;
-       return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-}
+       /* Make sure to be in 32bit mode */
+       set_thread_flag(TIF_IA32);
+       current->personality |= force_personality32;
 
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage long sys_vfork(struct pt_regs *regs)
-{
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
-                   NULL, NULL);
+       /* Prepare the first "return" to user space */
+       current_thread_info()->status |= TS_COMPAT;
 }
 
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long stack;
-       u64 fp,ip;
+       u64 fp, ip;
        int count = 0;
 
-       if (!p || p == current || p->state==TASK_RUNNING)
-               return 0; 
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
        stack = (unsigned long)task_stack_page(p);
-       if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
+       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
-       do { 
+       do {
                if (fp < (unsigned long)stack ||
-                   fp > (unsigned long)stack+THREAD_SIZE)
-                       return 0; 
+                   fp >= (unsigned long)stack+THREAD_SIZE)
+                       return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
-               fp = *(u64 *)fp; 
-       } while (count++ < 16); 
+               fp = *(u64 *)fp;
+       } while (count++ < 16);
        return 0;
 }
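
get_wchan() relies on the conventional x86-64 frame layout, and the bounds
tests tighten from > to >= because a pointer equal to stack+THREAD_SIZE is
already one past the end (fp+8 would read beyond the stack). Illustration of
the layout the walk assumes (frame pointers enabled):

    /*
     *   fp      -> saved caller %rbp   (*(u64 *)fp     == next frame's fp)
     *   fp + 8  -> return address      (*(u64 *)(fp+8) == caller's ip)
     *
     * so every fp that gets dereferenced must satisfy
     *   stack <= fp < stack + THREAD_SIZE.
     */
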
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
-{ 
-       int ret = 0; 
+{
+       int ret = 0;
        int doit = task == current;
        int cpu;
 
-       switch (code) { 
+       switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
-                       return -EPERM; 
+                       return -EPERM;
                cpu = get_cpu();
-               /* handle small bases via the GDT because that's faster to 
+               /* handle small bases via the GDT because that's faster to
                   switch. */
-               if (addr <= 0xffffffff) {  
-                       set_32bit_tls(task, GS_TLS, addr); 
-                       if (doit) { 
+               if (addr <= 0xffffffff) {
+                       set_32bit_tls(task, GS_TLS, addr);
+                       if (doit) {
                                load_TLS(&task->thread, cpu);
-                               load_gs_index(GS_TLS_SEL); 
+                               load_gs_index(GS_TLS_SEL);
                        }
-                       task->thread.gsindex = GS_TLS_SEL; 
+                       task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
-               } else { 
+               } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
-                       } 
+                       }
                }
                put_cpu();
                break;
@@ -832,8 +624,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
-               }
-               else
+               } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
@@ -852,15 +643,8 @@ long sys_arch_prctl(int code, unsigned long addr)
        return do_arch_prctl(current, code, addr);
 }
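
For reference, userspace reaches do_arch_prctl() through the arch_prctl(2)
system call; glibc of this era has no wrapper, so it goes via syscall(2).
A hedged userspace sketch (the my_arch_prctl helper is illustrative):

    #include <asm/prctl.h>        /* ARCH_SET_GS, ARCH_GET_GS, ... */
    #include <sys/syscall.h>      /* SYS_arch_prctl                */
    #include <unistd.h>

    static long my_arch_prctl(int code, unsigned long addr)
    {
            return syscall(SYS_arch_prctl, code, addr);
    }

    /* e.g. point this thread's %gs base at a TLS block:
     *      my_arch_prctl(ARCH_SET_GS, (unsigned long)tls_block);
     */
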
 
-unsigned long arch_align_stack(unsigned long sp)
-{
-       if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= get_random_int() % 8192;
-       return sp & ~0xf;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+unsigned long KSTK_ESP(struct task_struct *task)
 {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return (test_tsk_thread_flag(task, TIF_IA32)) ?
+                       (task_pt_regs(task)->sp) : ((task)->thread.usersp);
 }
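
KSTK_ESP() has to be a real function now: for 64-bit tasks the user stack
pointer is stashed in thread.usersp (fed from the old_rsp per-CPU slot on
kernel entry) rather than sitting in the pt_regs frame, so a simple macro
over task_pt_regs() can no longer recover it. A hedged consumer sketch:

    /* fs/proc/array.c, do_task_stat(), roughly: the value reported as
     * the kstkesp field (field 29) of /proc/<pid>/stat */
    unsigned long esp = KSTK_ESP(task);
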