#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
-#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
+#include <linux/prctl.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
-#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
*/
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
{
*/
smp_mb();
- local_irq_disable();
- if (!need_resched()) {
- ktime_t t0, t1;
- u64 t0n, t1n;
-
- t0 = ktime_get();
- t0n = ktime_to_ns(t0);
+ if (!need_resched())
safe_halt(); /* enables interrupts racelessly */
- local_irq_disable();
- t1 = ktime_get();
- t1n = ktime_to_ns(t1);
- sched_clock_idle_wakeup_event(t1n - t0n);
- }
- local_irq_enable();
+ else
+ local_irq_enable();
current_thread_info()->status |= TS_POLLING;
} else {
+ local_irq_enable();
/* loop is done by the caller */
cpu_relax();
}
EXPORT_SYMBOL(default_idle);
#endif
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
- cpu_relax();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
while (!need_resched()) {
void (*idle)(void);
- if (__get_cpu_var(cpu_idle_state))
- __get_cpu_var(cpu_idle_state) = 0;
-
check_pgt_cache();
rmb();
idle = pm_idle;
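+ /*
+  * Run any due RCU work before halting; presumably this keeps a CPU
+  * heading into idle from stalling grace periods while it sleeps.
+  */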
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, 0);
+
if (!idle)
idle = default_idle;
if (cpu_is_offline(cpu))
play_dead();
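+ /*
+  * Disable interrupts before the idle routine checks need_resched(), so
+  * a wakeup cannot slip in between the check and the halt; the idle
+  * routines re-enable interrupts themselves (e.g. via safe_halt()).
+  */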
+ local_irq_disable();
__get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
}
}
}
-static void do_nothing(void *unused)
-{
-}
-
-void cpu_idle_wait(void)
-{
- unsigned int cpu, this_cpu = get_cpu();
- cpumask_t map, tmp = current->cpus_allowed;
-
- set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
- put_cpu();
-
- cpus_clear(map);
- for_each_online_cpu(cpu) {
- per_cpu(cpu_idle_state, cpu) = 1;
- cpu_set(cpu, map);
- }
-
- __get_cpu_var(cpu_idle_state) = 0;
-
- wmb();
- do {
- ssleep(1);
- for_each_online_cpu(cpu) {
- if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
- cpu_clear(cpu, map);
- }
- cpus_and(map, map, cpu_online_map);
- /*
- * We waited 1 sec, if a CPU still did not call idle
- * it may be because it is in idle and not waking up
- * because it has nothing to do.
- * Give all the remaining CPUS a kick.
- */
- smp_call_function_mask(map, do_nothing, 0, 0);
- } while (!cpus_empty(map));
-
- set_cpus_allowed(current, tmp);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
- local_irq_enable();
- mwait_idle_with_hints(0, 0);
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
- if (cpu_has(c, X86_FEATURE_MWAIT)) {
- printk("monitor/mwait feature present.\n");
- /*
- * Skip, if setup has overridden idle.
- * One CPU supports mwait => All CPUs supports mwait
- */
- if (!pm_idle) {
- printk("using mwait in idle threads.\n");
- pm_idle = mwait_idle;
- }
- }
-}
-
-static int __init idle_setup(char *str)
-{
- if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
- pm_idle = poll_idle;
-#ifdef CONFIG_X86_SMP
- if (smp_num_siblings > 1)
- printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
-#endif
- } else if (!strcmp(str, "mwait"))
- force_mwait = 1;
- else
- return -1;
-
- boot_option_idle_override = 1;
- return 0;
-}
-early_param("idle", idle_setup);
-
void __show_registers(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
init_utsname()->version);
printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
- 0xffff & regs->cs, regs->ip, regs->flags,
+ (u16)regs->cs, regs->ip, regs->flags,
smp_processor_id());
print_symbol("EIP is at %s\n", regs->ip);
printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
regs->si, regs->di, regs->bp, sp);
printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
- regs->ds & 0xffff, regs->es & 0xffff,
- regs->fs & 0xffff, gs, ss);
+ (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);
if (!all)
return;
return err;
}
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
+void
+start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
- u16 gs;
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- dump->u_debugreg[0] = current->thread.debugreg0;
- dump->u_debugreg[1] = current->thread.debugreg1;
- dump->u_debugreg[2] = current->thread.debugreg2;
- dump->u_debugreg[3] = current->thread.debugreg3;
- dump->u_debugreg[4] = 0;
- dump->u_debugreg[5] = 0;
- dump->u_debugreg[6] = current->thread.debugreg6;
- dump->u_debugreg[7] = current->thread.debugreg7;
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->regs.bx = regs->bx;
- dump->regs.cx = regs->cx;
- dump->regs.dx = regs->dx;
- dump->regs.si = regs->si;
- dump->regs.di = regs->di;
- dump->regs.bp = regs->bp;
- dump->regs.ax = regs->ax;
- dump->regs.ds = (u16)regs->ds;
- dump->regs.es = (u16)regs->es;
- dump->regs.fs = (u16)regs->fs;
- savesegment(gs,gs);
- dump->regs.orig_ax = regs->orig_ax;
- dump->regs.ip = regs->ip;
- dump->regs.cs = (u16)regs->cs;
- dump->regs.flags = regs->flags;
- dump->regs.sp = regs->sp;
- dump->regs.ss = (u16)regs->ss;
-
- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
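+ /*
+  * Clear %gs here (and %fs just below) so the freshly exec'd image does
+  * not inherit the parent's TLS/segment selectors; presumably a stale
+  * selector would otherwise leak into the new program.
+  */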
+ __asm__("movl %0, %%gs" :: "r"(0));
+ regs->fs = 0;
+ set_fs(USER_DS);
+ regs->ds = __USER_DS;
+ regs->es = __USER_DS;
+ regs->ss = __USER_DS;
+ regs->cs = __USER_CS;
+ regs->ip = new_ip;
+ regs->sp = new_sp;
+ /*
+ * Free the old FP and other extended state
+ */
+ free_thread_xstate(current);
}
-EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL_GPL(start_thread);
-#ifdef CONFIG_SECCOMP
static void hard_disable_TSC(void)
{
write_cr4(read_cr4() | X86_CR4_TSD);
}
+
void disable_TSC(void)
{
preempt_disable();
hard_disable_TSC();
preempt_enable();
}
+
static void hard_enable_TSC(void)
{
write_cr4(read_cr4() & ~X86_CR4_TSD);
}
-#endif /* CONFIG_SECCOMP */
+
+static void enable_TSC(void)
+{
+ preempt_disable();
+ if (test_and_clear_thread_flag(TIF_NOTSC))
+ /*
+ * Must flip the CPU state synchronously with
+ * TIF_NOTSC in the current running context.
+ */
+ hard_enable_TSC();
+ preempt_enable();
+}
+
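+ /*
+  * Presumably the backend for the PR_GET_TSC/PR_SET_TSC prctl() calls
+  * (hence the <linux/prctl.h> include above): report or set whether
+  * rdtsc is allowed (PR_TSC_ENABLE) or faults (PR_TSC_SIGSEGV) for the
+  * current task.
+  */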
+int get_tsc_mode(unsigned long adr)
+{
+ unsigned int val;
+
+ if (test_thread_flag(TIF_NOTSC))
+ val = PR_TSC_SIGSEGV;
+ else
+ val = PR_TSC_ENABLE;
+
+ return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+ if (val == PR_TSC_SIGSEGV)
+ disable_TSC();
+ else if (val == PR_TSC_ENABLE)
+ enable_TSC();
+ else
+ return -EINVAL;
+
+ return 0;
+}
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
/* we clear debugctl to make sure DS
* is not in use when we change it */
debugctl = 0;
- wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
+ update_debugctlmsr(0);
wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
}
if (next->debugctlmsr != debugctl)
- wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
+ update_debugctlmsr(next->debugctlmsr);
if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
set_debugreg(next->debugreg0, 0);
set_debugreg(next->debugreg7, 7);
}
-#ifdef CONFIG_SECCOMP
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
/* prev and next are different */
else
hard_enable_TSC();
}
-#endif
+#ifdef X86_BTS
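+ /*
+  * BTS = Branch Trace Store: take departure/arrival timestamps for the
+  * ptrace BTS interface when the traced task is switched out or in.
+  */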
if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
/* we're going to use this soon, after a few expensive things */
if (next_p->fpu_counter > 5)
- prefetch(&next->i387.fxsave);
+ prefetch(next->xstate);
/*
* Reload esp0.
/* If the task has used fpu the last 5 timeslices, just do a full
* restore of the math state immediately to avoid the trap; the
* chances of needing FPU soon are obviously high now
+ *
+ * tsk_used_math() checks prevent calling math_state_restore(),
+ * which can sleep in the case of !tsk_used_math()
*/
- if (next_p->fpu_counter > 5)
+ if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
math_state_restore();
/*