#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/clockchips.h>
#include <trace/power.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/debugreg.h>
#include <asm/hw_breakpoint.h>
unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

DEFINE_TRACE(power_start);
DEFINE_TRACE(power_end);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}
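/*
 * Note: the xstate area handed out by task_xstate_cachep must be 16-byte
 * aligned, because FXSAVE/FXRSTOR (and XSAVE) require that alignment; the
 * WARN_ON above catches a misaligned slab object early instead of letting
 * the FPU save/restore path fault later.
 */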
void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }

        if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG)))
                flush_thread_hw_breakpoint(tsk);

        WARN(tsk->thread.ds_ctx, "leaking DS context\n");
}
void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
void arch_task_cache_init(void)
{
        task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                                               __alignof__(union thread_xstate),
                                               SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}
void flush_thread(void)
{
        struct task_struct *tsk = current;

        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }

        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG)))
                flush_thread_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
}
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
        unsigned int val = PR_TSC_ENABLE;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;

        return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
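/*
 * Userspace reaches get_tsc_mode()/set_tsc_mode() via prctl(2). A minimal
 * usage sketch (illustrative only, not part of this file):
 *
 *      int mode;
 *      prctl(PR_GET_TSC, &mode);               // PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      // RDTSC now raises SIGSEGV
 */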
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}
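/*
 * Note: in the TSS I/O permission bitmap a set bit means "access denied",
 * so filling the previously used range with 0xff revokes any port access
 * the outgoing task had instead of leaking it to the incoming task.
 */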
int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;

void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
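/*
 * Typical pairing (illustrative; this is the floppy DMA workaround the
 * comment above refers to):
 *
 *      disable_hlt();
 *      ... start DMA and wait for it to complete ...
 *      enable_hlt();
 *
 * so the idle loop polls instead of executing HLT while DMA is in flight.
 */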
static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
void stop_this_cpu(void *dummy)
{
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}
static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and pick up the new one. Required while changing the pm_idle
 * handler on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
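/*
 * Usage sketch (illustrative; my_new_idle is a hypothetical handler):
 *
 *      pm_idle = my_new_idle;
 *      cpu_idle_wait();        // no CPU is running the old handler anymore
 */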
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can remove the need for an IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we are woken up from
 * MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}
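/*
 * The "ax" argument is the MWAIT hint: bits [7:4] select the target
 * C-state (value n requests C(n+1), hence the (ax>>4)+1 in the tracepoint
 * above) and bits [3:0] select a sub-state. "cx" carries MWAIT extensions,
 * e.g. bit 0 allows interrupts to break the wait even while they are
 * masked.
 */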
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;

        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and the current P-state of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0
static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}
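/*
 * Background for the check above: CPUID leaf 0x05 (MWAIT_INFO) describes
 * MONITOR/MWAIT. ECX bit 0 says the enumeration in EDX is valid, and EDX
 * bits [7:4] (MWAIT_EDX_C1) count the MWAIT sub-states available for C1,
 * so a non-zero value means C1 can be entered via MWAIT.
 */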
/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}
static cpumask_var_t c1e_mask;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        if (c1e_mask != NULL)
                cpumask_clear_cpu(cpu, c1e_mask);
}
/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local APIC timer and TSC stop).
 */
static void c1e_idle(void)
{
        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, c1e_mask)) {
                        cpumask_set_cpu(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI cannot interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_call_function.
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }

        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}
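/*
 * Selection summary: if pm_idle was already set (e.g. "idle=poll" installed
 * poll_idle from the command line) it is left alone; otherwise MWAIT is
 * preferred when the CPU supports it and mwait_usable() agrees, then the
 * C1E aware routine on affected AMD parts, and finally default_idle()
 * using HLT.
 */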
void __init init_c1e_mask(void)
{
        /* If we're using c1e_idle, we need to allocate c1e_mask. */
        if (pm_idle == c1e_idle) {
                alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
                cpumask_clear(c1e_mask);
        }
}
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * With the "idle=halt" boot option, halt is forced to be
                 * used for CPU idle, so the C2/C3 states won't be entered
                 * again. Don't touch boot_option_idle_override here, so
                 * the CPU idle driver can still be loaded.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * With the "idle=nomwait" boot option, mwait is disabled
                 * for the CPU C2/C3 states. boot_option_idle_override is
                 * left untouched here as well.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
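/*
 * Example usage on the kernel command line (illustrative):
 *
 *      idle=poll       busy-poll need_resched; lowest latency, highest power
 *      idle=mwait      force the MWAIT based idle routine (force_mwait)
 *      idle=halt       always idle with HLT; C2/C3 are not entered
 *      idle=nomwait    keep the chosen idle routine but never use MWAIT
 *                      for the C2/C3 states
 */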