/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
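/*
 * Worked example (assuming the standard 3.579545 MHz ACPI PM timer):
 * one tick lasts 1000000000 / 3579545 ~= 279 ns, so PM_TIMER_TICK_NS
 * evaluates to 279, and US_TO_PM_TIMER_TICKS(100) gives
 * (100 * 3579) / 1000 = 357 ticks for a 100 us interval.
 */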
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
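/*
 * Example: with HZ == 250 the default works out to (1U << 10) - 1 ==
 * 0x3FF, i.e. a 10-jiffy (40 ms) window.  Each pass through the idle
 * loop shifts bm_activity left by the jiffies elapsed and ORs in bit 0
 * when BM_STS was seen, so ANDing bm_activity with this mask asks
 * "was there any bus-master traffic within the window?".
 */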
static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
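/*
 * latency_factor scales a C-state's exit latency into its cpuidle
 * target residency, e.g. a C3 state with a 57 us exit latency is given
 * a 57 * 2 = 114 us target residency by default (see
 * acpi_processor_setup_cpuidle() below).
 */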
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW")}, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
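/*
 * Example of the wrap handling above, assuming a 24-bit PM timer:
 * t1 == 0x00FFFFF0 and t2 == 0x00000010 means the counter wrapped, and
 * ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF == 0x1F == 31 ticks (~8.7 us).
 * A full wrap between the two reads (~4.7 s at 3.579545 MHz) cannot be
 * detected and would be under-counted.
 */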
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;
	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;
}
static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	u64 perf_flags;

	/* Don't trace irqs off for idle */
	stop_critical_timings();
	perf_flags = hw_perf_save_disable();
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	hw_perf_restore(perf_flags);
	start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}
/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}
#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device *device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device *device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;
		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
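/*
 * For example, a CPU advertising X86_FEATURE_NONSTOP_TSC makes the
 * helper above report a ticking TSC for every C-state, so the idle
 * paths below never call mark_tsc_unstable() on such parts.
 */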
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		if (irqs_disabled())
			local_irq_enable();
		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save(); /* enables IRQs */
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		if (irqs_disabled())
			local_irq_enable();

		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity & cx->
					      promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */
	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;
	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}
	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
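/*
 * The resulting default ladder, for a hypothetical C1/C2/C3 setup:
 * C1 promotes to C2 after 10 sufficiently long sleeps and C2 to C3
 * after 4, a single sleep shorter than the state's own latency demotes
 * one step, and promotion into C3 is vetoed while any bus-master
 * activity bit is set within the bm_history window.
 */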
#endif /* !CONFIG_CPU_IDLE */
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;
		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}
		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;
		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
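/*
 * Discovery order, in short: prefer _CST, fall back to the FADT/P_BLK
 * description when _CST is absent (-ENODEV), and finally let
 * acpi_processor_get_power_info_default() guarantee at least a C1
 * entry before the states are verified.
 */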
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * Fall back to the default idle loop, when pm_idle_save had
	 * been initialized.
	 */
	if (pm_idle_save) {
		pm_idle = pm_idle_save;
		/* Relies on interrupts forcing exit from idle. */
		synchronize_sched();
	}

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}
#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif
#else /* CONFIG_CPU_IDLE */
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
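/*
 * Intended use (sketch): acpi_idle_enter_bm() below calls this before
 * a C3-type entry; a non-zero return means bus-master activity was
 * seen since the last check, and the CPU falls back to dev->safe_state
 * instead of disabling arbitration.
 */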
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	u64 pctrl;

	/* Don't trace irqs off for idle */
	stop_critical_timings();
	pctrl = hw_perf_save_disable();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	hw_perf_restore(pctrl);
	start_critical_timings();
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
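/*
 * Note: cpuidle treats the value returned by these ->enter() handlers
 * as the time spent in the state, in microseconds; the governor weighs
 * it against each state's target_residency when choosing the next
 * state.
 */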
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
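/*
 * Registration sketch (this happens in the processor driver's module
 * init, not in this file): the driver is registered once, after which
 * each CPU registers its own device in acpi_processor_power_init():
 *
 *	retval = cpuidle_register_driver(&acpi_idle_driver);
 *	if (retval)
 *		return retval;
 */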
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
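/*
 * Usage sketch: acpi_processor_power_init() below runs this once the
 * C-state table is known and then hands pr->power.dev to
 * cpuidle_register_device(); acpi_processor_cst_has_changed() repeats
 * the setup under cpuidle_pause_and_lock() whenever the platform
 * signals a _CST change.
 */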
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				    &acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler is kept on platforms
	 * that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE
	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				       &acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}