#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
+#include <linux/irqflags.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 */
#include <acpi/processor.h>
#include <asm/processor.h>
-#define ACPI_PROCESSOR_COMPONENT 0x01000000
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
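/*
 * Aside (not part of the patch): the _COMPONENT/ACPI_MODULE_NAME pair
 * feeds the ACPI debug-output macros; ACPI_PROCESSOR_COMPONENT is
 * dropped here on the assumption that a shared ACPI header now
 * provides the same constant. A typical use of this machinery:
 *
 *	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 *			  "Processor idle states initialized\n"));
 */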
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
if (cstate->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cstate);
/* Dummy wait op - chipsets cannot guarantee that the STPCLK# signal
   gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
+ start_critical_timings();
}
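/*
 * Sketch (not from this patch) of the general pattern the two calls
 * above implement: any idle wait that runs with interrupts disabled
 * for an unbounded time should be excluded from irqsoff latency
 * tracing, e.g.:
 *
 *	static void my_idle(void)		// hypothetical helper
 *	{
 *		local_irq_disable();
 *		stop_critical_timings();	// pause irqsoff tracing
 *		safe_halt();			// sti; hlt - wait for irq
 *		start_critical_timings();	// resume tracing
 *	}
 */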
#endif /* !CONFIG_CPU_IDLE */
static int tsc_halts_in_c(int state)
{
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
+ case X86_VENDOR_INTEL:
/*
 * AMD Fam10h TSCs (and Intel TSCs with the
 * NONSTOP_TSC feature) will tick in all
 * C/P/S0/S1 states when this bit is set.
 */
- if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
return 0;
+
/*FALL THROUGH*/
- case X86_VENDOR_INTEL:
- /* Several cases known where TSC halts in C2 too */
default:
return state > ACPI_STATE_C1;
}
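/*
 * For reference, the caller in this file uses the helper roughly like
 * this, so that timekeeping stops trusting the TSC once a deep
 * C-state is actually entered on parts where it halts:
 *
 *	#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
 *		// TSC could halt in idle, so notify users
 *		if (tsc_halts_in_c(cx->type))
 *			mark_tsc_unstable("TSC halts in idle");
 *	#endif
 */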
} else {
continue;
}
- if (cx.type == ACPI_STATE_C1 && idle_halt) {
+ if (cx.type == ACPI_STATE_C1 &&
+ (idle_halt || idle_nomwait)) {
/*
 * In most cases the C1 space_id obtained from the
 * _CST object is FIXED_HARDWARE access mode.
 * But when the idle=halt option is specified, the
 * entry_method type should be changed from
 * CSTATE_FFH to CSTATE_HALT.
+ * Likewise, when idle=nomwait is specified,
+ * the C1 entry_method type should also be
+ * CSTATE_HALT.
 */
cx.entry_method = ACPI_CSTATE_HALT;
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
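/*
 * Sketch (simplified; the exact flow varies by kernel version) of how
 * the idle_halt/idle_nomwait flags get set from the "idle=" boot
 * parameter in the x86 setup code:
 *
 *	static int __init idle_setup(char *str)
 *	{
 *		if (!strcmp(str, "halt")) {
 *			pm_idle = default_idle;
 *			idle_halt = 1;		// force HLT everywhere
 *		} else if (!strcmp(str, "nomwait")) {
 *			idle_nomwait = 1;	// avoid MWAIT-based C1
 *		} else
 *			return -EINVAL;
 *		return 0;
 *	}
 *	early_param("idle", idle_setup);
 */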
if (!pr->flags.power_setup_done)
return -ENODEV;
- /* Fall back to the default idle loop */
- pm_idle = pm_idle_save;
- synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
+ /*
+ * Fall back to the default idle loop, when pm_idle_save had
+ * been initialized.
+ */
+ if (pm_idle_save) {
+ pm_idle = pm_idle_save;
+ /* Relies on interrupts forcing exit from idle. */
+ synchronize_sched();
+ }
pr->flags.power = 0;
result = acpi_processor_get_power_info(pr);
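/*
 * The NULL check above matters because pm_idle_save is only assigned
 * when the ACPI idle handler was actually installed, roughly:
 *
 *	if (pr->flags.power) {
 *		pm_idle_save = pm_idle;
 *		pm_idle = acpi_processor_idle;
 *	}
 *
 * If that setup never ran, restoring a NULL pm_idle would crash the
 * idle loop on the next entry into idle.
 */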
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
/* Dummy wait op - chipsets cannot guarantee that the STPCLK# signal
   gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
+ start_critical_timings();
}
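/*
 * Background sketch: on x86 the ACPI_CSTATE_FFH branch ends up in
 * MONITOR/MWAIT with hints taken from the _CST package, roughly:
 *
 *	void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
 *	{
 *		struct cstate_entry *e =
 *			per_cpu_ptr(cpu_cstate_entry, smp_processor_id());
 *
 *		mwait_idle_with_hints(e->states[cx->index].eax,
 *				      e->states[cx->index].ecx);
 *	}
 */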
/**
if (acpi_idle_bm_check()) {
if (dev->safe_state) {
+ dev->last_state = dev->safe_state;
return dev->safe_state->enter(dev, dev->safe_state);
} else {
local_irq_disable();
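/*
 * dev->safe_state is chosen while the cpuidle states are set up: it is
 * the deepest state that can be entered without bus-master checks, so
 * recording it in dev->last_state before delegating keeps the cpuidle
 * residency statistics pointing at the state actually used. Roughly:
 *
 *	case ACPI_STATE_C1:
 *		state->enter = acpi_idle_enter_c1;
 *		dev->safe_state = state;
 *		break;
 */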
/* Unregister the idle handler when processor #0 is removed. */
if (pr->id == 0) {
- pm_idle = pm_idle_save;
+ if (pm_idle_save)
+ pm_idle = pm_idle_save;
/*
 * We are about to unload the current idle thread pm callback
 * (pm_idle); wait for all processors to update their cached/local
 * copies of pm_idle before proceeding.
 */
cpu_idle_wait();