diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 8537c42..66a9d81 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,29 +38,54 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
+#include <linux/clockchips.h>
+#include <linux/cpuidle.h>
+#include <linux/irqflags.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/processor.h>
+#include <asm/processor.h>
-#define ACPI_PROCESSOR_COMPONENT        0x01000000
 #define ACPI_PROCESSOR_CLASS            "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER	"power"
 #define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
+#ifndef CONFIG_CPU_IDLE
 #define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
-module_param(max_cstate, uint, 0644);
+#else
+#define C2_OVERHEAD			1	/* 1us */
+#define C3_OVERHEAD			1	/* 1us */
+#endif
+#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
+static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
+#ifdef CONFIG_CPU_IDLE
+module_param(max_cstate, uint, 0000);
+#else
+module_param(max_cstate, uint, 0644);
+#endif
 static unsigned int nocst __read_mostly;
 module_param(nocst, uint, 0000);
+#ifndef CONFIG_CPU_IDLE
 /*
  * bm_history -- bit-mask with a bit per jiffy of bus-master activity
  * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
@@ -71,9 +96,13 @@ module_param(nocst, uint, 0000);
 static unsigned int bm_history __read_mostly =
     (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
 module_param(bm_history, uint, 0644);
-/* --------------------------------------------------------------------------
-                              Power Management
-   -------------------------------------------------------------------------- */
+
+static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+
+#else	/* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
+#endif
 
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
@@ -81,7 +110,7 @@ module_param(bm_history, uint, 0644);
  *
  * To skip this limit, boot/load with a large max_cstate limit.
*/ -static int set_max_cstate(struct dmi_system_id *id) +static int set_max_cstate(const struct dmi_system_id *id) { if (max_cstate > ACPI_PROCESSOR_MAX_POWER) return 0; @@ -160,12 +189,43 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2) { if (t2 >= t1) return (t2 - t1); - else if (!acpi_fadt.tmr_val_ext) + else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); else return ((0xFFFFFFFF - t1) + t2); } +static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2) +{ + if (t2 >= t1) + return PM_TIMER_TICKS_TO_US(t2 - t1); + else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER)) + return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); + else + return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2); +} + +/* + * Callers should disable interrupts before the call and enable + * interrupts after return. + */ +static void acpi_safe_halt(void) +{ + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we + * test NEED_RESCHED: + */ + smp_mb(); + if (!need_resched()) { + safe_halt(); + local_irq_disable(); + } + current_thread_info()->status |= TS_POLLING; +} + +#ifndef CONFIG_CPU_IDLE + static void acpi_processor_power_activate(struct acpi_processor *pr, struct acpi_processor_cx *new) @@ -187,8 +247,7 @@ acpi_processor_power_activate(struct acpi_processor *pr, case ACPI_STATE_C3: /* Disable bus master reload */ if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, - ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); break; } } @@ -198,8 +257,7 @@ acpi_processor_power_activate(struct acpi_processor *pr, case ACPI_STATE_C3: /* Enable bus master reload */ if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, - ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); break; } @@ -208,17 +266,131 @@ acpi_processor_power_activate(struct acpi_processor *pr, return; } -static void acpi_safe_halt(void) +static atomic_t c3_cpu_count; + +/* Common C-state entry for C2, C3, .. */ +static void acpi_cstate_enter(struct acpi_processor_cx *cstate) { - current_thread_info()->status &= ~TS_POLLING; - smp_mb__after_clear_bit(); - if (!need_resched()) - safe_halt(); - current_thread_info()->status |= TS_POLLING; + /* Don't trace irqs off for idle */ + stop_critical_timings(); + if (cstate->entry_method == ACPI_CSTATE_FFH) { + /* Call into architectural FFH based C-state */ + acpi_processor_ffh_cstate_enter(cstate); + } else { + int unused; + /* IO port based C-state */ + inb(cstate->address); + /* Dummy wait op - must do something useless after P_LVL2 read + because chipsets cannot guarantee that STPCLK# signal + gets asserted in time to freeze execution properly. */ + unused = inl(acpi_gbl_FADT.xpm_timer_block.address); + } + start_critical_timings(); } +#endif /* !CONFIG_CPU_IDLE */ -static atomic_t c3_cpu_count; +#ifdef ARCH_APICTIMER_STOPS_ON_C3 +/* + * Some BIOS implementations switch to C3 in the published C2 state. + * This seems to be a common problem on AMD boxen, but other vendors + * are affected too. We pick the most conservative approach: we assume + * that the local APIC stops in both C2 and C3. + */ +static void acpi_timer_check_state(int state, struct acpi_processor *pr, + struct acpi_processor_cx *cx) +{ + struct acpi_processor_power *pwr = &pr->power; + u8 type = local_apic_timer_c2_ok ? 
ACPI_STATE_C3 : ACPI_STATE_C2; + + /* + * Check, if one of the previous states already marked the lapic + * unstable + */ + if (pwr->timer_broadcast_on_state < state) + return; + + if (cx->type >= type) + pr->power.timer_broadcast_on_state = state; +} + +static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) +{ + unsigned long reason; + + reason = pr->power.timer_broadcast_on_state < INT_MAX ? + CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; + + clockevents_notify(reason, &pr->id); +} + +/* Power(C) State timer broadcast control */ +static void acpi_state_timer_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx, + int broadcast) +{ + int state = cx - pr->power.states; + + if (state >= pr->power.timer_broadcast_on_state) { + unsigned long reason; + + reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER : + CLOCK_EVT_NOTIFY_BROADCAST_EXIT; + clockevents_notify(reason, &pr->id); + } +} + +#else + +static void acpi_timer_check_state(int state, struct acpi_processor *pr, + struct acpi_processor_cx *cstate) { } +static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } +static void acpi_state_timer_broadcast(struct acpi_processor *pr, + struct acpi_processor_cx *cx, + int broadcast) +{ +} + +#endif + +/* + * Suspend / resume control + */ +static int acpi_idle_suspend; + +int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) +{ + acpi_idle_suspend = 1; + return 0; +} + +int acpi_processor_resume(struct acpi_device * device) +{ + acpi_idle_suspend = 0; + return 0; +} + +#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) +static int tsc_halts_in_c(int state) +{ + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + case X86_VENDOR_INTEL: + /* + * AMD Fam10h TSC will tick in all + * C/P/S0/S1 states when this bit is set. + */ + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + return 0; + + /*FALL THROUGH*/ + default: + return state > ACPI_STATE_C1; + } +} +#endif + +#ifndef CONFIG_CPU_IDLE static void acpi_processor_idle(void) { struct acpi_processor *pr = NULL; @@ -227,16 +399,18 @@ static void acpi_processor_idle(void) int sleep_ticks = 0; u32 t1, t2 = 0; - pr = processors[smp_processor_id()]; - if (!pr) - return; - /* * Interrupts must be disabled during bus mastering calculations and * for C2/C3 transitions. */ local_irq_disable(); + pr = __get_cpu_var(processors); + if (!pr) { + local_irq_enable(); + return; + } + /* * Check whether we truly need to go idle, or should * reschedule: @@ -247,11 +421,14 @@ static void acpi_processor_idle(void) } cx = pr->power.state; - if (!cx) { - if (pm_idle_save) - pm_idle_save(); - else + if (!cx || acpi_idle_suspend) { + if (pm_idle_save) { + pm_idle_save(); /* enables IRQs */ + } else { acpi_safe_halt(); + local_irq_enable(); + } + return; } @@ -270,12 +447,10 @@ static void acpi_processor_idle(void) pr->power.bm_activity <<= diff; - acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, - &bm_status, ACPI_MTX_DO_NOT_LOCK); + acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); if (bm_status) { pr->power.bm_activity |= 0x1; - acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, - 1, ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); } /* * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect @@ -316,8 +491,8 @@ static void acpi_processor_idle(void) * an SMP system. We do it here instead of doing it at _CST/P_LVL * detection phase, to work cleanly with logical CPU hotplug. 
*/ - if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !acpi_fadt.plvl2_up) + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) cx = &pr->power.states[ACPI_STATE_C1]; #endif @@ -328,7 +503,11 @@ static void acpi_processor_idle(void) */ if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) { current_thread_info()->status &= ~TS_POLLING; - smp_mb__after_clear_bit(); + /* + * TS_POLLING-cleared state must be visible before we + * test NEED_RESCHED: + */ + smp_mb(); if (need_resched()) { current_thread_info()->status |= TS_POLLING; local_irq_enable(); @@ -344,85 +523,118 @@ static void acpi_processor_idle(void) * Use the appropriate idle routine, the one that would * be used without acpi C-states. */ - if (pm_idle_save) - pm_idle_save(); - else + if (pm_idle_save) { + pm_idle_save(); /* enables IRQs */ + } else { acpi_safe_halt(); + local_irq_enable(); + } /* * TBD: Can't get time duration while in C1, as resumes * go to an ISR rather than here. Need to instrument * base interrupt handler. + * + * Note: the TSC better not stop in C1, sched_clock() will + * skew otherwise. */ sleep_ticks = 0xFFFFFFFF; + break; case ACPI_STATE_C2: /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); + /* Tell the scheduler that we are going deep-idle: */ + sched_clock_idle_sleep_event(); /* Invoke C2 */ - inb(cx->address); - /* Dummy wait op - must do something useless after P_LVL2 read - because chipsets cannot guarantee that STPCLK# signal - gets asserted in time to freeze execution properly. */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); + acpi_state_timer_broadcast(pr, cx, 1); + acpi_cstate_enter(cx); /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); -#ifdef CONFIG_GENERIC_TIME +#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) /* TSC halts in C2, so notify users */ - mark_tsc_unstable(); + if (tsc_halts_in_c(ACPI_STATE_C2)) + mark_tsc_unstable("possible TSC halt in C2"); #endif + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = ticks_elapsed(t1, t2); + + /* Tell the scheduler how much we idled: */ + sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + /* Re-enable interrupts */ local_irq_enable(); + /* Do not account our idle-switching overhead: */ + sleep_ticks -= cx->latency_ticks + C2_OVERHEAD; + current_thread_info()->status |= TS_POLLING; - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; + acpi_state_timer_broadcast(pr, cx, 0); break; case ACPI_STATE_C3: - - if (pr->flags.bm_check) { + acpi_unlazy_tlb(smp_processor_id()); + /* + * Must be done before busmaster disable as we might + * need to access HPET ! + */ + acpi_state_timer_broadcast(pr, cx, 1); + /* + * disable bus master + * bm_check implies we need ARB_DIS + * !bm_check implies we need cache flush + * bm_control implies whether we can do ARB_DIS + * + * That leaves a case where bm_check is set and bm_control is + * not set. In that case we cannot do much, we enter C3 + * without doing anything. 
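+	 *
+	 * For quick reference, the resulting combinations (a summary of
+	 * the rules above, matching the code below):
+	 *
+	 *	bm_check  bm_control	action before entering C3
+	 *	   0	      x		flush caches (ACPI_FLUSH_CPU_CACHE)
+	 *	   1	      1		disable bus master arbitration
+	 *	   1	      0		nothing we can do, enter C3 as-is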
+ */ + if (pr->flags.bm_check && pr->flags.bm_control) { if (atomic_inc_return(&c3_cpu_count) == num_online_cpus()) { /* * All CPUs are trying to go to C3 * Disable bus master arbitration */ - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, - ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); } - } else { + } else if (!pr->flags.bm_check) { /* SMP with no shared cache... Invalidate cache */ ACPI_FLUSH_CPU_CACHE(); } /* Get start time (ticks) */ - t1 = inl(acpi_fadt.xpm_tmr_blk.address); + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); /* Invoke C3 */ - inb(cx->address); - /* Dummy wait op (see above) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); + /* Tell the scheduler that we are going deep-idle: */ + sched_clock_idle_sleep_event(); + acpi_cstate_enter(cx); /* Get end time (ticks) */ - t2 = inl(acpi_fadt.xpm_tmr_blk.address); - if (pr->flags.bm_check) { + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); + if (pr->flags.bm_check && pr->flags.bm_control) { /* Enable bus master arbitration */ atomic_dec(&c3_cpu_count); - acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, - ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); } -#ifdef CONFIG_GENERIC_TIME +#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) /* TSC halts in C3, so notify users */ - mark_tsc_unstable(); + if (tsc_halts_in_c(ACPI_STATE_C3)) + mark_tsc_unstable("TSC halts in C3"); #endif + /* Compute time (ticks) that we were actually asleep */ + sleep_ticks = ticks_elapsed(t1, t2); + /* Tell the scheduler how much we idled: */ + sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + /* Re-enable interrupts */ local_irq_enable(); + /* Do not account our idle-switching overhead: */ + sleep_ticks -= cx->latency_ticks + C3_OVERHEAD; + current_thread_info()->status |= TS_POLLING; - /* Compute time (ticks) that we were actually asleep */ - sleep_ticks = - ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; + acpi_state_timer_broadcast(pr, cx, 0); break; default: @@ -438,7 +650,7 @@ static void acpi_processor_idle(void) #ifdef CONFIG_HOTPLUG_CPU /* Don't do promotion/demotion */ if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && - !pr->flags.has_cst && !acpi_fadt.plvl2_up) { + !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) { next_state = cx; goto end; } @@ -455,7 +667,8 @@ static void acpi_processor_idle(void) if (cx->promotion.state && ((cx->promotion.state - pr->power.states) <= max_cstate)) { if (sleep_ticks > cx->promotion.threshold.ticks && - cx->promotion.state->latency <= system_latency_constraint()) { + cx->promotion.state->latency <= + pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { cx->promotion.count++; cx->demotion.count = 0; if (cx->promotion.count >= @@ -499,7 +712,8 @@ static void acpi_processor_idle(void) * or if the latency of the current state is unacceptable */ if ((pr->power.state - pr->power.states) > max_cstate || - pr->power.state->latency > system_latency_constraint()) { + pr->power.state->latency > + pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) { if (cx->demotion.state) next_state = cx->demotion.state; } @@ -589,6 +803,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr) return 0; } +#endif /* !CONFIG_CPU_IDLE */ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) { @@ -606,9 +821,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) #ifndef CONFIG_HOTPLUG_CPU /* * Check for P_LVL2_UP flag before entering C2 and above on - * an SMP system. 
+ * an SMP system. */ - if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) + if ((num_online_cpus() > 1) && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) return -ENODEV; #endif @@ -617,8 +833,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; /* determine latencies from FADT */ - pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; - pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; + pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; + pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "lvl2[0x%08x] lvl3[0x%08x]\n", @@ -628,20 +844,17 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) return 0; } -static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr) +static int acpi_processor_get_power_info_default(struct acpi_processor *pr) { - - /* Zero initialize all the C-states info. */ - memset(pr->power.states, 0, sizeof(pr->power.states)); - - /* set the first C-State to C1 */ - pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; - - /* the C0 state only exists as a filler in our array, - * and all processors need to support C1 */ + if (!pr->power.states[ACPI_STATE_C1].valid) { + /* set the first C-State to C1 */ + /* all processors need to support C1 */ + pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; + pr->power.states[ACPI_STATE_C1].valid = 1; + pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; + } + /* the C0 state only exists as a filler in our array */ pr->power.states[ACPI_STATE_C0].valid = 1; - pr->power.states[ACPI_STATE_C1].valid = 1; - return 0; } @@ -658,12 +871,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) if (nocst) return -ENODEV; - current_count = 1; - - /* Zero initialize C2 onwards and prepare for fresh CST lookup */ - for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++) - memset(&(pr->power.states[i]), 0, - sizeof(struct acpi_processor_cx)); + current_count = 0; status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); if (ACPI_FAILURE(status)) { @@ -671,7 +879,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) return -ENODEV; } - cst = (union acpi_object *)buffer.pointer; + cst = buffer.pointer; /* There must be at least 2 elements */ if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) { @@ -700,14 +908,14 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) memset(&cx, 0, sizeof(cx)); - element = (union acpi_object *)&(cst->package.elements[i]); + element = &(cst->package.elements[i]); if (element->type != ACPI_TYPE_PACKAGE) continue; if (element->package.count != 4) continue; - obj = (union acpi_object *)&(element->package.elements[0]); + obj = &(element->package.elements[0]); if (obj->type != ACPI_TYPE_BUFFER) continue; @@ -718,30 +926,70 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr) (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) continue; - cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ? - 0 : reg->address; - /* There should be an easy way to extract an integer... 
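+		 * (each _CST entry is a four-element package:
+		 *  register buffer, type, latency and power,
+		 *  unpacked one by one below)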
*/
-		obj = (union acpi_object *)&(element->package.elements[1]);
+		obj = &(element->package.elements[1]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
 
 		cx.type = obj->integer.value;
+		/*
+		 * Some buggy BIOSes won't list C1 in _CST -
+		 * Let acpi_processor_get_power_info_default() handle them later
+		 */
+		if (i == 1 && cx.type != ACPI_STATE_C1)
+			current_count++;
+
+		cx.address = reg->address;
+		cx.index = current_count + 1;
+
+		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
+		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+			if (acpi_processor_ffh_cstate_probe
+					(pr->id, &cx, reg) == 0) {
+				cx.entry_method = ACPI_CSTATE_FFH;
+			} else if (cx.type == ACPI_STATE_C1) {
+				/*
+				 * C1 is a special case where FIXED_HARDWARE
+				 * can be handled in a non-MWAIT way as well.
+				 * In that case, save this _CST entry info.
+				 * Otherwise, ignore this info and continue.
+				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+			} else {
+				continue;
+			}
+			if (cx.type == ACPI_STATE_C1 &&
+					(idle_halt || idle_nomwait)) {
+				/*
+				 * In most cases the C1 space_id obtained from
+				 * _CST object is FIXED_HARDWARE access mode.
+				 * But when the option of idle=halt is added,
+				 * the entry_method type should be changed from
+				 * CSTATE_FFH to CSTATE_HALT.
+				 * When the option of idle=nomwait is added,
+				 * the C1 entry_method type should be
+				 * CSTATE_HALT.
+				 */
+				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+			}
+		} else {
+			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+				 cx.address);
+		}
 
-		if ((cx.type != ACPI_STATE_C1) &&
-		    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
-			continue;
-
-		if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3))
-			continue;
+		if (cx.type == ACPI_STATE_C1) {
+			cx.valid = 1;
+		}
 
-		obj = (union acpi_object *)&(element->package.elements[2]);
+		obj = &(element->package.elements[2]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
 
 		cx.latency = obj->integer.value;
 
-		obj = (union acpi_object *)&(element->package.elements[3]);
+		obj = &(element->package.elements[3]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
 
@@ -798,7 +1046,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
 	 * Normalize the C2 latency to expedite policy
 	 */
 	cx->valid = 1;
+
+#ifndef CONFIG_CPU_IDLE
 	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
+#else
+	cx->latency_ticks = cx->latency;
+#endif
 
 	return;
 }
@@ -845,25 +1098,30 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 	}
 
 	if (pr->flags.bm_check) {
-		/* bus mastering control is necessary */
 		if (!pr->flags.bm_control) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "C3 support requires bus mastering control\n"));
-			return;
+			if (pr->flags.has_cst != 1) {
+				/* bus mastering control is necessary */
+				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					"C3 support requires BM control\n"));
+				return;
+			} else {
+				/* Here we enter C3 without bus mastering */
+				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+					"C3 support without BM control\n"));
+			}
 		}
 	} else {
 		/*
 		 * WBINVD should be set in the FADT for the C3 state to be
 		 * supported when bm_check is not required.
*/ - if (acpi_fadt.wb_invd != 1) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cache invalidation should work properly" " for C3 to be enabled on SMP systems\n")); return; } - acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, - 0, ACPI_MTX_DO_NOT_LOCK); + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); } /* @@ -873,7 +1131,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr, * use this in our C3 policy */ cx->valid = 1; + +#ifndef CONFIG_CPU_IDLE cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency); +#else + cx->latency_ticks = cx->latency; +#endif return; } @@ -883,11 +1146,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) unsigned int i; unsigned int working = 0; -#ifdef ARCH_APICTIMER_STOPS_ON_C3 - int timer_broadcast = 0; - cpumask_t mask = cpumask_of_cpu(pr->id); - on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); -#endif + pr->power.timer_broadcast_on_state = INT_MAX; for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { struct acpi_processor_cx *cx = &pr->power.states[i]; @@ -899,21 +1158,14 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) case ACPI_STATE_C2: acpi_processor_power_verify_c2(cx); -#ifdef ARCH_APICTIMER_STOPS_ON_C3 - /* Some AMD systems fake C3 as C2, but still - have timer troubles */ - if (cx->valid && - boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - timer_broadcast++; -#endif + if (cx->valid) + acpi_timer_check_state(i, pr, cx); break; case ACPI_STATE_C3: acpi_processor_power_verify_c3(pr, cx); -#ifdef ARCH_APICTIMER_STOPS_ON_C3 if (cx->valid) - timer_broadcast++; -#endif + acpi_timer_check_state(i, pr, cx); break; } @@ -921,10 +1173,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) working++; } -#ifdef ARCH_APICTIMER_STOPS_ON_C3 - if (timer_broadcast) - on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1); -#endif + acpi_propagate_timer_broadcast(pr); return (working); } @@ -938,14 +1187,21 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) /* NOTE: the idle thread may not be running while calling * this function */ - /* Adding C1 state */ - acpi_processor_get_power_info_default_c1(pr); + /* Zero initialize all the C-states info. */ + memset(pr->power.states, 0, sizeof(pr->power.states)); + result = acpi_processor_get_power_info_cst(pr); if (result == -ENODEV) - acpi_processor_get_power_info_fadt(pr); + result = acpi_processor_get_power_info_fadt(pr); + + if (result) + return result; + + acpi_processor_get_power_info_default(pr); pr->power.count = acpi_processor_power_verify(pr); +#ifndef CONFIG_CPU_IDLE /* * Set Default Policy * ------------------ @@ -957,6 +1213,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) result = acpi_processor_set_power_policy(pr); if (result) return result; +#endif /* * if one state of type C2 or C3 is available, mark this @@ -973,38 +1230,9 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr) return 0; } -int acpi_processor_cst_has_changed(struct acpi_processor *pr) -{ - int result = 0; - - - if (!pr) - return -EINVAL; - - if (nocst) { - return -ENODEV; - } - - if (!pr->flags.power_setup_done) - return -ENODEV; - - /* Fall back to the default idle loop */ - pm_idle = pm_idle_save; - synchronize_sched(); /* Relies on interrupts forcing exit from idle. 
*/ - - pr->flags.power = 0; - result = acpi_processor_get_power_info(pr); - if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) - pm_idle = acpi_processor_idle; - - return result; -} - -/* proc interface */ - static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) { - struct acpi_processor *pr = (struct acpi_processor *)seq->private; + struct acpi_processor *pr = seq->private; unsigned int i; @@ -1017,7 +1245,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) "maximum allowed latency: %d usec\n", pr->power.state ? pr->power.state - pr->power.states : 0, max_cstate, (unsigned)pr->power.bm_activity, - system_latency_constraint()); + pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)); seq_puts(seq, "states:\n"); @@ -1063,7 +1291,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", pr->power.states[i].latency, pr->power.states[i].usage, - pr->power.states[i].time); + (unsigned long long)pr->power.states[i].time); } end: @@ -1077,12 +1305,51 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) } static const struct file_operations acpi_processor_power_fops = { + .owner = THIS_MODULE, .open = acpi_processor_power_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +#ifndef CONFIG_CPU_IDLE + +int acpi_processor_cst_has_changed(struct acpi_processor *pr) +{ + int result = 0; + + if (boot_option_idle_override) + return 0; + + if (!pr) + return -EINVAL; + + if (nocst) { + return -ENODEV; + } + + if (!pr->flags.power_setup_done) + return -ENODEV; + + /* + * Fall back to the default idle loop, when pm_idle_save had + * been initialized. + */ + if (pm_idle_save) { + pm_idle = pm_idle_save; + /* Relies on interrupts forcing exit from idle. */ + synchronize_sched(); + } + + pr->flags.power = 0; + result = acpi_processor_get_power_info(pr); + if ((pr->flags.power == 1) && (pr->flags.power_setup_done)) + pm_idle = acpi_processor_idle; + + return result; +} + +#ifdef CONFIG_SMP static void smp_callback(void *v) { /* we already woke the CPU up, nothing more to do */ @@ -1097,7 +1364,7 @@ static void smp_callback(void *v) static int acpi_processor_latency_notify(struct notifier_block *b, unsigned long l, void *v) { - smp_call_function(smp_callback, NULL, 0, 1); + smp_call_function(smp_callback, NULL, 1); return NOTIFY_OK; } @@ -1105,6 +1372,431 @@ static struct notifier_block acpi_processor_latency_notifier = { .notifier_call = acpi_processor_latency_notify, }; +#endif + +#else /* CONFIG_CPU_IDLE */ + +/** + * acpi_idle_bm_check - checks if bus master activity was detected + */ +static int acpi_idle_bm_check(void) +{ + u32 bm_status = 0; + + acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status); + if (bm_status) + acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1); + /* + * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect + * the true state of bus mastering activity; forcing us to + * manually check the BMIDEA bit of each IDE channel. 
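+	 * (BMIDEA is bit 0 of the bus master IDE status register;
+	 *  the reads below poll it at bmisx + 0x02 for the primary
+	 *  and bmisx + 0x0A for the secondary channel)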
+ */ + else if (errata.piix4.bmisx) { + if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) + || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)) + bm_status = 1; + } + return bm_status; +} + +/** + * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state + * @pr: the processor + * @target: the new target state + */ +static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr, + struct acpi_processor_cx *target) +{ + if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) { + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0); + pr->flags.bm_rld_set = 0; + } + + if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) { + acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1); + pr->flags.bm_rld_set = 1; + } +} + +/** + * acpi_idle_do_entry - a helper function that does C2 and C3 type entry + * @cx: cstate data + * + * Caller disables interrupt before call and enables interrupt after return. + */ +static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) +{ + /* Don't trace irqs off for idle */ + stop_critical_timings(); + if (cx->entry_method == ACPI_CSTATE_FFH) { + /* Call into architectural FFH based C-state */ + acpi_processor_ffh_cstate_enter(cx); + } else if (cx->entry_method == ACPI_CSTATE_HALT) { + acpi_safe_halt(); + } else { + int unused; + /* IO port based C-state */ + inb(cx->address); + /* Dummy wait op - must do something useless after P_LVL2 read + because chipsets cannot guarantee that STPCLK# signal + gets asserted in time to freeze execution properly. */ + unused = inl(acpi_gbl_FADT.xpm_timer_block.address); + } + start_critical_timings(); +} + +/** + * acpi_idle_enter_c1 - enters an ACPI C1 state-type + * @dev: the target CPU + * @state: the state data + * + * This is equivalent to the HALT instruction. + */ +static int acpi_idle_enter_c1(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + u32 t1, t2; + struct acpi_processor *pr; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + + pr = __get_cpu_var(processors); + + if (unlikely(!pr)) + return 0; + + local_irq_disable(); + + /* Do not access any ACPI IO ports in suspend path */ + if (acpi_idle_suspend) { + acpi_safe_halt(); + local_irq_enable(); + return 0; + } + + if (pr->flags.bm_check) + acpi_idle_update_bm_rld(pr, cx); + + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); + acpi_idle_do_entry(cx); + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); + + local_irq_enable(); + cx->usage++; + + return ticks_elapsed_in_us(t1, t2); +} + +/** + * acpi_idle_enter_simple - enters an ACPI state without BM handling + * @dev: the target CPU + * @state: the state data + */ +static int acpi_idle_enter_simple(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + struct acpi_processor *pr; + struct acpi_processor_cx *cx = cpuidle_get_statedata(state); + u32 t1, t2; + int sleep_ticks = 0; + + pr = __get_cpu_var(processors); + + if (unlikely(!pr)) + return 0; + + if (acpi_idle_suspend) + return(acpi_idle_enter_c1(dev, state)); + + local_irq_disable(); + current_thread_info()->status &= ~TS_POLLING; + /* + * TS_POLLING-cleared state must be visible before we test + * NEED_RESCHED: + */ + smp_mb(); + + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } + + /* + * Must be done before busmaster disable as we might need to + * access HPET ! 
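+	 * (acpi_state_timer_broadcast() below may hand the tick over
+	 *  to the broadcast clockevent device -- typically the HPET --
+	 *  when the local APIC timer stops in deep C-states)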
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
+	if (cx->type == ACPI_STATE_C3)
+		ACPI_FLUSH_CPU_CACHE();
+
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+	/* TSC could halt in idle, so notify users */
+	if (tsc_halts_in_c(cx->type))
+		mark_tsc_unstable("TSC halts in idle");
+#endif
+	sleep_ticks = ticks_elapsed(t1, t2);
+
+	/* Tell the scheduler how much we idled: */
+	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
+	local_irq_enable();
+	current_thread_info()->status |= TS_POLLING;
+
+	cx->usage++;
+
+	acpi_state_timer_broadcast(pr, cx, 0);
+	cx->time += sleep_ticks;
+	return ticks_elapsed_in_us(t1, t2);
+}
+
+static int c3_cpu_count;
+static DEFINE_SPINLOCK(c3_lock);
+
+/**
+ * acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @dev: the target CPU
+ * @state: the state data
+ *
+ * If BM is detected, the deepest non-C3 idle state is entered instead.
+ */
+static int acpi_idle_enter_bm(struct cpuidle_device *dev,
+			      struct cpuidle_state *state)
+{
+	struct acpi_processor *pr;
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	u32 t1, t2;
+	int sleep_ticks = 0;
+
+	pr = __get_cpu_var(processors);
+
+	if (unlikely(!pr))
+		return 0;
+
+	if (acpi_idle_suspend)
+		return(acpi_idle_enter_c1(dev, state));
+
+	if (acpi_idle_bm_check()) {
+		if (dev->safe_state) {
+			dev->last_state = dev->safe_state;
+			return dev->safe_state->enter(dev, dev->safe_state);
+		} else {
+			local_irq_disable();
+			acpi_safe_halt();
+			local_irq_enable();
+			return 0;
+		}
+	}
+
+	local_irq_disable();
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we test
+	 * NEED_RESCHED:
+	 */
+	smp_mb();
+
+	if (unlikely(need_resched())) {
+		current_thread_info()->status |= TS_POLLING;
+		local_irq_enable();
+		return 0;
+	}
+
+	acpi_unlazy_tlb(smp_processor_id());
+
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
+	/*
+	 * Must be done before busmaster disable as we might need to
+	 * access HPET !
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	acpi_idle_update_bm_rld(pr, cx);
+
+	/*
+	 * disable bus master
+	 * bm_check implies we need ARB_DIS
+	 * !bm_check implies we need cache flush
+	 * bm_control implies whether we can do ARB_DIS
+	 *
+	 * That leaves a case where bm_check is set and bm_control is
+	 * not set. In that case we cannot do much, we enter C3
+	 * without doing anything.
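+	 *
+	 * (here the ARB_DIS bookkeeping is serialized by the c3_lock
+	 *  spinlock below, instead of the atomic c3_cpu_count used by
+	 *  the old idle loop)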
+ */ + if (pr->flags.bm_check && pr->flags.bm_control) { + spin_lock(&c3_lock); + c3_cpu_count++; + /* Disable bus master arbitration when all CPUs are in C3 */ + if (c3_cpu_count == num_online_cpus()) + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1); + spin_unlock(&c3_lock); + } else if (!pr->flags.bm_check) { + ACPI_FLUSH_CPU_CACHE(); + } + + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); + acpi_idle_do_entry(cx); + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); + + /* Re-enable bus master arbitration */ + if (pr->flags.bm_check && pr->flags.bm_control) { + spin_lock(&c3_lock); + acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); + c3_cpu_count--; + spin_unlock(&c3_lock); + } + +#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86) + /* TSC could halt in idle, so notify users */ + if (tsc_halts_in_c(ACPI_STATE_C3)) + mark_tsc_unstable("TSC halts in idle"); +#endif + sleep_ticks = ticks_elapsed(t1, t2); + /* Tell the scheduler how much we idled: */ + sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + + local_irq_enable(); + current_thread_info()->status |= TS_POLLING; + + cx->usage++; + + acpi_state_timer_broadcast(pr, cx, 0); + cx->time += sleep_ticks; + return ticks_elapsed_in_us(t1, t2); +} + +struct cpuidle_driver acpi_idle_driver = { + .name = "acpi_idle", + .owner = THIS_MODULE, +}; + +/** + * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE + * @pr: the ACPI processor + */ +static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) +{ + int i, count = CPUIDLE_DRIVER_STATE_START; + struct acpi_processor_cx *cx; + struct cpuidle_state *state; + struct cpuidle_device *dev = &pr->power.dev; + + if (!pr->flags.power_setup_done) + return -EINVAL; + + if (pr->flags.power == 0) { + return -EINVAL; + } + + dev->cpu = pr->id; + for (i = 0; i < CPUIDLE_STATE_MAX; i++) { + dev->states[i].name[0] = '\0'; + dev->states[i].desc[0] = '\0'; + } + + for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { + cx = &pr->power.states[i]; + state = &dev->states[count]; + + if (!cx->valid) + continue; + +#ifdef CONFIG_HOTPLUG_CPU + if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && + !pr->flags.has_cst && + !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) + continue; +#endif + cpuidle_set_statedata(state, cx); + + snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); + strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); + state->exit_latency = cx->latency; + state->target_residency = cx->latency * latency_factor; + state->power_usage = cx->power; + + state->flags = 0; + switch (cx->type) { + case ACPI_STATE_C1: + state->flags |= CPUIDLE_FLAG_SHALLOW; + if (cx->entry_method == ACPI_CSTATE_FFH) + state->flags |= CPUIDLE_FLAG_TIME_VALID; + + state->enter = acpi_idle_enter_c1; + dev->safe_state = state; + break; + + case ACPI_STATE_C2: + state->flags |= CPUIDLE_FLAG_BALANCED; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = acpi_idle_enter_simple; + dev->safe_state = state; + break; + + case ACPI_STATE_C3: + state->flags |= CPUIDLE_FLAG_DEEP; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->flags |= CPUIDLE_FLAG_CHECK_BM; + state->enter = pr->flags.bm_check ? 
+				acpi_idle_enter_bm :
+				acpi_idle_enter_simple;
+			break;
+		}
+
+		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	return 0;
+}
+
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int ret = 0;
+
+	if (boot_option_idle_override)
+		return 0;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst) {
+		return -ENODEV;
+	}
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	cpuidle_pause_and_lock();
+	cpuidle_disable_device(&pr->power.dev);
+	acpi_processor_get_power_info(pr);
+	if (pr->flags.power) {
+		acpi_processor_setup_cpuidle(pr);
+		ret = cpuidle_enable_device(&pr->power.dev);
+	}
+	cpuidle_resume_and_unlock();
+
+	return ret;
+}
+
+#endif /* CONFIG_CPU_IDLE */
+
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
@@ -1113,23 +1805,38 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	struct proc_dir_entry *entry = NULL;
 	unsigned int i;
 
+	if (boot_option_idle_override)
+		return 0;
 	if (!first_run) {
+		if (idle_halt) {
+			/*
+			 * When the boot option "idle=halt" is used, halt
+			 * is forced for CPU idle.
+			 * In that case C2/C3 are meaningless, so max_cstate
+			 * is set to one.
+			 */
+			max_cstate = 1;
+		}
 		dmi_check_system(processor_power_dmi_table);
+		max_cstate = acpi_processor_cstate_check(max_cstate);
 		if (max_cstate < ACPI_C_STATES_MAX)
 			printk(KERN_NOTICE
 			       "ACPI: processor limited to max C-state %d\n",
 			       max_cstate);
 		first_run++;
-		register_latency_notifier(&acpi_processor_latency_notifier);
+#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
+		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+				    &acpi_processor_latency_notifier);
+#endif
 	}
 
 	if (!pr)
 		return -EINVAL;
 
-	if (acpi_fadt.cst_cnt && !nocst) {
+	if (acpi_gbl_FADT.cst_control && !nocst) {
 		status =
-		    acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
+		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
 					"Notifying BIOS of _CST ability failed"));
@@ -1137,13 +1844,20 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 	}
 
 	acpi_processor_get_power_info(pr);
+	pr->flags.power_setup_done = 1;
 
 	/*
 	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
 	 * platforms that only support C1.
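+	 * (With CONFIG_CPU_IDLE the entry points are instead registered
+	 *  through cpuidle_register_device() below.)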
*/ - if ((pr->flags.power) && (!boot_option_idle_override)) { + if (pr->flags.power) { +#ifdef CONFIG_CPU_IDLE + acpi_processor_setup_cpuidle(pr); + if (cpuidle_register_device(&pr->power.dev)) + return -EIO; +#endif + printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id); for (i = 1; i <= pr->power.count; i++) if (pr->power.states[i].valid) @@ -1151,41 +1865,45 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, pr->power.states[i].type); printk(")\n"); +#ifndef CONFIG_CPU_IDLE if (pr->id == 0) { pm_idle_save = pm_idle; pm_idle = acpi_processor_idle; } +#endif } /* 'power' [R] */ - entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, - S_IRUGO, acpi_device_dir(device)); + entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER, + S_IRUGO, acpi_device_dir(device), + &acpi_processor_power_fops, + acpi_driver_data(device)); if (!entry) return -EIO; - else { - entry->proc_fops = &acpi_processor_power_fops; - entry->data = acpi_driver_data(device); - entry->owner = THIS_MODULE; - } - - pr->flags.power_setup_done = 1; - return 0; } int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device) { + if (boot_option_idle_override) + return 0; +#ifdef CONFIG_CPU_IDLE + cpuidle_unregister_device(&pr->power.dev); +#endif pr->flags.power_setup_done = 0; if (acpi_device_dir(device)) remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device)); +#ifndef CONFIG_CPU_IDLE + /* Unregister the idle handler when processor #0 is removed. */ if (pr->id == 0) { - pm_idle = pm_idle_save; + if (pm_idle_save) + pm_idle = pm_idle_save; /* * We are about to unload the current idle thread pm callback @@ -1193,8 +1911,12 @@ int acpi_processor_power_exit(struct acpi_processor *pr, * copies of pm_idle before proceeding. */ cpu_idle_wait(); - unregister_latency_notifier(&acpi_processor_latency_notifier); +#ifdef CONFIG_SMP + pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, + &acpi_processor_latency_notifier); +#endif } +#endif return 0; }
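
A standalone sketch of the PM-timer arithmetic this patch relies on
(ticks_elapsed() and PM_TIMER_TICKS_TO_US() above); a minimal userspace
illustration, not kernel code, assuming the 24-bit timer case, i.e. the
ACPI_FADT_32BIT_TIMER flag is clear:

	#include <stdio.h>
	#include <stdint.h>

	#define PM_TIMER_FREQUENCY	3579545	/* Hz, fixed by the ACPI spec */
	#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000) / (PM_TIMER_FREQUENCY / 1000))

	/* mirrors ticks_elapsed() for a 24-bit PM timer */
	static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2)
	{
		if (t2 >= t1)
			return t2 - t1;
		/* the counter wrapped once between the two reads */
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	}

	int main(void)
	{
		/* two reads straddling a 24-bit wraparound */
		uint32_t t1 = 0x00FFFFF0, t2 = 0x00000010;
		uint32_t ticks = ticks_elapsed(t1, t2);

		printf("%u ticks, ~%u us\n", ticks,
		       (unsigned int)PM_TIMER_TICKS_TO_US(ticks));
		return 0;
	}

At 3.579545 MHz a 24-bit counter wraps about every 4.7 seconds, so a
single wrap between two reads is the only case these helpers can (and
need to) correct for.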