/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>        /* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS                (1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD                     1       /* 1us */
#define C3_OVERHEAD                     1       /* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)         (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
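
/*
 * The ACPI PM timer ticks at PM_TIMER_FREQUENCY (3.579545 MHz), i.e. roughly
 * 3.58 ticks per microsecond; the conversion macros above and the 4-tick
 * C2/C3_OVERHEAD values (~1 us) are based on that rate.
 */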

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
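
/*
 * Both parameters can be given on the kernel command line, e.g.
 * "processor.max_cstate=1" (the override suggested by set_max_cstate()
 * below) or "processor.nocst=1" to ignore _CST and use FADT/P_BLK data.
 */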

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 * 800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 * 100 HZ: 0x0000000F: 4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
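
/*
 * A shorter bm_history mask keeps less bus-master history and therefore
 * allows more aggressive C3 entry; e.g. booting with
 * "processor.bm_history=0xF" tracks only 4 jiffies of activity.
 */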

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else   /* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
        { set_max_cstate, "Medion 41700", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        {},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
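
/*
 * The PM timer counter is either 24 or 32 bits wide, as indicated by the
 * FADT flag tested above (ACPI_FADT_32BIT_TIMER); both helpers here assume
 * at most one wraparound and mask 24-bit rollover to 0x00FFFFFF.
 */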

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return PM_TIMER_TICKS_TO_US(t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
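/*
 * safe_halt() executes "sti; hlt"; the STI interrupt shadow means an
 * interrupt arriving after the need_resched() test below still wakes
 * the CPU, so there is no lost-wakeup window.
 */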
static void acpi_safe_halt(void)
{
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we
         * test NEED_RESCHED:
         */
        smp_mb();
        if (!need_resched()) {
                safe_halt();
                local_irq_disable();
        }
        current_thread_info()->status |= TS_POLLING;
}

#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload (old may be NULL on first use) */
                if ((!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
                break;
        }

        pr->power.state = new;

        return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
        if (cstate->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
        } else {
                int unused;
                /* IO port based C-state */
                inb(cstate->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}
#endif /* !CONFIG_CPU_IDLE */

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        /*
         * Check if one of the previous states already marked the
         * lapic unstable.
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
        unsigned long reason;

        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

        clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
        int state = cx - pr->power.states;

        if (state >= pr->power.timer_broadcast_on_state) {
                unsigned long reason;

                reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
                clockevents_notify(reason, &pr->id);
        }
}
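
/*
 * With CLOCK_EVT_NOTIFY_BROADCAST_ENTER the clockevents core hands this
 * CPU's wakeups to a broadcast device that keeps running in deep C-states
 * (e.g. HPET or PIT), compensating for the stopped local APIC timer.
 */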

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device *device, pm_message_t state)
{
        acpi_idle_suspend = 1;
        return 0;
}

int acpi_processor_resume(struct acpi_device *device)
{
        acpi_idle_suspend = 0;
        return 0;
}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                        return 0;
                /*FALL THROUGH*/
        case X86_VENDOR_INTEL:
                /* Several cases known where TSC halts in C2 too */
        default:
                return state > ACPI_STATE_C1;
        }
}
#endif
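
/*
 * mark_tsc_unstable() (used in the idle paths below) makes the kernel stop
 * using the TSC as a clocksource, since a TSC that halts in deep C-states
 * cannot provide monotonic time across idle periods.
 */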

#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        pr = processors[smp_processor_id()];
        if (!pr) {
                local_irq_enable();
                return;
        }

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx || acpi_idle_suspend) {
                if (pm_idle_save) {
                        pm_idle_save(); /* enables IRQs */
                } else {
                        acpi_safe_halt();
                        local_irq_enable();
                }

                return;
        }

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 31)
                        diff = 31;

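                /*
                 * bm_activity is a sliding bit window: shift in one zero
                 * per elapsed jiffy, then OR in bit 0 below if the bus
                 * was busy this jiffy.
                 */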
                pr->power.bm_activity <<= diff;

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
                if (bm_status) {
                        pr->power.bm_activity |= 0x1;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity, forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity |= 0x1;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * If bus mastering is or was active this jiffy, demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fall back to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if ((pr->power.bm_activity & 0x1) &&
                    cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system. We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                cx = &pr->power.states[ACPI_STATE_C1];
#endif

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();
                if (need_resched()) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
                        return;
                }
        }

        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save) {
                        pm_idle_save(); /* enables IRQs */
                } else {
                        acpi_safe_halt();
                        local_irq_enable();
                }

                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 *
                 * Note: the TSC better not stop in C1, sched_clock() will
                 *       skew otherwise.
                 */
                sleep_ticks = 0xFFFFFFFF;

                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Tell the scheduler that we are going deep-idle: */
                sched_clock_idle_sleep_event();
                /* Invoke C2 */
                acpi_state_timer_broadcast(pr, cx, 1);
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
                /* TSC halts in C2, so notify users */
                if (tsc_halts_in_c(ACPI_STATE_C2))
                        mark_tsc_unstable("possible TSC halt in C2");
#endif
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2);

                /* Tell the scheduler how much we idled: */
                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

                /* Re-enable interrupts */
                local_irq_enable();
                /* Do not account our idle-switching overhead: */
                sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

                current_thread_info()->status |= TS_POLLING;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        case ACPI_STATE_C3:
                acpi_unlazy_tlb(smp_processor_id());
                /*
                 * Must be done before busmaster disable as we might
                 * need to access HPET !
                 */
                acpi_state_timer_broadcast(pr, cx, 1);
                /*
                 * disable bus master
                 * bm_check implies we need ARB_DIS
                 * !bm_check implies we need cache flush
                 * bm_control implies whether we can do ARB_DIS
                 *
                 * That leaves a case where bm_check is set and bm_control is
                 * not set. In that case we cannot do much, we enter C3
                 * without doing anything.
                 */
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                        }
                } else if (!pr->flags.bm_check) {
                        /* SMP with no shared cache... Invalidate cache  */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Invoke C3 */
                /* Tell the scheduler that we are going deep-idle: */
                sched_clock_idle_sleep_event();
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                }

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
                /* TSC halts in C3, so notify users */
                if (tsc_halts_in_c(ACPI_STATE_C3))
                        mark_tsc_unstable("TSC halts in C3");
#endif
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2);
                /* Tell the scheduler how much we idled: */
                sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

                /* Re-enable interrupts */
                local_irq_enable();
                /* Do not account our idle-switching overhead: */
                sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

                current_thread_info()->status |= TS_POLLING;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        default:
                local_irq_enable();
                return;
        }
        cx->usage++;
        if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
                cx->time += sleep_ticks;

        next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
        /* Don't do promotion/demotion */
        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
                next_state = cx;
                goto end;
        }
#endif

        /*
         * Promotion?
         * ----------
         * Track the number of long sleeps (time asleep greater than the
         * threshold) and promote when the count threshold is reached.  Note
         * that bus mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks &&
                    cx->promotion.state->latency <=
                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity &
                                              cx->promotion.threshold.bm)) {
                                                next_state = cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of short sleeps (time asleep less than the time
         * threshold) and demote when the count threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

      end:
        /*
         * Demote if current state exceeds max_cstate
         * or if the latency of the current state is unacceptable
         */
        if ((pr->power.state - pr->power.states) > max_cstate ||
                pr->power.state->latency >
                                pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        if (!pr)
                return -EINVAL;

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return -ENODEV;

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }
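
        /*
         * Note the threshold choice above: a sleep shorter than the state's
         * own latency means the residency did not cover the transition
         * cost, so dropping to the next shallower valid state is a net win.
         */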

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return 0;
}
#endif /* !CONFIG_CPU_IDLE */

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

        if (!pr)
                return -EINVAL;

        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
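        /*
         * P_BLK is a 6-byte I/O block: P_CNT (4 bytes) followed by the
         * one-byte P_LVL2 and P_LVL3 command ports, hence the +4 and +5
         * offsets below.
         */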
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
                pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}

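/*
 * _CST evaluates to a package of the form { count, Package() { Register,
 * Type, Latency, Power }, ... }; the loop below walks the sub-packages
 * and validates each field before copying it into pr->power.states[].
 */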
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int current_count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        if (nocst)
                return -ENODEV;

        current_count = 0;

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
                status = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                /* There should be an easy way to extract an integer... */
                obj = &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;
                /*
                 * Some buggy BIOSes won't list C1 in _CST -
                 * Let acpi_processor_get_power_info_default() handle them later
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        current_count++;

                cx.address = reg->address;
                cx.index = current_count + 1;

                cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe
                                        (pr->id, &cx, reg) == 0) {
                                cx.entry_method = ACPI_CSTATE_FFH;
                        } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
                                 * Otherwise, ignore this info and continue.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        } else {
                                continue;
                        }
                } else {
                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
                                 cx.address);
                }

                if (cx.type == ACPI_STATE_C1) {
                        cx.valid = 1;
                }

                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support at most ACPI_PROCESSOR_MAX_POWER - 1 states
                 * (array indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        printk(KERN_WARNING
                               "Limiting number of power states to max (%d)\n",
                               ACPI_PROCESSOR_MAX_POWER);
                        printk(KERN_WARNING
                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                status = -EFAULT;

      end:
        kfree(buffer.pointer);

        return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

        if (!cx->address)
                return;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy.
         */
        cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
        cx->latency_ticks = cx->latency;
#endif

        return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;

        if (!cx->address)
                return;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU  */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                if (!pr->flags.bm_control) {
                        if (pr->flags.has_cst != 1) {
                                /* bus mastering control is necessary */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support requires BM control\n"));
                                return;
                        } else {
                                /* Here we enter C3 without bus mastering */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support without BM control\n"));
                        }
                }
        } else {
                /*
                 * The FADT must advertise WBINVD support for the C3
                 * state to be usable when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy.
         */
        cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
        cx->latency_ticks = cx->latency;
#endif

        return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        acpi_propagate_timer_broadcast(pr);

        return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return result;
#endif

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = seq->private;
        unsigned int i;

        if (!pr)
                goto end;

        seq_printf(seq, "active state:            C%zd\n"
                   "max_cstate:              C%d\n"
                   "bus master activity:     %08x\n"
                   "maximum allowed latency: %d usec\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity,
                   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, "   %cC%d:                  ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage,
                           (unsigned long long)pr->power.states[i].time);
        }

      end:
        return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;

        if (boot_option_idle_override)
                return 0;

        if (!pr)
                return -EINVAL;

        if (nocst) {
                return -ENODEV;
        }

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 0, 1);
        return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
        .notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
        u32 bm_status = 0;

        acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity, forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                        bm_status = 1;
        }
        return bm_status;
}

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
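/*
 * Setting BM_RLD makes bus-master requests take the CPU out of C3, as
 * ACPI requires while bus mastering is possible in that state; it is
 * cleared again for shallower target states, which do not need it.
 */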
1381 static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
1382                                            struct acpi_processor_cx *target)
1383 {
1384         if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
1385                 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
1386                 pr->flags.bm_rld_set = 0;
1387         }
1388
1389         if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
1390                 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
1391                 pr->flags.bm_rld_set = 1;
1392         }
1393 }
1394
1395 /**
1396  * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
1397  * @cx: cstate data
1398  *
1399  * Caller disables interrupt before call and enables interrupt after return.
1400  */
1401 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
1402 {
1403         if (cx->entry_method == ACPI_CSTATE_FFH) {
1404                 /* Call into architectural FFH based C-state */
1405                 acpi_processor_ffh_cstate_enter(cx);
1406         } else if (cx->entry_method == ACPI_CSTATE_HALT) {
1407                 acpi_safe_halt();
1408         } else {
1409                 int unused;
1410                 /* IO port based C-state */
1411                 inb(cx->address);
1412                 /* Dummy wait op - must do something useless after P_LVL2 read
1413                    because chipsets cannot guarantee that STPCLK# signal
1414                    gets asserted in time to freeze execution properly. */
1415                 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
1416         }
1417 }
1418
1419 /**
1420  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
1421  * @dev: the target CPU
1422  * @state: the state data
1423  *
1424  * This is equivalent to the HALT instruction.
1425  */
1426 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1427                               struct cpuidle_state *state)
1428 {
1429         u32 t1, t2;
1430         struct acpi_processor *pr;
1431         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1432
1433         pr = processors[smp_processor_id()];
1434
1435         if (unlikely(!pr))
1436                 return 0;
1437
1438         local_irq_disable();
1439
1440         /* Do not access any ACPI IO ports in suspend path */
1441         if (acpi_idle_suspend) {
1442                 acpi_safe_halt();
1443                 local_irq_enable();
1444                 return 0;
1445         }
1446
1447         if (pr->flags.bm_check)
1448                 acpi_idle_update_bm_rld(pr, cx);
1449
1450         t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1451         acpi_idle_do_entry(cx);
1452         t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1453
1454         local_irq_enable();
1455         cx->usage++;
1456
1457         return ticks_elapsed_in_us(t1, t2);
1458 }
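
/*
 * Aside (sketch only, not the helper used above): ticks_elapsed_in_us()
 * must allow for the ACPI PM timer wrapping between the two reads.  A
 * minimal wraparound-safe delta for the 24-bit timer case could look
 * like this; the real helpers earlier in this file also honor the
 * FADT's 32-bit timer flag.
 */
#if 0
static u32 example_ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return t2 - t1;                         /* no wrap */
        return (0x00FFFFFF - t1 + t2) & 0x00FFFFFF;     /* 24-bit wrap */
}
#endif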
1459
1460 /**
1461  * acpi_idle_enter_simple - enters an ACPI state without BM handling
1462  * @dev: the target CPU
1463  * @state: the state data
1464  */
1465 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
1466                                   struct cpuidle_state *state)
1467 {
1468         struct acpi_processor *pr;
1469         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1470         u32 t1, t2;
1471         int sleep_ticks = 0;
1472
1473         pr = processors[smp_processor_id()];
1474
1475         if (unlikely(!pr))
1476                 return 0;
1477
1478         if (acpi_idle_suspend)
1479                 return acpi_idle_enter_c1(dev, state);
1480
1481         local_irq_disable();
1482         current_thread_info()->status &= ~TS_POLLING;
1483         /*
1484          * TS_POLLING-cleared state must be visible before we test
1485          * NEED_RESCHED:
1486          */
1487         smp_mb();
1488
1489         if (unlikely(need_resched())) {
1490                 current_thread_info()->status |= TS_POLLING;
1491                 local_irq_enable();
1492                 return 0;
1493         }
1494
1495         /*
1496          * Must be done before busmaster disable as we might need to
1497          * access HPET!
1498          */
1499         acpi_state_timer_broadcast(pr, cx, 1);
1500
1501         if (pr->flags.bm_check)
1502                 acpi_idle_update_bm_rld(pr, cx);
1503
1504         if (cx->type == ACPI_STATE_C3)
1505                 ACPI_FLUSH_CPU_CACHE();
1506
1507         t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1508         /* Tell the scheduler that we are going deep-idle: */
1509         sched_clock_idle_sleep_event();
1510         acpi_idle_do_entry(cx);
1511         t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1512
1513 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
1514         /* TSC could halt in idle, so notify users */
1515         if (tsc_halts_in_c(cx->type))
1516                 mark_tsc_unstable("TSC halts in idle");
1517 #endif
1518         sleep_ticks = ticks_elapsed(t1, t2);
1519
1520         /* Tell the scheduler how much we idled: */
1521         sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1522
1523         local_irq_enable();
1524         current_thread_info()->status |= TS_POLLING;
1525
1526         cx->usage++;
1527
1528         acpi_state_timer_broadcast(pr, cx, 0);
1529         cx->time += sleep_ticks;
1530         return ticks_elapsed_in_us(t1, t2);
1531 }
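
/*
 * Worked example for the wakeup accounting above: the PM timer runs at
 * PM_TIMER_FREQUENCY (3.579545 MHz), so PM_TIMER_TICK_NS is
 * 1000000000ULL / 3579545, roughly 279 ns per tick.  An idle period of
 * about 3580 ticks is therefore reported to the scheduler as ~1 ms.
 */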
1532
1533 static int c3_cpu_count;
1534 static DEFINE_SPINLOCK(c3_lock);
1535
1536 /**
1537  * acpi_idle_enter_bm - enters C3 with proper BM handling
1538  * @dev: the target CPU
1539  * @state: the state data
1540  *
1541  * If BM is detected, the deepest non-C3 idle state is entered instead.
1542  */
1543 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1544                               struct cpuidle_state *state)
1545 {
1546         struct acpi_processor *pr;
1547         struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
1548         u32 t1, t2;
1549         int sleep_ticks = 0;
1550
1551         pr = processors[smp_processor_id()];
1552
1553         if (unlikely(!pr))
1554                 return 0;
1555
1556         if (acpi_idle_suspend)
1557                 return acpi_idle_enter_c1(dev, state);
1558
1559         if (acpi_idle_bm_check()) {
1560                 if (dev->safe_state) {
1561                         return dev->safe_state->enter(dev, dev->safe_state);
1562                 } else {
1563                         local_irq_disable();
1564                         acpi_safe_halt();
1565                         local_irq_enable();
1566                         return 0;
1567                 }
1568         }
1569
1570         local_irq_disable();
1571         current_thread_info()->status &= ~TS_POLLING;
1572         /*
1573          * TS_POLLING-cleared state must be visible before we test
1574          * NEED_RESCHED:
1575          */
1576         smp_mb();
1577
1578         if (unlikely(need_resched())) {
1579                 current_thread_info()->status |= TS_POLLING;
1580                 local_irq_enable();
1581                 return 0;
1582         }
1583
1584         acpi_unlazy_tlb(smp_processor_id());
1585
1586         /* Tell the scheduler that we are going deep-idle: */
1587         sched_clock_idle_sleep_event();
1588         /*
1589          * Must be done before busmaster disable as we might need to
1590          * access HPET!
1591          */
1592         acpi_state_timer_broadcast(pr, cx, 1);
1593
1594         acpi_idle_update_bm_rld(pr, cx);
1595
1596         /*
1597          * Disable bus master:
1598          * bm_check implies we need ARB_DIS
1599          * !bm_check implies we need a cache flush
1600          * bm_control indicates whether we can do ARB_DIS
1601          *
1602          * That leaves the case where bm_check is set but bm_control is
1603          * not. There we cannot do much; we enter C3 without doing
1604          * anything further.
1605          */
1606         if (pr->flags.bm_check && pr->flags.bm_control) {
1607                 spin_lock(&c3_lock);
1608                 c3_cpu_count++;
1609                 /* Disable bus master arbitration when all CPUs are in C3 */
1610                 if (c3_cpu_count == num_online_cpus())
1611                         acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
1612                 spin_unlock(&c3_lock);
1613         } else if (!pr->flags.bm_check) {
1614                 ACPI_FLUSH_CPU_CACHE();
1615         }
1616
1617         t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1618         acpi_idle_do_entry(cx);
1619         t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1620
1621         /* Re-enable bus master arbitration */
1622         if (pr->flags.bm_check && pr->flags.bm_control) {
1623                 spin_lock(&c3_lock);
1624                 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
1625                 c3_cpu_count--;
1626                 spin_unlock(&c3_lock);
1627         }
1628
1629 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
1630         /* TSC could halt in idle, so notify users */
1631         if (tsc_halts_in_c(ACPI_STATE_C3))
1632                 mark_tsc_unstable("TSC halts in idle");
1633 #endif
1634         sleep_ticks = ticks_elapsed(t1, t2);
1635         /* Tell the scheduler how much we idled: */
1636         sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
1637
1638         local_irq_enable();
1639         current_thread_info()->status |= TS_POLLING;
1640
1641         cx->usage++;
1642
1643         acpi_state_timer_broadcast(pr, cx, 0);
1644         cx->time += sleep_ticks;
1645         return ticks_elapsed_in_us(t1, t2);
1646 }
1647
1648 struct cpuidle_driver acpi_idle_driver = {
1649         .name =         "acpi_idle",
1650         .owner =        THIS_MODULE,
1651 };
1652
1653 /**
1654  * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
1655  * @pr: the ACPI processor
1656  */
1657 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1658 {
1659         int i, count = CPUIDLE_DRIVER_STATE_START;
1660         struct acpi_processor_cx *cx;
1661         struct cpuidle_state *state;
1662         struct cpuidle_device *dev = &pr->power.dev;
1663
1664         if (!pr->flags.power_setup_done)
1665                 return -EINVAL;
1666
1667         if (pr->flags.power == 0)
1668                 return -EINVAL;
1670
1671         for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
1672                 dev->states[i].name[0] = '\0';
1673                 dev->states[i].desc[0] = '\0';
1674         }
1675
1676         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
1677                 cx = &pr->power.states[i];
1678                 state = &dev->states[count];
1679
1680                 if (!cx->valid)
1681                         continue;
1682
1683 #ifdef CONFIG_HOTPLUG_CPU
1684                 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
1685                     !pr->flags.has_cst &&
1686                     !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
1687                         continue;
1688 #endif
1689                 cpuidle_set_statedata(state, cx);
1690
1691                 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1692                 strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
1693                 state->exit_latency = cx->latency;
1694                 state->target_residency = cx->latency * latency_factor;
1695                 state->power_usage = cx->power;
1696
1697                 state->flags = 0;
1698                 switch (cx->type) {
1699                 case ACPI_STATE_C1:
1700                         state->flags |= CPUIDLE_FLAG_SHALLOW;
1701                         if (cx->entry_method == ACPI_CSTATE_FFH)
1702                                 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1703
1704                         state->enter = acpi_idle_enter_c1;
1705                         dev->safe_state = state;
1706                         break;
1707
1708                 case ACPI_STATE_C2:
1709                         state->flags |= CPUIDLE_FLAG_BALANCED;
1710                         state->flags |= CPUIDLE_FLAG_TIME_VALID;
1711                         state->enter = acpi_idle_enter_simple;
1712                         dev->safe_state = state;
1713                         break;
1714
1715                 case ACPI_STATE_C3:
1716                         state->flags |= CPUIDLE_FLAG_DEEP;
1717                         state->flags |= CPUIDLE_FLAG_TIME_VALID;
1718                         state->flags |= CPUIDLE_FLAG_CHECK_BM;
1719                         state->enter = pr->flags.bm_check ?
1720                                 acpi_idle_enter_bm :
1721                                 acpi_idle_enter_simple;
1722                         break;
1723                 }
1724
1725                 count++;
1726                 if (count == CPUIDLE_STATE_MAX)
1727                         break;
1728         }
1729
1730         dev->state_count = count;
1731
1732         if (!count)
1733                 return -EINVAL;
1734
1735         return 0;
1736 }
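
/*
 * Summary of the mapping built above, for quick reference:
 *
 *   ACPI C-state  cpuidle flags                   enter handler
 *   C1            SHALLOW (+TIME_VALID for FFH)   acpi_idle_enter_c1
 *   C2            BALANCED | TIME_VALID           acpi_idle_enter_simple
 *   C3            DEEP | TIME_VALID | CHECK_BM    acpi_idle_enter_bm, or
 *                                                 _simple if !bm_check
 */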
1737
1738 int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1739 {
1740         int ret;
1741
1742         if (boot_option_idle_override)
1743                 return 0;
1744
1745         if (!pr)
1746                 return -EINVAL;
1747
1748         if (nocst)
1749                 return -ENODEV;
1751
1752         if (!pr->flags.power_setup_done)
1753                 return -ENODEV;
1754
1755         cpuidle_pause_and_lock();
1756         cpuidle_disable_device(&pr->power.dev);
1757         acpi_processor_get_power_info(pr);
1758         acpi_processor_setup_cpuidle(pr);
1759         ret = cpuidle_enable_device(&pr->power.dev);
1760         cpuidle_resume_and_unlock();
1761
1762         return ret;
1763 }
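
/*
 * Usage sketch (hypothetical, lives outside this file): the processor
 * driver's ACPI notify handler forwards power notifications here,
 * along the lines of:
 */
#if 0
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_processor_cst_has_changed(pr);
                break;
#endif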
1764
1765 #endif /* CONFIG_CPU_IDLE */
1766
1767 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1768                               struct acpi_device *device)
1769 {
1770         acpi_status status = 0;
1771         static int first_run;
1772         struct proc_dir_entry *entry = NULL;
1773         unsigned int i;
1774
1775         if (boot_option_idle_override)
1776                 return 0;
1777
1778         if (!first_run) {
1779                 dmi_check_system(processor_power_dmi_table);
1780                 max_cstate = acpi_processor_cstate_check(max_cstate);
1781                 if (max_cstate < ACPI_C_STATES_MAX)
1782                         printk(KERN_NOTICE
1783                                "ACPI: processor limited to max C-state %d\n",
1784                                max_cstate);
1785                 first_run++;
1786 #if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
1787                 pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
1788                                 &acpi_processor_latency_notifier);
1789 #endif
1790         }
1791
1792         if (!pr)
1793                 return -EINVAL;
1794
1795         if (acpi_gbl_FADT.cst_control && !nocst) {
1796                 status =
1797                     acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1798                 if (ACPI_FAILURE(status)) {
1799                         ACPI_EXCEPTION((AE_INFO, status,
1800                                         "Notifying BIOS of _CST ability failed"));
1801                 }
1802         }
1803
1804         acpi_processor_get_power_info(pr);
1805         pr->flags.power_setup_done = 1;
1806
1807         /*
1808          * Install the idle handler if processor power management is supported.
1809          * Note that the previously set idle handler will be used on
1810          * platforms that only support C1.
1811          */
1812         if (pr->flags.power) {
1813 #ifdef CONFIG_CPU_IDLE
1814                 acpi_processor_setup_cpuidle(pr);
1815                 pr->power.dev.cpu = pr->id;
1816                 if (cpuidle_register_device(&pr->power.dev))
1817                         return -EIO;
1818 #endif
1819
1820                 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1821                 for (i = 1; i <= pr->power.count; i++)
1822                         if (pr->power.states[i].valid)
1823                                 printk(" C%d[C%d]", i,
1824                                        pr->power.states[i].type);
1825                 printk(")\n");
1826
1827 #ifndef CONFIG_CPU_IDLE
1828                 if (pr->id == 0) {
1829                         pm_idle_save = pm_idle;
1830                         pm_idle = acpi_processor_idle;
1831                 }
1832 #endif
1833         }
1834
1835         /* 'power' [R] */
1836         entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
1837                                   S_IRUGO, acpi_device_dir(device));
1838         if (!entry)
1839                 return -EIO;
1840
1841         entry->proc_fops = &acpi_processor_power_fops;
1842         entry->data = acpi_driver_data(device);
1843         entry->owner = THIS_MODULE;
1845
1846         return 0;
1847 }
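
/*
 * Example of the console line produced by the printk sequence above,
 * assuming the usual "ACPI: " PREFIX (the states reported depend on
 * the platform):
 *
 *   ACPI: CPU0 (power states: C1[C1] C2[C2] C3[C3])
 */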
1848
1849 int acpi_processor_power_exit(struct acpi_processor *pr,
1850                               struct acpi_device *device)
1851 {
1852         if (boot_option_idle_override)
1853                 return 0;
1854
1855 #ifdef CONFIG_CPU_IDLE
1856         if (pr->flags.power)
1857                 cpuidle_unregister_device(&pr->power.dev);
1858 #endif
1859         pr->flags.power_setup_done = 0;
1860
1861         if (acpi_device_dir(device))
1862                 remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
1863                                   acpi_device_dir(device));
1864
1865 #ifndef CONFIG_CPU_IDLE
1866
1867         /* Unregister the idle handler when processor #0 is removed. */
1868         if (pr->id == 0) {
1869                 pm_idle = pm_idle_save;
1870
1871                 /*
1872                  * We are about to unload the current idle thread pm callback
1873                  * (pm_idle), so wait for all processors to update cached/local
1874                  * copies of pm_idle before proceeding.
1875                  */
1876                 cpu_idle_wait();
1877 #ifdef CONFIG_SMP
1878                 pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
1879                                 &acpi_processor_latency_notifier);
1880 #endif
1881         }
1882 #endif
1883
1884         return 0;
1885 }