1b1a786b7deccb0b690138e97c737d19decf861a
[safe/jmp/linux-2.6] / drivers / base / power / main.c
1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/resume-trace.h>
25 #include <linux/rwsem.h>
26 #include <linux/interrupt.h>
27
28 #include "../base.h"
29 #include "power.h"
30
31 /*
32  * The entries in the dpm_list list are in a depth first order, simply
33  * because children are guaranteed to be discovered after parents, and
34  * are inserted at the back of the list on discovery.
35  *
36  * Since device_pm_add() may be called with a device semaphore held,
37  * we must never try to acquire a device semaphore while holding
38  * dpm_list_mutex.
39  */
40
/*
 * List of devices participating in system PM transitions, in order of
 * discovery: parents are always inserted before their children, so the list
 * is in depth-first order (resume walks it front to back, suspend back to
 * front).
 */
LIST_HEAD(dpm_list);

/* Serializes all access to dpm_list and to transition_started. */
static DEFINE_MUTEX(dpm_list_mtx);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;
50
/**
 *	device_pm_lock - lock the list of active devices used by the PM core
 *
 *	Acquires dpm_list_mtx; pairs with device_pm_unlock().  Intended for
 *	callers (e.g. the dpm_list move helpers below) that manipulate the
 *	list outside this file.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
58
/**
 *	device_pm_unlock - unlock the list of active devices used by the PM core
 *
 *	Releases dpm_list_mtx acquired by device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
66
67 /**
68  *      device_pm_add - add a device to the list of active devices
69  *      @dev:   Device to be added to the list
70  */
71 void device_pm_add(struct device *dev)
72 {
73         pr_debug("PM: Adding info for %s:%s\n",
74                  dev->bus ? dev->bus->name : "No Bus",
75                  kobject_name(&dev->kobj));
76         mutex_lock(&dpm_list_mtx);
77         if (dev->parent) {
78                 if (dev->parent->power.status >= DPM_SUSPENDING)
79                         dev_warn(dev, "parent %s should not be sleeping\n",
80                                  dev_name(dev->parent));
81         } else if (transition_started) {
82                 /*
83                  * We refuse to register parentless devices while a PM
84                  * transition is in progress in order to avoid leaving them
85                  * unhandled down the road
86                  */
87                 dev_WARN(dev, "Parentless device registered during a PM transaction\n");
88         }
89
90         list_add_tail(&dev->power.entry, &dpm_list);
91         mutex_unlock(&dpm_list_mtx);
92 }
93
/**
 *	device_pm_remove - remove a device from the list of active devices
 *	@dev:	Device to be removed from the list
 *
 *	Deletes @dev's power.entry from dpm_list under dpm_list_mtx.
 *	NOTE(review): only the list removal is visible here; removal of the
 *	device's PM-related sysfs attributes presumably happens elsewhere in
 *	the driver core -- confirm at the callers.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
}
109
/**
 *	device_pm_move_before - move device in dpm_list
 *	@deva:	Device to move in dpm_list
 *	@devb:	Device @deva should come before
 *
 *	No locking is done here, unlike device_pm_add(); the caller is
 *	presumably expected to hold dpm_list_mtx via device_pm_lock() --
 *	confirm at the call sites.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
125
/**
 *	device_pm_move_after - move device in dpm_list
 *	@deva:	Device to move in dpm_list
 *	@devb:	Device @deva should come after
 *
 *	No locking is done here, unlike device_pm_add(); the caller is
 *	presumably expected to hold dpm_list_mtx via device_pm_lock() --
 *	confirm at the call sites.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
141
/**
 *	device_pm_move_last - move device to end of dpm_list
 *	@dev:	Device to move in dpm_list
 *
 *	No locking is done here, unlike device_pm_add(); the caller is
 *	presumably expected to hold dpm_list_mtx via device_pm_lock() --
 *	confirm at the call sites.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}
153
/**
 *	pm_op - execute the PM operation appropriate for given PM event
 *	@dev:	Device.
 *	@ops:	PM operations to choose from.
 *	@state:	PM transition of the system being carried out.
 *
 *	Dispatches on @state.event and runs the matching callback from @ops,
 *	if it is implemented.  Returns 0 when the event's callback is absent,
 *	the callback's return value when one was run, or -EINVAL for events
 *	not handled under the current kernel configuration.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	/* QUIESCE is treated like FREEZE, RECOVER like THAW. */
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
214
/**
 *	pm_noirq_op - execute the PM operation appropriate for given PM event
 *	@dev:	Device.
 *	@ops:	PM operations to choose from.
 *	@state:	PM transition of the system being carried out.
 *
 *	The operation is executed with interrupts disabled by the only remaining
 *	functional CPU in the system.
 *
 *	"noirq" counterpart of pm_op(): same dispatch, but invokes the
 *	*_noirq members of @ops.  Returns 0 when the event's callback is
 *	absent, the callback's return value when one was run, or -EINVAL for
 *	events not handled under the current kernel configuration.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	/* QUIESCE is treated like FREEZE, RECOVER like THAW. */
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}
	return error;
}
278
279 static char *pm_verb(int event)
280 {
281         switch (event) {
282         case PM_EVENT_SUSPEND:
283                 return "suspend";
284         case PM_EVENT_RESUME:
285                 return "resume";
286         case PM_EVENT_FREEZE:
287                 return "freeze";
288         case PM_EVENT_QUIESCE:
289                 return "quiesce";
290         case PM_EVENT_HIBERNATE:
291                 return "hibernate";
292         case PM_EVENT_THAW:
293                 return "thaw";
294         case PM_EVENT_RESTORE:
295                 return "restore";
296         case PM_EVENT_RECOVER:
297                 return "recover";
298         default:
299                 return "(unknown PM event)";
300         }
301 }
302
/*
 * pm_dev_dbg - emit a debug message naming the PM operation applied to @dev.
 * @info is a prefix such as "EARLY " or "legacy class " identifying which
 * callback flavor is being run.  For sleep events, notes whether the device
 * may wake up the system.
 */
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
309
/*
 * pm_dev_err - log the failure of a device PM callback.
 * @info is a suffix such as " early" or " late" identifying which phase
 * failed; @error is the callback's nonzero return value.
 */
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
316
317 /*------------------------- Resume routines -------------------------*/
318
319 /**
320  *      device_resume_noirq - Power on one device (early resume).
321  *      @dev:   Device.
322  *      @state: PM transition of the system being carried out.
323  *
324  *      Must be called with interrupts disabled.
325  */
326 static int device_resume_noirq(struct device *dev, pm_message_t state)
327 {
328         int error = 0;
329
330         TRACE_DEVICE(dev);
331         TRACE_RESUME(0);
332
333         if (!dev->bus)
334                 goto End;
335
336         if (dev->bus->pm) {
337                 pm_dev_dbg(dev, state, "EARLY ");
338                 error = pm_noirq_op(dev, dev->bus->pm, state);
339         }
340  End:
341         TRACE_RESUME(error);
342         return error;
343 }
344
345 /**
346  *      dpm_resume_noirq - Power on all regular (non-sysdev) devices.
347  *      @state: PM transition of the system being carried out.
348  *
349  *      Call the "noirq" resume handlers for all devices marked as
350  *      DPM_OFF_IRQ and enable device drivers to receive interrupts.
351  *
352  *      Must be called under dpm_list_mtx.  Device drivers should not receive
353  *      interrupts while it's being executed.
354  */
355 void dpm_resume_noirq(pm_message_t state)
356 {
357         struct device *dev;
358
359         mutex_lock(&dpm_list_mtx);
360         list_for_each_entry(dev, &dpm_list, power.entry)
361                 if (dev->power.status > DPM_OFF) {
362                         int error;
363
364                         dev->power.status = DPM_OFF;
365                         error = device_resume_noirq(dev, state);
366                         if (error)
367                                 pm_dev_err(dev, state, " early", error);
368                 }
369         mutex_unlock(&dpm_list_mtx);
370         resume_device_irqs();
371 }
372 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
373
374 /**
375  *      device_resume - Restore state for one device.
376  *      @dev:   Device.
377  *      @state: PM transition of the system being carried out.
378  */
379 static int device_resume(struct device *dev, pm_message_t state)
380 {
381         int error = 0;
382
383         TRACE_DEVICE(dev);
384         TRACE_RESUME(0);
385
386         down(&dev->sem);
387
388         if (dev->bus) {
389                 if (dev->bus->pm) {
390                         pm_dev_dbg(dev, state, "");
391                         error = pm_op(dev, dev->bus->pm, state);
392                 } else if (dev->bus->resume) {
393                         pm_dev_dbg(dev, state, "legacy ");
394                         error = dev->bus->resume(dev);
395                 }
396                 if (error)
397                         goto End;
398         }
399
400         if (dev->type) {
401                 if (dev->type->pm) {
402                         pm_dev_dbg(dev, state, "type ");
403                         error = pm_op(dev, dev->type->pm, state);
404                 }
405                 if (error)
406                         goto End;
407         }
408
409         if (dev->class) {
410                 if (dev->class->pm) {
411                         pm_dev_dbg(dev, state, "class ");
412                         error = pm_op(dev, dev->class->pm, state);
413                 } else if (dev->class->resume) {
414                         pm_dev_dbg(dev, state, "legacy class ");
415                         error = dev->class->resume(dev);
416                 }
417         }
418  End:
419         up(&dev->sem);
420
421         TRACE_RESUME(error);
422         return error;
423 }
424
/**
 *	dpm_resume - Resume every device.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the appropriate "resume" callback for all devices the status of
 *	which indicates that they are inactive.
 *
 *	dpm_list_mtx is dropped around each device_resume() call (the
 *	callbacks may sleep and may register new children); each device is
 *	pinned with get_device() across that window, and handled devices are
 *	collected on a temporary list so the iteration survives concurrent
 *	removals.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* From this point on device_pm_add() stops warning about
	 * parentless registrations. */
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		/* Skip the move if the device was deleted from dpm_list
		 * while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Put the handled devices back onto dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
465
466 /**
467  *      device_complete - Complete a PM transition for given device
468  *      @dev:   Device.
469  *      @state: PM transition of the system being carried out.
470  */
471 static void device_complete(struct device *dev, pm_message_t state)
472 {
473         down(&dev->sem);
474
475         if (dev->class && dev->class->pm && dev->class->pm->complete) {
476                 pm_dev_dbg(dev, state, "completing class ");
477                 dev->class->pm->complete(dev);
478         }
479
480         if (dev->type && dev->type->pm && dev->type->pm->complete) {
481                 pm_dev_dbg(dev, state, "completing type ");
482                 dev->type->pm->complete(dev);
483         }
484
485         if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
486                 pm_dev_dbg(dev, state, "completing ");
487                 dev->bus->pm->complete(dev);
488         }
489
490         up(&dev->sem);
491 }
492
/**
 *	dpm_complete - Complete a PM transition for all devices.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the ->complete() callbacks for all devices that are not marked
 *	as DPM_ON.
 *
 *	dpm_list_mtx is dropped around each device_complete() call; each
 *	device is pinned with get_device() across that window, and handled
 *	devices are collected on a temporary list so the iteration survives
 *	concurrent removals.
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_list back to front (reverse of the resume order). */
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);

			mutex_lock(&dpm_list_mtx);
		}
		/* Skip the move if the device was deleted from dpm_list
		 * while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Put the handled devices back onto dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
525
/**
 *	dpm_resume_end - Restore state of each device in system.
 *	@state: PM transition of the system being carried out.
 *
 *	Resume all the devices, unlock them all, and allow new
 *	devices to be registered once again.
 *
 *	Counterpart of dpm_suspend_start(); may sleep.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
540
541
542 /*------------------------- Suspend routines -------------------------*/
543
544 /**
545  *      resume_event - return a PM message representing the resume event
546  *                     corresponding to given sleep state.
547  *      @sleep_state: PM message representing a sleep state.
548  */
549 static pm_message_t resume_event(pm_message_t sleep_state)
550 {
551         switch (sleep_state.event) {
552         case PM_EVENT_SUSPEND:
553                 return PMSG_RESUME;
554         case PM_EVENT_FREEZE:
555         case PM_EVENT_QUIESCE:
556                 return PMSG_RECOVER;
557         case PM_EVENT_HIBERNATE:
558                 return PMSG_RESTORE;
559         }
560         return PMSG_ON;
561 }
562
563 /**
564  *      device_suspend_noirq - Shut down one device (late suspend).
565  *      @dev:   Device.
566  *      @state: PM transition of the system being carried out.
567  *
568  *      This is called with interrupts off and only a single CPU running.
569  */
570 static int device_suspend_noirq(struct device *dev, pm_message_t state)
571 {
572         int error = 0;
573
574         if (!dev->bus)
575                 return 0;
576
577         if (dev->bus->pm) {
578                 pm_dev_dbg(dev, state, "LATE ");
579                 error = pm_noirq_op(dev, dev->bus->pm, state);
580         }
581         return error;
582 }
583
584 /**
585  *      dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
586  *      @state: PM transition of the system being carried out.
587  *
588  *      Prevent device drivers from receiving interrupts and call the "noirq"
589  *      suspend handlers.
590  *
591  *      Must be called under dpm_list_mtx.
592  */
593 int dpm_suspend_noirq(pm_message_t state)
594 {
595         struct device *dev;
596         int error = 0;
597
598         suspend_device_irqs();
599         mutex_lock(&dpm_list_mtx);
600         list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
601                 error = device_suspend_noirq(dev, state);
602                 if (error) {
603                         pm_dev_err(dev, state, " late", error);
604                         break;
605                 }
606                 dev->power.status = DPM_OFF_IRQ;
607         }
608         mutex_unlock(&dpm_list_mtx);
609         if (error)
610                 dpm_resume_noirq(resume_event(state));
611         return error;
612 }
613 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
614
615 /**
616  *      device_suspend - Save state of one device.
617  *      @dev:   Device.
618  *      @state: PM transition of the system being carried out.
619  */
620 static int device_suspend(struct device *dev, pm_message_t state)
621 {
622         int error = 0;
623
624         down(&dev->sem);
625
626         if (dev->class) {
627                 if (dev->class->pm) {
628                         pm_dev_dbg(dev, state, "class ");
629                         error = pm_op(dev, dev->class->pm, state);
630                 } else if (dev->class->suspend) {
631                         pm_dev_dbg(dev, state, "legacy class ");
632                         error = dev->class->suspend(dev, state);
633                         suspend_report_result(dev->class->suspend, error);
634                 }
635                 if (error)
636                         goto End;
637         }
638
639         if (dev->type) {
640                 if (dev->type->pm) {
641                         pm_dev_dbg(dev, state, "type ");
642                         error = pm_op(dev, dev->type->pm, state);
643                 }
644                 if (error)
645                         goto End;
646         }
647
648         if (dev->bus) {
649                 if (dev->bus->pm) {
650                         pm_dev_dbg(dev, state, "");
651                         error = pm_op(dev, dev->bus->pm, state);
652                 } else if (dev->bus->suspend) {
653                         pm_dev_dbg(dev, state, "legacy ");
654                         error = dev->bus->suspend(dev, state);
655                         suspend_report_result(dev->bus->suspend, error);
656                 }
657         }
658  End:
659         up(&dev->sem);
660
661         return error;
662 }
663
/**
 *	dpm_suspend - Suspend every device.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the appropriate "suspend" callbacks for all devices.
 *
 *	dpm_list_mtx is dropped around each device_suspend() call; each
 *	device is pinned with get_device() across that window, and suspended
 *	devices are collected on a temporary list so the iteration survives
 *	concurrent removals.  Stops at the first error, leaving devices that
 *	were not reached unsuspended.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_list back to front so children are suspended before
	 * their parents. */
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF;
		/* Skip the move if the device was deleted from dpm_list
		 * while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Reinsert the suspended devices at the end of dpm_list. */
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
700
701 /**
702  *      device_prepare - Execute the ->prepare() callback(s) for given device.
703  *      @dev:   Device.
704  *      @state: PM transition of the system being carried out.
705  */
706 static int device_prepare(struct device *dev, pm_message_t state)
707 {
708         int error = 0;
709
710         down(&dev->sem);
711
712         if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
713                 pm_dev_dbg(dev, state, "preparing ");
714                 error = dev->bus->pm->prepare(dev);
715                 suspend_report_result(dev->bus->pm->prepare, error);
716                 if (error)
717                         goto End;
718         }
719
720         if (dev->type && dev->type->pm && dev->type->pm->prepare) {
721                 pm_dev_dbg(dev, state, "preparing type ");
722                 error = dev->type->pm->prepare(dev);
723                 suspend_report_result(dev->type->pm->prepare, error);
724                 if (error)
725                         goto End;
726         }
727
728         if (dev->class && dev->class->pm && dev->class->pm->prepare) {
729                 pm_dev_dbg(dev, state, "preparing class ");
730                 error = dev->class->pm->prepare(dev);
731                 suspend_report_result(dev->class->pm->prepare, error);
732         }
733  End:
734         up(&dev->sem);
735
736         return error;
737 }
738
/**
 *	dpm_prepare - Prepare all devices for a PM transition.
 *	@state: PM transition of the system being carried out.
 *
 *	Execute the ->prepare() callback for all devices.
 *
 *	dpm_list_mtx is dropped around each device_prepare() call; each
 *	device is pinned with get_device() across that window, and prepared
 *	devices are collected on a temporary list so the iteration survives
 *	concurrent removals.  A -EAGAIN from ->prepare() skips the device
 *	without aborting; any other error stops the walk.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* From now on device_pm_add() warns about parentless devices. */
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				/* -EAGAIN means "skip me, keep going". */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		/* Skip the move if the device was deleted from dpm_list
		 * while the lock was dropped. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	/* Put the prepared devices back onto dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}
785
786 /**
787  *      dpm_suspend_start - Save state and stop all devices in system.
788  *      @state: PM transition of the system being carried out.
789  *
790  *      Prepare and suspend all devices.
791  */
792 int dpm_suspend_start(pm_message_t state)
793 {
794         int error;
795
796         might_sleep();
797         error = dpm_prepare(state);
798         if (!error)
799                 error = dpm_suspend(state);
800         return error;
801 }
802 EXPORT_SYMBOL_GPL(dpm_suspend_start);
803
/**
 *	__suspend_report_result - report a failing PM callback
 *	@function: Name of the calling function, for the log message.
 *	@fn: The callback whose return value is being reported.
 *	@ret: The callback's return value; nothing is printed when it is 0.
 */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);