1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in dpm_list are in a depth first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         /* Skip PM setup/initialization. */
128         if (device_pm_not_required(dev))
129                 return;
130
131         pr_debug("Adding info for %s:%s\n",
132                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
133         device_pm_check_callbacks(dev);
134         mutex_lock(&dpm_list_mtx);
135         if (dev->parent && dev->parent->power.is_prepared)
136                 dev_warn(dev, "parent %s should not be sleeping\n",
137                         dev_name(dev->parent));
138         list_add_tail(&dev->power.entry, &dpm_list);
139         dev->power.in_dpm_list = true;
140         mutex_unlock(&dpm_list_mtx);
141 }
142
143 /**
144  * device_pm_remove - Remove a device from the PM core's list of active devices.
145  * @dev: Device to be removed from the list.
146  */
147 void device_pm_remove(struct device *dev)
148 {
149         if (device_pm_not_required(dev))
150                 return;
151
152         pr_debug("Removing info for %s:%s\n",
153                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
154         complete_all(&dev->power.completion);
155         mutex_lock(&dpm_list_mtx);
156         list_del_init(&dev->power.entry);
157         dev->power.in_dpm_list = false;
158         mutex_unlock(&dpm_list_mtx);
159         device_wakeup_disable(dev);
160         pm_runtime_remove(dev);
161         device_pm_check_callbacks(dev);
162 }
163
164 /**
165  * device_pm_move_before - Move device in the PM core's list of active devices.
166  * @deva: Device to move in dpm_list.
167  * @devb: Device @deva should come before.
168  */
169 void device_pm_move_before(struct device *deva, struct device *devb)
170 {
171         pr_debug("Moving %s:%s before %s:%s\n",
172                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
173                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
174         /* Delete deva from dpm_list and reinsert before devb. */
175         list_move_tail(&deva->power.entry, &devb->power.entry);
176 }
177
178 /**
179  * device_pm_move_after - Move device in the PM core's list of active devices.
180  * @deva: Device to move in dpm_list.
181  * @devb: Device @deva should come after.
182  */
183 void device_pm_move_after(struct device *deva, struct device *devb)
184 {
185         pr_debug("Moving %s:%s after %s:%s\n",
186                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188         /* Delete deva from dpm_list and reinsert after devb. */
189         list_move(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193  * device_pm_move_last - Move device to end of the PM core's list of devices.
194  * @dev: Device to move in dpm_list.
195  */
196 void device_pm_move_last(struct device *dev)
197 {
198         pr_debug("Moving %s:%s to end of list\n",
199                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
200         list_move_tail(&dev->power.entry, &dpm_list);
201 }
202
203 static ktime_t initcall_debug_start(struct device *dev, void *cb)
204 {
205         if (!pm_print_times_enabled)
206                 return 0;
207
208         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
209                  task_pid_nr(current),
210                  dev->parent ? dev_name(dev->parent) : "none");
211         return ktime_get();
212 }
213
214 static void initcall_debug_report(struct device *dev, ktime_t calltime,
215                                   void *cb, int error)
216 {
217         ktime_t rettime;
218         s64 nsecs;
219
220         if (!pm_print_times_enabled)
221                 return;
222
223         rettime = ktime_get();
224         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
225
226         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
227                  (unsigned long long)nsecs >> 10);
228 }
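/*
 * For context: pm_print_times_enabled is toggled through the
 * /sys/power/pm_print_times attribute and is also switched on when the
 * initcall_debug boot parameter is set, which is what makes the two helpers
 * above emit per-callback timing lines.
 */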
229
230 /**
231  * dpm_wait - Wait for a PM operation to complete.
232  * @dev: Device to wait for.
233  * @async: If unset, wait only if the device's power.async_suspend flag is set.
234  */
235 static void dpm_wait(struct device *dev, bool async)
236 {
237         if (!dev)
238                 return;
239
240         if (async || (pm_async_enabled && dev->power.async_suspend))
241                 wait_for_completion(&dev->power.completion);
242 }
243
244 static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246         dpm_wait(dev, *((bool *)async_ptr));
247         return 0;
248 }
249
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252         device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254
255 static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257         struct device_link *link;
258         int idx;
259
260         idx = device_links_read_lock();
261
262         /*
263          * If the supplier goes away right after we've checked the link to it,
264          * we'll wait for its completion to change the state, but that's fine,
265          * because the only things that will block as a result are the SRCU
266          * callbacks freeing the link objects for the links in the list we're
267          * walking.
268          */
269         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
270                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271                         dpm_wait(link->supplier, async);
272
273         device_links_read_unlock(idx);
274 }
275
276 static void dpm_wait_for_superior(struct device *dev, bool async)
277 {
278         dpm_wait(dev->parent, async);
279         dpm_wait_for_suppliers(dev, async);
280 }
281
282 static void dpm_wait_for_consumers(struct device *dev, bool async)
283 {
284         struct device_link *link;
285         int idx;
286
287         idx = device_links_read_lock();
288
289         /*
290          * The status of a device link can only be changed from "dormant" by a
291          * probe, but that cannot happen during system suspend/resume.  In
292          * theory it can change to "dormant" at that time, but then it is
294          * reasonable to wait for the target device anyway (e.g. if it goes
294          * away, it's better to wait for it to go away completely and then
295          * continue instead of trying to continue in parallel with its
296          * unregistration).
297          */
298         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
299                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
300                         dpm_wait(link->consumer, async);
301
302         device_links_read_unlock(idx);
303 }
304
305 static void dpm_wait_for_subordinate(struct device *dev, bool async)
306 {
307         dpm_wait_for_children(dev, async);
308         dpm_wait_for_consumers(dev, async);
309 }
310
311 /**
312  * pm_op - Return the PM operation appropriate for given PM event.
313  * @ops: PM operations to choose from.
314  * @state: PM transition of the system being carried out.
315  */
316 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
317 {
318         switch (state.event) {
319 #ifdef CONFIG_SUSPEND
320         case PM_EVENT_SUSPEND:
321                 return ops->suspend;
322         case PM_EVENT_RESUME:
323                 return ops->resume;
324 #endif /* CONFIG_SUSPEND */
325 #ifdef CONFIG_HIBERNATE_CALLBACKS
326         case PM_EVENT_FREEZE:
327         case PM_EVENT_QUIESCE:
328                 return ops->freeze;
329         case PM_EVENT_HIBERNATE:
330                 return ops->poweroff;
331         case PM_EVENT_THAW:
332         case PM_EVENT_RECOVER:
333                 return ops->thaw;
335         case PM_EVENT_RESTORE:
336                 return ops->restore;
337 #endif /* CONFIG_HIBERNATE_CALLBACKS */
338         }
339
340         return NULL;
341 }
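/*
 * Illustrative sketch (not part of the original file): how a driver's
 * dev_pm_ops maps onto the selection made by pm_op() above.  With the
 * hypothetical callbacks below, PM_EVENT_SUSPEND resolves to foo_suspend()
 * and PM_EVENT_RESUME to foo_resume(); the hibernation events reuse the same
 * pair via SET_SYSTEM_SLEEP_PM_OPS().  The foo_* names are placeholders, not
 * kernel symbols.
 */
#if 0
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};
#endif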
342
343 /**
344  * pm_late_early_op - Return the PM operation appropriate for given PM event.
345  * @ops: PM operations to choose from.
346  * @state: PM transition of the system being carried out.
347  *
348  * Runtime PM is disabled for the target device while the returned callback runs.
349  */
350 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
351                                       pm_message_t state)
352 {
353         switch (state.event) {
354 #ifdef CONFIG_SUSPEND
355         case PM_EVENT_SUSPEND:
356                 return ops->suspend_late;
357         case PM_EVENT_RESUME:
358                 return ops->resume_early;
359 #endif /* CONFIG_SUSPEND */
360 #ifdef CONFIG_HIBERNATE_CALLBACKS
361         case PM_EVENT_FREEZE:
362         case PM_EVENT_QUIESCE:
363                 return ops->freeze_late;
364         case PM_EVENT_HIBERNATE:
365                 return ops->poweroff_late;
366         case PM_EVENT_THAW:
367         case PM_EVENT_RECOVER:
368                 return ops->thaw_early;
369         case PM_EVENT_RESTORE:
370                 return ops->restore_early;
371 #endif /* CONFIG_HIBERNATE_CALLBACKS */
372         }
373
374         return NULL;
375 }
376
377 /**
378  * pm_noirq_op - Return the PM operation appropriate for given PM event.
379  * @ops: PM operations to choose from.
380  * @state: PM transition of the system being carried out.
381  *
382  * The driver of the target device will not receive interrupts while the
383  * returned callback is being executed.
384  */
385 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
386 {
387         switch (state.event) {
388 #ifdef CONFIG_SUSPEND
389         case PM_EVENT_SUSPEND:
390                 return ops->suspend_noirq;
391         case PM_EVENT_RESUME:
392                 return ops->resume_noirq;
393 #endif /* CONFIG_SUSPEND */
394 #ifdef CONFIG_HIBERNATE_CALLBACKS
395         case PM_EVENT_FREEZE:
396         case PM_EVENT_QUIESCE:
397                 return ops->freeze_noirq;
398         case PM_EVENT_HIBERNATE:
399                 return ops->poweroff_noirq;
400         case PM_EVENT_THAW:
401         case PM_EVENT_RECOVER:
402                 return ops->thaw_noirq;
403         case PM_EVENT_RESTORE:
404                 return ops->restore_noirq;
405 #endif /* CONFIG_HIBERNATE_CALLBACKS */
406         }
407
408         return NULL;
409 }
410
411 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
412 {
413         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
414                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
415                 ", may wakeup" : "");
416 }
417
418 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
419                         int error)
420 {
421         pr_err("Device %s failed to %s%s: error %d\n",
422                dev_name(dev), pm_verb(state.event), info, error);
423 }
424
425 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
426                           const char *info)
427 {
428         ktime_t calltime;
429         u64 usecs64;
430         int usecs;
431
432         calltime = ktime_get();
433         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
434         do_div(usecs64, NSEC_PER_USEC);
435         usecs = usecs64;
436         if (usecs == 0)
437                 usecs = 1;
438
439         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
440                   info ?: "", info ? " " : "", pm_verb(state.event),
441                   error ? "aborted" : "complete",
442                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
443 }
444
445 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
446                             pm_message_t state, const char *info)
447 {
448         ktime_t calltime;
449         int error;
450
451         if (!cb)
452                 return 0;
453
454         calltime = initcall_debug_start(dev, cb);
455
456         pm_dev_dbg(dev, state, info);
457         trace_device_pm_callback_start(dev, info, state.event);
458         error = cb(dev);
459         trace_device_pm_callback_end(dev, error);
460         suspend_report_result(cb, error);
461
462         initcall_debug_report(dev, calltime, cb, error);
463
464         return error;
465 }
466
467 #ifdef CONFIG_DPM_WATCHDOG
468 struct dpm_watchdog {
469         struct device           *dev;
470         struct task_struct      *tsk;
471         struct timer_list       timer;
472 };
473
474 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
475         struct dpm_watchdog wd
476
477 /**
478  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
479  * @t: The timer that PM watchdog depends on.
480  *
481  * Called when a driver has timed out suspending or resuming.
482  * There's not much we can do here to recover so panic() to
483  * capture a crash-dump in pstore.
484  */
485 static void dpm_watchdog_handler(struct timer_list *t)
486 {
487         struct dpm_watchdog *wd = from_timer(wd, t, timer);
488
489         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
490         show_stack(wd->tsk, NULL);
491         panic("%s %s: unrecoverable failure\n",
492                 dev_driver_string(wd->dev), dev_name(wd->dev));
493 }
494
495 /**
496  * dpm_watchdog_set - Enable pm watchdog for given device.
497  * @wd: Watchdog. Must be allocated on the stack.
498  * @dev: Device to handle.
499  */
500 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
501 {
502         struct timer_list *timer = &wd->timer;
503
504         wd->dev = dev;
505         wd->tsk = current;
506
507         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
508         /* use same timeout value for both suspend and resume */
509         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
510         add_timer(timer);
511 }
512
513 /**
514  * dpm_watchdog_clear - Disable suspend/resume watchdog.
515  * @wd: Watchdog to disable.
516  */
517 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
518 {
519         struct timer_list *timer = &wd->timer;
520
521         del_timer_sync(timer);
522         destroy_timer_on_stack(timer);
523 }
524 #else
525 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
526 #define dpm_watchdog_set(x, y)
527 #define dpm_watchdog_clear(x)
528 #endif
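/*
 * Note: DPM_WATCHDOG and DPM_WATCHDOG_TIMEOUT are Kconfig options; the
 * timeout is expressed in seconds, hence the HZ * CONFIG_DPM_WATCHDOG_TIMEOUT
 * conversion in dpm_watchdog_set() above.
 */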
529
530 /*------------------------- Resume routines -------------------------*/
531
532 /**
533  * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
534  * @dev: Target device.
535  *
536  * Make the core skip the "early resume" and "resume" phases for @dev.
537  *
538  * This function can be called by middle-layer code during the "noirq" phase of
539  * system resume if necessary, but not by device drivers.
540  */
541 void dev_pm_skip_next_resume_phases(struct device *dev)
542 {
543         dev->power.is_late_suspended = false;
544         dev->power.is_suspended = false;
545 }
546
547 /**
548  * suspend_event - Return a "suspend" message for given "resume" one.
549  * @resume_msg: PM message representing a system-wide resume transition.
550  */
551 static pm_message_t suspend_event(pm_message_t resume_msg)
552 {
553         switch (resume_msg.event) {
554         case PM_EVENT_RESUME:
555                 return PMSG_SUSPEND;
556         case PM_EVENT_THAW:
557         case PM_EVENT_RESTORE:
558                 return PMSG_FREEZE;
559         case PM_EVENT_RECOVER:
560                 return PMSG_HIBERNATE;
561         }
562         return PMSG_ON;
563 }
564
565 /**
566  * dev_pm_may_skip_resume - System-wide device resume optimization check.
567  * @dev: Target device.
568  *
569  * Checks whether or not the device may be left in suspend after a system-wide
570  * transition to the working state.
571  */
572 bool dev_pm_may_skip_resume(struct device *dev)
573 {
574         return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
575 }
576
577 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
578                                                 pm_message_t state,
579                                                 const char **info_p)
580 {
581         pm_callback_t callback;
582         const char *info;
583
584         if (dev->pm_domain) {
585                 info = "noirq power domain ";
586                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
587         } else if (dev->type && dev->type->pm) {
588                 info = "noirq type ";
589                 callback = pm_noirq_op(dev->type->pm, state);
590         } else if (dev->class && dev->class->pm) {
591                 info = "noirq class ";
592                 callback = pm_noirq_op(dev->class->pm, state);
593         } else if (dev->bus && dev->bus->pm) {
594                 info = "noirq bus ";
595                 callback = pm_noirq_op(dev->bus->pm, state);
596         } else {
597                 return NULL;
598         }
599
600         if (info_p)
601                 *info_p = info;
602
603         return callback;
604 }
605
606 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
607                                                  pm_message_t state,
608                                                  const char **info_p);
609
610 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
611                                                 pm_message_t state,
612                                                 const char **info_p);
613
614 /**
615  * device_resume_noirq - Execute a "noirq resume" callback for given device.
616  * @dev: Device to handle.
617  * @state: PM transition of the system being carried out.
618  * @async: If true, the device is being resumed asynchronously.
619  *
620  * The driver of @dev will not receive interrupts while this function is being
621  * executed.
622  */
623 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
624 {
625         pm_callback_t callback;
626         const char *info;
627         bool skip_resume;
628         int error = 0;
629
630         TRACE_DEVICE(dev);
631         TRACE_RESUME(0);
632
633         if (dev->power.syscore || dev->power.direct_complete)
634                 goto Out;
635
636         if (!dev->power.is_noirq_suspended)
637                 goto Out;
638
639         dpm_wait_for_superior(dev, async);
640
641         skip_resume = dev_pm_may_skip_resume(dev);
642
643         callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
644         if (callback)
645                 goto Run;
646
647         if (skip_resume)
648                 goto Skip;
649
650         if (dev_pm_smart_suspend_and_suspended(dev)) {
651                 pm_message_t suspend_msg = suspend_event(state);
652
653                 /*
654                  * If "freeze" callbacks have been skipped during a transition
655                  * related to hibernation, the subsequent "thaw" callbacks must
656                  * be skipped too or bad things may happen.  Otherwise, resume
657                  * callbacks are going to be run for the device, so its runtime
658                  * PM status must be changed to reflect the new state after the
659                  * transition under way.
660                  */
661                 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
662                     !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
663                         if (state.event == PM_EVENT_THAW) {
664                                 skip_resume = true;
665                                 goto Skip;
666                         } else {
667                                 pm_runtime_set_active(dev);
668                         }
669                 }
670         }
671
672         if (dev->driver && dev->driver->pm) {
673                 info = "noirq driver ";
674                 callback = pm_noirq_op(dev->driver->pm, state);
675         }
676
677 Run:
678         error = dpm_run_callback(callback, dev, state, info);
679
680 Skip:
681         dev->power.is_noirq_suspended = false;
682
683         if (skip_resume) {
684                 /*
685                  * The device is going to be left in suspend, but it might not
686                  * have been in runtime suspend before the system suspended, so
687                  * its runtime PM status needs to be updated to avoid confusing
688                  * the runtime PM framework when runtime PM is enabled for the
689                  * device again.
690                  */
691                 pm_runtime_set_suspended(dev);
692                 dev_pm_skip_next_resume_phases(dev);
693         }
694
695 Out:
696         complete_all(&dev->power.completion);
697         TRACE_RESUME(error);
698         return error;
699 }
700
701 static bool is_async(struct device *dev)
702 {
703         return dev->power.async_suspend && pm_async_enabled
704                 && !pm_trace_is_enabled();
705 }
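/*
 * power.async_suspend is an opt-in flag: subsystems and drivers typically set
 * it via device_enable_async_suspend(), while pm_async_enabled mirrors the
 * /sys/power/pm_async knob that lets user space disable asynchronous
 * suspend/resume handling globally.
 */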
706
707 static bool dpm_async_fn(struct device *dev, async_func_t func)
708 {
709         reinit_completion(&dev->power.completion);
710
711         if (is_async(dev)) {
712                 get_device(dev);
713                 async_schedule(func, dev);
714                 return true;
715         }
716
717         return false;
718 }
719
720 static void async_resume_noirq(void *data, async_cookie_t cookie)
721 {
722         struct device *dev = (struct device *)data;
723         int error;
724
725         error = device_resume_noirq(dev, pm_transition, true);
726         if (error)
727                 pm_dev_err(dev, pm_transition, " async", error);
728
729         put_device(dev);
730 }
731
732 void dpm_noirq_resume_devices(pm_message_t state)
733 {
734         struct device *dev;
735         ktime_t starttime = ktime_get();
736
737         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
738         mutex_lock(&dpm_list_mtx);
739         pm_transition = state;
740
741         /*
742          * Advance the async threads upfront,
743          * in case the start of the async threads is
744          * delayed by non-async resuming devices.
745          */
746         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
747                 dpm_async_fn(dev, async_resume_noirq);
748
749         while (!list_empty(&dpm_noirq_list)) {
750                 dev = to_device(dpm_noirq_list.next);
751                 get_device(dev);
752                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
753                 mutex_unlock(&dpm_list_mtx);
754
755                 if (!is_async(dev)) {
756                         int error;
757
758                         error = device_resume_noirq(dev, state, false);
759                         if (error) {
760                                 suspend_stats.failed_resume_noirq++;
761                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
762                                 dpm_save_failed_dev(dev_name(dev));
763                                 pm_dev_err(dev, state, " noirq", error);
764                         }
765                 }
766
767                 mutex_lock(&dpm_list_mtx);
768                 put_device(dev);
769         }
770         mutex_unlock(&dpm_list_mtx);
771         async_synchronize_full();
772         dpm_show_time(starttime, state, 0, "noirq");
773         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
774 }
775
776 void dpm_noirq_end(void)
777 {
778         resume_device_irqs();
779         device_wakeup_disarm_wake_irqs();
780         cpuidle_resume();
781 }
782
783 /**
784  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
785  * @state: PM transition of the system being carried out.
786  *
787  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
788  * allow device drivers' interrupt handlers to be called.
789  */
790 void dpm_resume_noirq(pm_message_t state)
791 {
792         dpm_noirq_resume_devices(state);
793         dpm_noirq_end();
794 }
795
796 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
797                                                 pm_message_t state,
798                                                 const char **info_p)
799 {
800         pm_callback_t callback;
801         const char *info;
802
803         if (dev->pm_domain) {
804                 info = "early power domain ";
805                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
806         } else if (dev->type && dev->type->pm) {
807                 info = "early type ";
808                 callback = pm_late_early_op(dev->type->pm, state);
809         } else if (dev->class && dev->class->pm) {
810                 info = "early class ";
811                 callback = pm_late_early_op(dev->class->pm, state);
812         } else if (dev->bus && dev->bus->pm) {
813                 info = "early bus ";
814                 callback = pm_late_early_op(dev->bus->pm, state);
815         } else {
816                 return NULL;
817         }
818
819         if (info_p)
820                 *info_p = info;
821
822         return callback;
823 }
824
825 /**
826  * device_resume_early - Execute an "early resume" callback for given device.
827  * @dev: Device to handle.
828  * @state: PM transition of the system being carried out.
829  * @async: If true, the device is being resumed asynchronously.
830  *
831  * Runtime PM is disabled for @dev while this function is being executed.
832  */
833 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
834 {
835         pm_callback_t callback;
836         const char *info;
837         int error = 0;
838
839         TRACE_DEVICE(dev);
840         TRACE_RESUME(0);
841
842         if (dev->power.syscore || dev->power.direct_complete)
843                 goto Out;
844
845         if (!dev->power.is_late_suspended)
846                 goto Out;
847
848         dpm_wait_for_superior(dev, async);
849
850         callback = dpm_subsys_resume_early_cb(dev, state, &info);
851
852         if (!callback && dev->driver && dev->driver->pm) {
853                 info = "early driver ";
854                 callback = pm_late_early_op(dev->driver->pm, state);
855         }
856
857         error = dpm_run_callback(callback, dev, state, info);
858         dev->power.is_late_suspended = false;
859
860  Out:
861         TRACE_RESUME(error);
862
863         pm_runtime_enable(dev);
864         complete_all(&dev->power.completion);
865         return error;
866 }
867
868 static void async_resume_early(void *data, async_cookie_t cookie)
869 {
870         struct device *dev = (struct device *)data;
871         int error;
872
873         error = device_resume_early(dev, pm_transition, true);
874         if (error)
875                 pm_dev_err(dev, pm_transition, " async", error);
876
877         put_device(dev);
878 }
879
880 /**
881  * dpm_resume_early - Execute "early resume" callbacks for all devices.
882  * @state: PM transition of the system being carried out.
883  */
884 void dpm_resume_early(pm_message_t state)
885 {
886         struct device *dev;
887         ktime_t starttime = ktime_get();
888
889         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
890         mutex_lock(&dpm_list_mtx);
891         pm_transition = state;
892
893         /*
894          * Advance the async threads upfront,
895          * in case the start of the async threads is
896          * delayed by non-async resuming devices.
897          */
898         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
899                 dpm_async_fn(dev, async_resume_early);
900
901         while (!list_empty(&dpm_late_early_list)) {
902                 dev = to_device(dpm_late_early_list.next);
903                 get_device(dev);
904                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
905                 mutex_unlock(&dpm_list_mtx);
906
907                 if (!is_async(dev)) {
908                         int error;
909
910                         error = device_resume_early(dev, state, false);
911                         if (error) {
912                                 suspend_stats.failed_resume_early++;
913                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
914                                 dpm_save_failed_dev(dev_name(dev));
915                                 pm_dev_err(dev, state, " early", error);
916                         }
917                 }
918                 mutex_lock(&dpm_list_mtx);
919                 put_device(dev);
920         }
921         mutex_unlock(&dpm_list_mtx);
922         async_synchronize_full();
923         dpm_show_time(starttime, state, 0, "early");
924         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
925 }
926
927 /**
928  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
929  * @state: PM transition of the system being carried out.
930  */
931 void dpm_resume_start(pm_message_t state)
932 {
933         dpm_resume_noirq(state);
934         dpm_resume_early(state);
935 }
936 EXPORT_SYMBOL_GPL(dpm_resume_start);
937
938 /**
939  * device_resume - Execute "resume" callbacks for given device.
940  * @dev: Device to handle.
941  * @state: PM transition of the system being carried out.
942  * @async: If true, the device is being resumed asynchronously.
943  */
944 static int device_resume(struct device *dev, pm_message_t state, bool async)
945 {
946         pm_callback_t callback = NULL;
947         const char *info = NULL;
948         int error = 0;
949         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
950
951         TRACE_DEVICE(dev);
952         TRACE_RESUME(0);
953
954         if (dev->power.syscore)
955                 goto Complete;
956
957         if (dev->power.direct_complete) {
958                 /* Match the pm_runtime_disable() in __device_suspend(). */
959                 pm_runtime_enable(dev);
960                 goto Complete;
961         }
962
963         dpm_wait_for_superior(dev, async);
964         dpm_watchdog_set(&wd, dev);
965         device_lock(dev);
966
967         /*
968          * This is a fib.  But we'll allow new children to be added below
969          * a resumed device, even if the device hasn't been completed yet.
970          */
971         dev->power.is_prepared = false;
972
973         if (!dev->power.is_suspended)
974                 goto Unlock;
975
976         if (dev->pm_domain) {
977                 info = "power domain ";
978                 callback = pm_op(&dev->pm_domain->ops, state);
979                 goto Driver;
980         }
981
982         if (dev->type && dev->type->pm) {
983                 info = "type ";
984                 callback = pm_op(dev->type->pm, state);
985                 goto Driver;
986         }
987
988         if (dev->class && dev->class->pm) {
989                 info = "class ";
990                 callback = pm_op(dev->class->pm, state);
991                 goto Driver;
992         }
993
994         if (dev->bus) {
995                 if (dev->bus->pm) {
996                         info = "bus ";
997                         callback = pm_op(dev->bus->pm, state);
998                 } else if (dev->bus->resume) {
999                         info = "legacy bus ";
1000                         callback = dev->bus->resume;
1001                         goto End;
1002                 }
1003         }
1004
1005  Driver:
1006         if (!callback && dev->driver && dev->driver->pm) {
1007                 info = "driver ";
1008                 callback = pm_op(dev->driver->pm, state);
1009         }
1010
1011  End:
1012         error = dpm_run_callback(callback, dev, state, info);
1013         dev->power.is_suspended = false;
1014
1015  Unlock:
1016         device_unlock(dev);
1017         dpm_watchdog_clear(&wd);
1018
1019  Complete:
1020         complete_all(&dev->power.completion);
1021
1022         TRACE_RESUME(error);
1023
1024         return error;
1025 }
1026
1027 static void async_resume(void *data, async_cookie_t cookie)
1028 {
1029         struct device *dev = (struct device *)data;
1030         int error;
1031
1032         error = device_resume(dev, pm_transition, true);
1033         if (error)
1034                 pm_dev_err(dev, pm_transition, " async", error);
1035         put_device(dev);
1036 }
1037
1038 /**
1039  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1040  * @state: PM transition of the system being carried out.
1041  *
1042  * Execute the appropriate "resume" callback for all devices whose status
1043  * indicates that they are suspended.
1044  */
1045 void dpm_resume(pm_message_t state)
1046 {
1047         struct device *dev;
1048         ktime_t starttime = ktime_get();
1049
1050         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1051         might_sleep();
1052
1053         mutex_lock(&dpm_list_mtx);
1054         pm_transition = state;
1055         async_error = 0;
1056
1057         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1058                 dpm_async_fn(dev, async_resume);
1059
1060         while (!list_empty(&dpm_suspended_list)) {
1061                 dev = to_device(dpm_suspended_list.next);
1062                 get_device(dev);
1063                 if (!is_async(dev)) {
1064                         int error;
1065
1066                         mutex_unlock(&dpm_list_mtx);
1067
1068                         error = device_resume(dev, state, false);
1069                         if (error) {
1070                                 suspend_stats.failed_resume++;
1071                                 dpm_save_failed_step(SUSPEND_RESUME);
1072                                 dpm_save_failed_dev(dev_name(dev));
1073                                 pm_dev_err(dev, state, "", error);
1074                         }
1075
1076                         mutex_lock(&dpm_list_mtx);
1077                 }
1078                 if (!list_empty(&dev->power.entry))
1079                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1080                 put_device(dev);
1081         }
1082         mutex_unlock(&dpm_list_mtx);
1083         async_synchronize_full();
1084         dpm_show_time(starttime, state, 0, NULL);
1085
1086         cpufreq_resume();
1087         devfreq_resume();
1088         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1089 }
1090
1091 /**
1092  * device_complete - Complete a PM transition for given device.
1093  * @dev: Device to handle.
1094  * @state: PM transition of the system being carried out.
1095  */
1096 static void device_complete(struct device *dev, pm_message_t state)
1097 {
1098         void (*callback)(struct device *) = NULL;
1099         const char *info = NULL;
1100
1101         if (dev->power.syscore)
1102                 return;
1103
1104         device_lock(dev);
1105
1106         if (dev->pm_domain) {
1107                 info = "completing power domain ";
1108                 callback = dev->pm_domain->ops.complete;
1109         } else if (dev->type && dev->type->pm) {
1110                 info = "completing type ";
1111                 callback = dev->type->pm->complete;
1112         } else if (dev->class && dev->class->pm) {
1113                 info = "completing class ";
1114                 callback = dev->class->pm->complete;
1115         } else if (dev->bus && dev->bus->pm) {
1116                 info = "completing bus ";
1117                 callback = dev->bus->pm->complete;
1118         }
1119
1120         if (!callback && dev->driver && dev->driver->pm) {
1121                 info = "completing driver ";
1122                 callback = dev->driver->pm->complete;
1123         }
1124
1125         if (callback) {
1126                 pm_dev_dbg(dev, state, info);
1127                 callback(dev);
1128         }
1129
1130         device_unlock(dev);
1131
1132         pm_runtime_put(dev);
1133 }
1134
1135 /**
1136  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1137  * @state: PM transition of the system being carried out.
1138  *
1139  * Execute the ->complete() callbacks for all devices whose PM status is not
1140  * DPM_ON (this allows new devices to be registered).
1141  */
1142 void dpm_complete(pm_message_t state)
1143 {
1144         struct list_head list;
1145
1146         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1147         might_sleep();
1148
1149         INIT_LIST_HEAD(&list);
1150         mutex_lock(&dpm_list_mtx);
1151         while (!list_empty(&dpm_prepared_list)) {
1152                 struct device *dev = to_device(dpm_prepared_list.prev);
1153
1154                 get_device(dev);
1155                 dev->power.is_prepared = false;
1156                 list_move(&dev->power.entry, &list);
1157                 mutex_unlock(&dpm_list_mtx);
1158
1159                 trace_device_pm_callback_start(dev, "", state.event);
1160                 device_complete(dev, state);
1161                 trace_device_pm_callback_end(dev, 0);
1162
1163                 mutex_lock(&dpm_list_mtx);
1164                 put_device(dev);
1165         }
1166         list_splice(&list, &dpm_list);
1167         mutex_unlock(&dpm_list_mtx);
1168
1169         /* Allow device probing and trigger re-probing of deferred devices */
1170         device_unblock_probing();
1171         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1172 }
1173
1174 /**
1175  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1176  * @state: PM transition of the system being carried out.
1177  *
1178  * Execute "resume" callbacks for all devices and complete the PM transition of
1179  * the system.
1180  */
1181 void dpm_resume_end(pm_message_t state)
1182 {
1183         dpm_resume(state);
1184         dpm_complete(state);
1185 }
1186 EXPORT_SYMBOL_GPL(dpm_resume_end);
1187
1188
1189 /*------------------------- Suspend routines -------------------------*/
1190
1191 /**
1192  * resume_event - Return a "resume" message for given "suspend" sleep state.
1193  * @sleep_state: PM message representing a sleep state.
1194  *
1195  * Return a PM message representing the resume event corresponding to given
1196  * sleep state.
1197  */
1198 static pm_message_t resume_event(pm_message_t sleep_state)
1199 {
1200         switch (sleep_state.event) {
1201         case PM_EVENT_SUSPEND:
1202                 return PMSG_RESUME;
1203         case PM_EVENT_FREEZE:
1204         case PM_EVENT_QUIESCE:
1205                 return PMSG_RECOVER;
1206         case PM_EVENT_HIBERNATE:
1207                 return PMSG_RESTORE;
1208         }
1209         return PMSG_ON;
1210 }
1211
1212 static void dpm_superior_set_must_resume(struct device *dev)
1213 {
1214         struct device_link *link;
1215         int idx;
1216
1217         if (dev->parent)
1218                 dev->parent->power.must_resume = true;
1219
1220         idx = device_links_read_lock();
1221
1222         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1223                 link->supplier->power.must_resume = true;
1224
1225         device_links_read_unlock(idx);
1226 }
1227
1228 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1229                                                  pm_message_t state,
1230                                                  const char **info_p)
1231 {
1232         pm_callback_t callback;
1233         const char *info;
1234
1235         if (dev->pm_domain) {
1236                 info = "noirq power domain ";
1237                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1238         } else if (dev->type && dev->type->pm) {
1239                 info = "noirq type ";
1240                 callback = pm_noirq_op(dev->type->pm, state);
1241         } else if (dev->class && dev->class->pm) {
1242                 info = "noirq class ";
1243                 callback = pm_noirq_op(dev->class->pm, state);
1244         } else if (dev->bus && dev->bus->pm) {
1245                 info = "noirq bus ";
1246                 callback = pm_noirq_op(dev->bus->pm, state);
1247         } else {
1248                 return NULL;
1249         }
1250
1251         if (info_p)
1252                 *info_p = info;
1253
1254         return callback;
1255 }
1256
1257 static bool device_must_resume(struct device *dev, pm_message_t state,
1258                                bool no_subsys_suspend_noirq)
1259 {
1260         pm_message_t resume_msg = resume_event(state);
1261
1262         /*
1263          * If all of the device driver's "noirq", "late" and "early" callbacks
1264          * are invoked directly by the core, the decision to allow the device to
1265          * stay in suspend can be based on its current runtime PM status and its
1266          * wakeup settings.
1267          */
1268         if (no_subsys_suspend_noirq &&
1269             !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1270             !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1271             !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1272                 return !pm_runtime_status_suspended(dev) &&
1273                         (resume_msg.event != PM_EVENT_RESUME ||
1274                          (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1275
1276         /*
1277          * The only safe strategy here is to require that if the device may not
1278          * be left in suspend, resume callbacks must be invoked for it.
1279          */
1280         return !dev->power.may_skip_resume;
1281 }
1282
1283 /**
1284  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1285  * @dev: Device to handle.
1286  * @state: PM transition of the system being carried out.
1287  * @async: If true, the device is being suspended asynchronously.
1288  *
1289  * The driver of @dev will not receive interrupts while this function is being
1290  * executed.
1291  */
1292 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1293 {
1294         pm_callback_t callback;
1295         const char *info;
1296         bool no_subsys_cb = false;
1297         int error = 0;
1298
1299         TRACE_DEVICE(dev);
1300         TRACE_SUSPEND(0);
1301
1302         dpm_wait_for_subordinate(dev, async);
1303
1304         if (async_error)
1305                 goto Complete;
1306
1307         if (pm_wakeup_pending()) {
1308                 async_error = -EBUSY;
1309                 goto Complete;
1310         }
1311
1312         if (dev->power.syscore || dev->power.direct_complete)
1313                 goto Complete;
1314
1315         callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1316         if (callback)
1317                 goto Run;
1318
1319         no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1320
1321         if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1322                 goto Skip;
1323
1324         if (dev->driver && dev->driver->pm) {
1325                 info = "noirq driver ";
1326                 callback = pm_noirq_op(dev->driver->pm, state);
1327         }
1328
1329 Run:
1330         error = dpm_run_callback(callback, dev, state, info);
1331         if (error) {
1332                 async_error = error;
1333                 goto Complete;
1334         }
1335
1336 Skip:
1337         dev->power.is_noirq_suspended = true;
1338
1339         if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1340                 dev->power.must_resume = dev->power.must_resume ||
1341                                 atomic_read(&dev->power.usage_count) > 1 ||
1342                                 device_must_resume(dev, state, no_subsys_cb);
1343         } else {
1344                 dev->power.must_resume = true;
1345         }
1346
1347         if (dev->power.must_resume)
1348                 dpm_superior_set_must_resume(dev);
1349
1350 Complete:
1351         complete_all(&dev->power.completion);
1352         TRACE_SUSPEND(error);
1353         return error;
1354 }
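/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * the "leave in suspend" optimization checked above would set the relevant
 * driver flags at probe time.  This assumes a platform driver; foo_probe()
 * and pdev are placeholders.
 */
#if 0
static int foo_probe(struct platform_device *pdev)
{
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND | DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}
#endif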
1355
1356 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1357 {
1358         struct device *dev = (struct device *)data;
1359         int error;
1360
1361         error = __device_suspend_noirq(dev, pm_transition, true);
1362         if (error) {
1363                 dpm_save_failed_dev(dev_name(dev));
1364                 pm_dev_err(dev, pm_transition, " async", error);
1365         }
1366
1367         put_device(dev);
1368 }
1369
1370 static int device_suspend_noirq(struct device *dev)
1371 {
1372         if (dpm_async_fn(dev, async_suspend_noirq))
1373                 return 0;
1374
1375         return __device_suspend_noirq(dev, pm_transition, false);
1376 }
1377
1378 void dpm_noirq_begin(void)
1379 {
1380         cpuidle_pause();
1381         device_wakeup_arm_wake_irqs();
1382         suspend_device_irqs();
1383 }
1384
1385 int dpm_noirq_suspend_devices(pm_message_t state)
1386 {
1387         ktime_t starttime = ktime_get();
1388         int error = 0;
1389
1390         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1391         mutex_lock(&dpm_list_mtx);
1392         pm_transition = state;
1393         async_error = 0;
1394
1395         while (!list_empty(&dpm_late_early_list)) {
1396                 struct device *dev = to_device(dpm_late_early_list.prev);
1397
1398                 get_device(dev);
1399                 mutex_unlock(&dpm_list_mtx);
1400
1401                 error = device_suspend_noirq(dev);
1402
1403                 mutex_lock(&dpm_list_mtx);
1404                 if (error) {
1405                         pm_dev_err(dev, state, " noirq", error);
1406                         dpm_save_failed_dev(dev_name(dev));
1407                         put_device(dev);
1408                         break;
1409                 }
1410                 if (!list_empty(&dev->power.entry))
1411                         list_move(&dev->power.entry, &dpm_noirq_list);
1412                 put_device(dev);
1413
1414                 if (async_error)
1415                         break;
1416         }
1417         mutex_unlock(&dpm_list_mtx);
1418         async_synchronize_full();
1419         if (!error)
1420                 error = async_error;
1421
1422         if (error) {
1423                 suspend_stats.failed_suspend_noirq++;
1424                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1425         }
1426         dpm_show_time(starttime, state, error, "noirq");
1427         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1428         return error;
1429 }
1430
1431 /**
1432  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1433  * @state: PM transition of the system being carried out.
1434  *
1435  * Prevent device drivers' interrupt handlers from being called and invoke
1436  * "noirq" suspend callbacks for all non-sysdev devices.
1437  */
1438 int dpm_suspend_noirq(pm_message_t state)
1439 {
1440         int ret;
1441
1442         dpm_noirq_begin();
1443         ret = dpm_noirq_suspend_devices(state);
1444         if (ret)
1445                 dpm_resume_noirq(resume_event(state));
1446
1447         return ret;
1448 }
1449
1450 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1451 {
1452         struct device *parent = dev->parent;
1453
1454         if (!parent)
1455                 return;
1456
1457         spin_lock_irq(&parent->power.lock);
1458
1459         if (dev->power.wakeup_path && !parent->power.ignore_children)
1460                 parent->power.wakeup_path = true;
1461
1462         spin_unlock_irq(&parent->power.lock);
1463 }
1464
1465 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1466                                                 pm_message_t state,
1467                                                 const char **info_p)
1468 {
1469         pm_callback_t callback;
1470         const char *info;
1471
1472         if (dev->pm_domain) {
1473                 info = "late power domain ";
1474                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1475         } else if (dev->type && dev->type->pm) {
1476                 info = "late type ";
1477                 callback = pm_late_early_op(dev->type->pm, state);
1478         } else if (dev->class && dev->class->pm) {
1479                 info = "late class ";
1480                 callback = pm_late_early_op(dev->class->pm, state);
1481         } else if (dev->bus && dev->bus->pm) {
1482                 info = "late bus ";
1483                 callback = pm_late_early_op(dev->bus->pm, state);
1484         } else {
1485                 return NULL;
1486         }
1487
1488         if (info_p)
1489                 *info_p = info;
1490
1491         return callback;
1492 }
1493
1494 /**
1495  * __device_suspend_late - Execute a "late suspend" callback for given device.
1496  * @dev: Device to handle.
1497  * @state: PM transition of the system being carried out.
1498  * @async: If true, the device is being suspended asynchronously.
1499  *
1500  * Runtime PM is disabled for @dev while this function is being executed.
1501  */
1502 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1503 {
1504         pm_callback_t callback;
1505         const char *info;
1506         int error = 0;
1507
1508         TRACE_DEVICE(dev);
1509         TRACE_SUSPEND(0);
1510
1511         __pm_runtime_disable(dev, false);
1512
1513         dpm_wait_for_subordinate(dev, async);
1514
1515         if (async_error)
1516                 goto Complete;
1517
1518         if (pm_wakeup_pending()) {
1519                 async_error = -EBUSY;
1520                 goto Complete;
1521         }
1522
1523         if (dev->power.syscore || dev->power.direct_complete)
1524                 goto Complete;
1525
1526         callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1527         if (callback)
1528                 goto Run;
1529
1530         if (dev_pm_smart_suspend_and_suspended(dev) &&
1531             !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1532                 goto Skip;
1533
1534         if (dev->driver && dev->driver->pm) {
1535                 info = "late driver ";
1536                 callback = pm_late_early_op(dev->driver->pm, state);
1537         }
1538
1539 Run:
1540         error = dpm_run_callback(callback, dev, state, info);
1541         if (error) {
1542                 async_error = error;
1543                 goto Complete;
1544         }
1545         dpm_propagate_wakeup_to_parent(dev);
1546
1547 Skip:
1548         dev->power.is_late_suspended = true;
1549
1550 Complete:
1551         TRACE_SUSPEND(error);
1552         complete_all(&dev->power.completion);
1553         return error;
1554 }
1555
1556 static void async_suspend_late(void *data, async_cookie_t cookie)
1557 {
1558         struct device *dev = (struct device *)data;
1559         int error;
1560
1561         error = __device_suspend_late(dev, pm_transition, true);
1562         if (error) {
1563                 dpm_save_failed_dev(dev_name(dev));
1564                 pm_dev_err(dev, pm_transition, " async", error);
1565         }
1566         put_device(dev);
1567 }
1568
1569 static int device_suspend_late(struct device *dev)
1570 {
1571         if (dpm_async_fn(dev, async_suspend_late))
1572                 return 0;
1573
1574         return __device_suspend_late(dev, pm_transition, false);
1575 }
1576
1577 /**
1578  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1579  * @state: PM transition of the system being carried out.
1580  */
1581 int dpm_suspend_late(pm_message_t state)
1582 {
1583         ktime_t starttime = ktime_get();
1584         int error = 0;
1585
1586         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1587         mutex_lock(&dpm_list_mtx);
1588         pm_transition = state;
1589         async_error = 0;
1590
1591         while (!list_empty(&dpm_suspended_list)) {
1592                 struct device *dev = to_device(dpm_suspended_list.prev);
1593
1594                 get_device(dev);
1595                 mutex_unlock(&dpm_list_mtx);
1596
1597                 error = device_suspend_late(dev);
1598
1599                 mutex_lock(&dpm_list_mtx);
1600                 if (!list_empty(&dev->power.entry))
1601                         list_move(&dev->power.entry, &dpm_late_early_list);
1602
1603                 if (error) {
1604                         pm_dev_err(dev, state, " late", error);
1605                         dpm_save_failed_dev(dev_name(dev));
1606                         put_device(dev);
1607                         break;
1608                 }
1609                 put_device(dev);
1610
1611                 if (async_error)
1612                         break;
1613         }
1614         mutex_unlock(&dpm_list_mtx);
1615         async_synchronize_full();
1616         if (!error)
1617                 error = async_error;
1618         if (error) {
1619                 suspend_stats.failed_suspend_late++;
1620                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1621                 dpm_resume_early(resume_event(state));
1622         }
1623         dpm_show_time(starttime, state, error, "late");
1624         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1625         return error;
1626 }
1627
1628 /**
1629  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1630  * @state: PM transition of the system being carried out.
1631  */
1632 int dpm_suspend_end(pm_message_t state)
1633 {
1634         ktime_t starttime = ktime_get();
1635         int error;
1636
1637         error = dpm_suspend_late(state);
1638         if (error)
1639                 goto out;
1640
1641         error = dpm_suspend_noirq(state);
1642         if (error)
1643                 dpm_resume_early(resume_event(state));
1644
1645 out:
1646         dpm_show_time(starttime, state, error, "end");
1647         return error;
1648 }
1649 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1650
1651 /**
1652  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1653  * @dev: Device to suspend.
1654  * @state: PM transition of the system being carried out.
1655  * @cb: Suspend callback to execute.
1656  * @info: string description of caller.
1657  */
1658 static int legacy_suspend(struct device *dev, pm_message_t state,
1659                           int (*cb)(struct device *dev, pm_message_t state),
1660                           const char *info)
1661 {
1662         int error;
1663         ktime_t calltime;
1664
1665         calltime = initcall_debug_start(dev, cb);
1666
1667         trace_device_pm_callback_start(dev, info, state.event);
1668         error = cb(dev, state);
1669         trace_device_pm_callback_end(dev, error);
1670         suspend_report_result(cb, error);
1671
1672         initcall_debug_report(dev, calltime, cb, error);
1673
1674         return error;
1675 }
1676
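     /**
      * dpm_clear_superiors_direct_complete - Don't let superiors be skipped.
      * @dev: Device being suspended.
      *
      * Clear the direct_complete flag of the device's parent and of all of its
      * suppliers, so that the "direct complete" optimization is not applied to
      * them during the remaining phases of the transition.
      */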
1677 static void dpm_clear_superiors_direct_complete(struct device *dev)
1678 {
1679         struct device_link *link;
1680         int idx;
1681
1682         if (dev->parent) {
1683                 spin_lock_irq(&dev->parent->power.lock);
1684                 dev->parent->power.direct_complete = false;
1685                 spin_unlock_irq(&dev->parent->power.lock);
1686         }
1687
1688         idx = device_links_read_lock();
1689
1690         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1691                 spin_lock_irq(&link->supplier->power.lock);
1692                 link->supplier->power.direct_complete = false;
1693                 spin_unlock_irq(&link->supplier->power.lock);
1694         }
1695
1696         device_links_read_unlock(idx);
1697 }
1698
1699 /**
1700  * __device_suspend - Execute "suspend" callbacks for given device.
1701  * @dev: Device to handle.
1702  * @state: PM transition of the system being carried out.
1703  * @async: If true, the device is being suspended asynchronously.
1704  */
1705 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1706 {
1707         pm_callback_t callback = NULL;
1708         const char *info = NULL;
1709         int error = 0;
1710         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1711
1712         TRACE_DEVICE(dev);
1713         TRACE_SUSPEND(0);
1714
1715         dpm_wait_for_subordinate(dev, async);
1716
1717         if (async_error) {
1718                 dev->power.direct_complete = false;
1719                 goto Complete;
1720         }
1721
1722         /*
1723          * If a device configured to wake up the system from sleep states
1724          * has been suspended at run time and there's a resume request pending
1725          * for it, this is equivalent to the device signaling wakeup, so the
1726          * system suspend operation should be aborted.
1727          */
1728         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1729                 pm_wakeup_event(dev, 0);
1730
1731         if (pm_wakeup_pending()) {
1732                 dev->power.direct_complete = false;
1733                 async_error = -EBUSY;
1734                 goto Complete;
1735         }
1736
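             /* Syscore devices are not handled by the regular suspend code paths. */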
1737         if (dev->power.syscore)
1738                 goto Complete;
1739
1740         /* Avoid direct_complete to let wakeup_path propagate. */
1741         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1742                 dev->power.direct_complete = false;
1743
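             /*
              * The runtime PM status is checked again after runtime PM has been
              * disabled, because the device may have been resumed in the
              * meantime, in which case "direct complete" cannot be used for it.
              */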
1744         if (dev->power.direct_complete) {
1745                 if (pm_runtime_status_suspended(dev)) {
1746                         pm_runtime_disable(dev);
1747                         if (pm_runtime_status_suspended(dev)) {
1748                                 pm_dev_dbg(dev, state, "direct-complete ");
1749                                 goto Complete;
1750                         }
1751
1752                         pm_runtime_enable(dev);
1753                 }
1754                 dev->power.direct_complete = false;
1755         }
1756
1757         dev->power.may_skip_resume = false;
1758         dev->power.must_resume = false;
1759
1760         dpm_watchdog_set(&wd, dev);
1761         device_lock(dev);
1762
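             /*
              * Look for the callback to run in order of decreasing precedence:
              * PM domain, device type, class, bus.  If none of them provides
              * one, fall back to the driver's own callback at the "Run" label.
              */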
1763         if (dev->pm_domain) {
1764                 info = "power domain ";
1765                 callback = pm_op(&dev->pm_domain->ops, state);
1766                 goto Run;
1767         }
1768
1769         if (dev->type && dev->type->pm) {
1770                 info = "type ";
1771                 callback = pm_op(dev->type->pm, state);
1772                 goto Run;
1773         }
1774
1775         if (dev->class && dev->class->pm) {
1776                 info = "class ";
1777                 callback = pm_op(dev->class->pm, state);
1778                 goto Run;
1779         }
1780
1781         if (dev->bus) {
1782                 if (dev->bus->pm) {
1783                         info = "bus ";
1784                         callback = pm_op(dev->bus->pm, state);
1785                 } else if (dev->bus->suspend) {
1786                         pm_dev_dbg(dev, state, "legacy bus ");
1787                         error = legacy_suspend(dev, state, dev->bus->suspend,
1788                                                 "legacy bus ");
1789                         goto End;
1790                 }
1791         }
1792
1793  Run:
1794         if (!callback && dev->driver && dev->driver->pm) {
1795                 info = "driver ";
1796                 callback = pm_op(dev->driver->pm, state);
1797         }
1798
1799         error = dpm_run_callback(callback, dev, state, info);
1800
1801  End:
1802         if (!error) {
1803                 dev->power.is_suspended = true;
1804                 if (device_may_wakeup(dev))
1805                         dev->power.wakeup_path = true;
1806
1807                 dpm_propagate_wakeup_to_parent(dev);
1808                 dpm_clear_superiors_direct_complete(dev);
1809         }
1810
1811         device_unlock(dev);
1812         dpm_watchdog_clear(&wd);
1813
1814  Complete:
1815         if (error)
1816                 async_error = error;
1817
1818         complete_all(&dev->power.completion);
1819         TRACE_SUSPEND(error);
1820         return error;
1821 }
1822
1823 static void async_suspend(void *data, async_cookie_t cookie)
1824 {
1825         struct device *dev = (struct device *)data;
1826         int error;
1827
1828         error = __device_suspend(dev, pm_transition, true);
1829         if (error) {
1830                 dpm_save_failed_dev(dev_name(dev));
1831                 pm_dev_err(dev, pm_transition, " async", error);
1832         }
1833
1834         put_device(dev);
1835 }
1836
1837 static int device_suspend(struct device *dev)
1838 {
1839         if (dpm_async_fn(dev, async_suspend))
1840                 return 0;
1841
1842         return __device_suspend(dev, pm_transition, false);
1843 }
1844
1845 /**
1846  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1847  * @state: PM transition of the system being carried out.
1848  */
1849 int dpm_suspend(pm_message_t state)
1850 {
1851         ktime_t starttime = ktime_get();
1852         int error = 0;
1853
1854         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1855         might_sleep();
1856
1857         devfreq_suspend();
1858         cpufreq_suspend();
1859
1860         mutex_lock(&dpm_list_mtx);
1861         pm_transition = state;
1862         async_error = 0;
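             /*
              * Suspend devices in the reverse order of their registration, so
              * that children are handled before their parents.
              */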
1863         while (!list_empty(&dpm_prepared_list)) {
1864                 struct device *dev = to_device(dpm_prepared_list.prev);
1865
1866                 get_device(dev);
1867                 mutex_unlock(&dpm_list_mtx);
1868
1869                 error = device_suspend(dev);
1870
1871                 mutex_lock(&dpm_list_mtx);
1872                 if (error) {
1873                         pm_dev_err(dev, state, "", error);
1874                         dpm_save_failed_dev(dev_name(dev));
1875                         put_device(dev);
1876                         break;
1877                 }
1878                 if (!list_empty(&dev->power.entry))
1879                         list_move(&dev->power.entry, &dpm_suspended_list);
1880                 put_device(dev);
1881                 if (async_error)
1882                         break;
1883         }
1884         mutex_unlock(&dpm_list_mtx);
1885         async_synchronize_full();
1886         if (!error)
1887                 error = async_error;
1888         if (error) {
1889                 suspend_stats.failed_suspend++;
1890                 dpm_save_failed_step(SUSPEND_SUSPEND);
1891         }
1892         dpm_show_time(starttime, state, error, NULL);
1893         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1894         return error;
1895 }
1896
1897 /**
1898  * device_prepare - Prepare a device for system power transition.
1899  * @dev: Device to handle.
1900  * @state: PM transition of the system being carried out.
1901  *
1902  * Execute the ->prepare() callback(s) for given device.  No new children of the
1903  * device may be registered after this function has returned.
1904  */
1905 static int device_prepare(struct device *dev, pm_message_t state)
1906 {
1907         int (*callback)(struct device *) = NULL;
1908         int ret = 0;
1909
1910         if (dev->power.syscore)
1911                 return 0;
1912
1913         WARN_ON(!pm_runtime_enabled(dev) &&
1914                 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1915                                               DPM_FLAG_LEAVE_SUSPENDED));
1916
1917         /*
1918          * If a device's parent goes into runtime suspend at the wrong time,
1919          * it won't be possible to resume the device.  To prevent this we
1920          * block runtime suspend here, during the prepare phase, and allow
1921          * it again during the complete phase.
1922          */
1923         pm_runtime_get_noresume(dev);
1924
1925         device_lock(dev);
1926
1927         dev->power.wakeup_path = false;
1928
1929         if (dev->power.no_pm_callbacks)
1930                 goto unlock;
1931
1932         if (dev->pm_domain)
1933                 callback = dev->pm_domain->ops.prepare;
1934         else if (dev->type && dev->type->pm)
1935                 callback = dev->type->pm->prepare;
1936         else if (dev->class && dev->class->pm)
1937                 callback = dev->class->pm->prepare;
1938         else if (dev->bus && dev->bus->pm)
1939                 callback = dev->bus->pm->prepare;
1940
1941         if (!callback && dev->driver && dev->driver->pm)
1942                 callback = dev->driver->pm->prepare;
1943
1944         if (callback)
1945                 ret = callback(dev);
1946
1947 unlock:
1948         device_unlock(dev);
1949
1950         if (ret < 0) {
1951                 suspend_report_result(callback, ret);
1952                 pm_runtime_put(dev);
1953                 return ret;
1954         }
1955         /*
1956          * A positive return value from ->prepare() means "this device appears
1957          * to be runtime-suspended and its state is fine, so if it really is
1958          * runtime-suspended, you can leave it in that state provided that you
1959          * will do the same thing with all of its descendants".  This only
1960          * applies to suspend transitions, however.
1961          */
1962         spin_lock_irq(&dev->power.lock);
1963         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1964                 ((pm_runtime_suspended(dev) && ret > 0) ||
1965                  dev->power.no_pm_callbacks) &&
1966                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1967         spin_unlock_irq(&dev->power.lock);
1968         return 0;
1969 }
1970
1971 /**
1972  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1973  * @state: PM transition of the system being carried out.
1974  *
1975  * Execute the ->prepare() callback(s) for all devices.
1976  */
1977 int dpm_prepare(pm_message_t state)
1978 {
1979         int error = 0;
1980
1981         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1982         might_sleep();
1983
1984         /*
1985          * Give the known devices a chance to complete their probes before
1986          * probing of devices is disabled below. This sync point is important
1987          * at least at boot time and during hibernation restore.
1988          */
1989         wait_for_device_probe();
1990         /*
1991          * Probing devices during suspend or hibernation is unsafe and would
1992          * make system behavior unpredictable, so prohibit device probing
1993          * here and defer any probes instead. Normal behavior will be
1994          * restored in dpm_complete().
1995          */
1996         device_block_probing();
1997
1998         mutex_lock(&dpm_list_mtx);
1999         while (!list_empty(&dpm_list)) {
2000                 struct device *dev = to_device(dpm_list.next);
2001
2002                 get_device(dev);
2003                 mutex_unlock(&dpm_list_mtx);
2004
2005                 trace_device_pm_callback_start(dev, "", state.event);
2006                 error = device_prepare(dev, state);
2007                 trace_device_pm_callback_end(dev, error);
2008
2009                 mutex_lock(&dpm_list_mtx);
2010                 if (error) {
2011                         if (error == -EAGAIN) {
2012                                 put_device(dev);
2013                                 error = 0;
2014                                 continue;
2015                         }
2016                         pr_info("Device %s not prepared for power transition: code %d\n",
2017                                 dev_name(dev), error);
2018                         put_device(dev);
2019                         break;
2020                 }
2021                 dev->power.is_prepared = true;
2022                 if (!list_empty(&dev->power.entry))
2023                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
2024                 put_device(dev);
2025         }
2026         mutex_unlock(&dpm_list_mtx);
2027         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2028         return error;
2029 }
2030
2031 /**
2032  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2033  * @state: PM transition of the system being carried out.
2034  *
2035  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2036  * callbacks for them.
2037  */
2038 int dpm_suspend_start(pm_message_t state)
2039 {
2040         ktime_t starttime = ktime_get();
2041         int error;
2042
2043         error = dpm_prepare(state);
2044         if (error) {
2045                 suspend_stats.failed_prepare++;
2046                 dpm_save_failed_step(SUSPEND_PREPARE);
2047         } else
2048                 error = dpm_suspend(state);
2049         dpm_show_time(starttime, state, error, "start");
2050         return error;
2051 }
2052 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2053
2054 void __suspend_report_result(const char *function, void *fn, int ret)
2055 {
2056         if (ret)
2057                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
2058 }
2059 EXPORT_SYMBOL_GPL(__suspend_report_result);
2060
2061 /**
2062  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2063  * @subordinate: Device that needs to wait for @dev.
2064  * @dev: Device to wait for.
2065  */
2066 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2067 {
2068         dpm_wait(dev, subordinate->power.async_suspend);
2069         return async_error;
2070 }
2071 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2072
2073 /**
2074  * dpm_for_each_dev - device iterator.
2075  * @data: data for the callback.
2076  * @fn: function to be called for each device.
2077  *
2078  * Iterate over devices in dpm_list, and call @fn for each device,
2079  * passing it @data.
2080  */
2081 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2082 {
2083         struct device *dev;
2084
2085         if (!fn)
2086                 return;
2087
2088         device_pm_lock();
2089         list_for_each_entry(dev, &dpm_list, power.entry)
2090                 fn(dev, data);
2091         device_pm_unlock();
2092 }
2093 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2094
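     /**
      * pm_ops_is_empty - Check whether a set of PM callbacks is empty.
      * @ops: Callbacks to check (may be NULL).
      *
      * Return true if @ops is NULL or if none of the system sleep callbacks
      * checked below is set in it.
      */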
2095 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2096 {
2097         if (!ops)
2098                 return true;
2099
2100         return !ops->prepare &&
2101                !ops->suspend &&
2102                !ops->suspend_late &&
2103                !ops->suspend_noirq &&
2104                !ops->resume_noirq &&
2105                !ops->resume_early &&
2106                !ops->resume &&
2107                !ops->complete;
2108 }
2109
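     /**
      * device_pm_check_callbacks - Update the no_pm_callbacks flag of a device.
      * @dev: Device to handle.
      *
      * Set power.no_pm_callbacks if neither the device's subsystem (bus, class,
      * type, PM domain) nor its driver provides any PM callbacks, so that the
      * device can be skipped during system-wide PM transitions.
      */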
2110 void device_pm_check_callbacks(struct device *dev)
2111 {
2112         spin_lock_irq(&dev->power.lock);
2113         dev->power.no_pm_callbacks =
2114                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2115                  !dev->bus->suspend && !dev->bus->resume)) &&
2116                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2117                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2118                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2119                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2120                  !dev->driver->suspend && !dev->driver->resume));
2121         spin_unlock_irq(&dev->power.lock);
2122 }
2123
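     /**
      * dev_pm_smart_suspend_and_suspended - Check the "smart suspend" status.
      * @dev: Device to check.
      *
      * Return true if DPM_FLAG_SMART_SUSPEND is set for @dev and its runtime PM
      * status is "suspended".
      */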
2124 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2125 {
2126         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2127                 pm_runtime_status_suspended(dev);
2128 }