blob: 1079e030fd9e2e8685761f73a7693ddcbb088c0e [file] [log] [blame]
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001/*
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02002 * drivers/base/power/runtime.c - Helper functions for device runtime PM
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02003 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
Alan Stern1bfee5b2010-09-25 23:35:00 +02005 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02006 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/sched.h>
Paul Gortmaker1b6bc322011-05-27 07:12:15 -040011#include <linux/export.h>
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020012#include <linux/pm_runtime.h>
Ming Leic3dc2f12011-09-27 22:54:41 +020013#include <trace/events/rpm.h>
Alan Stern7490e442010-09-25 23:35:15 +020014#include "power.h"
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020015
Alan Stern140a6c92010-09-25 23:35:07 +020016static int rpm_resume(struct device *dev, int rpmflags);
Alan Stern7490e442010-09-25 23:35:15 +020017static int rpm_suspend(struct device *dev, int rpmflags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020018
19/**
Alan Stern47693732010-09-25 23:34:46 +020020 * update_pm_runtime_accounting - Update the time accounting of power states
21 * @dev: Device to update the accounting for
22 *
23 * In order to be able to have time accounting of the various power states
24 * (as used by programs such as PowerTOP to show the effectiveness of runtime
25 * PM), we need to track the time spent in each state.
26 * update_pm_runtime_accounting must be called each time before the
27 * runtime_status field is updated, to account the time in the old state
28 * correctly.
29 */
30void update_pm_runtime_accounting(struct device *dev)
31{
32 unsigned long now = jiffies;
33 int delta;
34
35 delta = now - dev->power.accounting_timestamp;
36
37 if (delta < 0)
38 delta = 0;
39
40 dev->power.accounting_timestamp = now;
41
42 if (dev->power.disable_depth > 0)
43 return;
44
45 if (dev->power.runtime_status == RPM_SUSPENDED)
46 dev->power.suspended_jiffies += delta;
47 else
48 dev->power.active_jiffies += delta;
49}
50
/**
 * __update_runtime_status - Change a device's runtime PM status field.
 * @dev: Device to handle.
 * @status: New runtime PM status.
 *
 * The accounting update must happen first: update_pm_runtime_accounting()
 * charges the elapsed time to the state currently stored in
 * power.runtime_status, which is overwritten immediately afterwards.
 */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}
56
57/**
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +020058 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
59 * @dev: Device to handle.
60 */
61static void pm_runtime_deactivate_timer(struct device *dev)
62{
63 if (dev->power.timer_expires > 0) {
64 del_timer(&dev->power.suspend_timer);
65 dev->power.timer_expires = 0;
66 }
67}
68
69/**
70 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
71 * @dev: Device to handle.
72 */
73static void pm_runtime_cancel_pending(struct device *dev)
74{
75 pm_runtime_deactivate_timer(dev);
76 /*
77 * In case there's a request pending, make sure its work function will
78 * return without doing anything.
79 */
80 dev->power.request = RPM_REQ_NONE;
81}
82
Alan Stern15bcb912010-09-25 23:35:21 +020083/*
84 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
85 * @dev: Device to handle.
86 *
87 * Compute the autosuspend-delay expiration time based on the device's
88 * power.last_busy time. If the delay has already expired or is disabled
89 * (negative) or the power.use_autosuspend flag isn't set, return 0.
90 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
91 *
92 * This function may be called either with or without dev->power.lock held.
93 * Either way it can be racy, since power.last_busy may be updated at any time.
94 */
95unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
96{
97 int autosuspend_delay;
98 long elapsed;
99 unsigned long last_busy;
100 unsigned long expires = 0;
101
102 if (!dev->power.use_autosuspend)
103 goto out;
104
105 autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
106 if (autosuspend_delay < 0)
107 goto out;
108
109 last_busy = ACCESS_ONCE(dev->power.last_busy);
110 elapsed = jiffies - last_busy;
111 if (elapsed < 0)
112 goto out; /* jiffies has wrapped around. */
113
114 /*
115 * If the autosuspend_delay is >= 1 second, align the timer by rounding
116 * up to the nearest second.
117 */
118 expires = last_busy + msecs_to_jiffies(autosuspend_delay);
119 if (autosuspend_delay >= 1000)
120 expires = round_jiffies(expires);
121 expires += !expires;
122 if (elapsed >= expires - last_busy)
123 expires = 0; /* Already expired. */
124
125 out:
126 return expires;
127}
128EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
129
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200130/**
Alan Stern1bfee5b2010-09-25 23:35:00 +0200131 * rpm_check_suspend_allowed - Test whether a device may be suspended.
132 * @dev: Device to test.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200133 */
Alan Stern1bfee5b2010-09-25 23:35:00 +0200134static int rpm_check_suspend_allowed(struct device *dev)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200135{
136 int retval = 0;
137
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200138 if (dev->power.runtime_error)
139 retval = -EINVAL;
Rafael J. Wysocki632e2702011-07-01 22:29:15 +0200140 else if (dev->power.disable_depth > 0)
141 retval = -EACCES;
142 else if (atomic_read(&dev->power.usage_count) > 0)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200143 retval = -EAGAIN;
144 else if (!pm_children_suspended(dev))
145 retval = -EBUSY;
Alan Stern1bfee5b2010-09-25 23:35:00 +0200146
147 /* Pending resume requests take precedence over suspends. */
148 else if ((dev->power.deferred_resume
Kevin Winchester78ca7c32010-10-29 15:29:55 +0200149 && dev->power.runtime_status == RPM_SUSPENDING)
Alan Stern1bfee5b2010-09-25 23:35:00 +0200150 || (dev->power.request_pending
151 && dev->power.request == RPM_REQ_RESUME))
152 retval = -EAGAIN;
153 else if (dev->power.runtime_status == RPM_SUSPENDED)
154 retval = 1;
155
156 return retval;
157}
158
Alan Stern1bfee5b2010-09-25 23:35:00 +0200159/**
Rafael J. Wysockiad3c36a2011-09-27 21:54:52 +0200160 * __rpm_callback - Run a given runtime PM callback for a given device.
161 * @cb: Runtime PM callback to run.
162 * @dev: Device to run the callback for.
163 */
164static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
165 __releases(&dev->power.lock) __acquires(&dev->power.lock)
166{
167 int retval;
168
169 if (dev->power.irq_safe)
170 spin_unlock(&dev->power.lock);
171 else
172 spin_unlock_irq(&dev->power.lock);
173
174 retval = cb(dev);
175
176 if (dev->power.irq_safe)
177 spin_lock(&dev->power.lock);
178 else
179 spin_lock_irq(&dev->power.lock);
180
181 return retval;
182}
183
/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	/* Nonzero retval here also covers retval == 1 ("already suspended"). */
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks) {
		/* Assume ->runtime_idle() callback would have suspended. */
		retval = rpm_suspend(dev, rpmflags);
		goto out;
	}

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	dev->power.idle_notification = true;

	/*
	 * Callback lookup order: PM domain, device type, class, bus.
	 * The first layer that provides PM operations wins.
	 */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	/* The callback's return value is deliberately ignored here. */
	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}
266
267/**
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200268 * rpm_callback - Run a given runtime PM callback for a given device.
269 * @cb: Runtime PM callback to run.
270 * @dev: Device to run the callback for.
271 */
272static int rpm_callback(int (*cb)(struct device *), struct device *dev)
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200273{
274 int retval;
275
276 if (!cb)
277 return -ENOSYS;
278
Rafael J. Wysockiad3c36a2011-09-27 21:54:52 +0200279 retval = __rpm_callback(cb, dev);
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200280
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200281 dev->power.runtime_error = retval;
Rafael J. Wysocki632e2702011-07-01 22:29:15 +0200282 return retval != -EACCES ? retval : -EIO;
Rafael J. Wysocki71c63122010-10-04 22:08:01 +0200283}
284
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	/* Nonzero retval also covers retval == 1 ("already suspended"). */
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			/* Can't sleep here; busy-wait for the other suspend. */
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.deferred_resume = false;
	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	/* Callback lookup order: PM domain, device type, class, bus. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_ACTIVE);
		dev->power.deferred_resume = false;
		/* -EAGAIN/-EBUSY are transient; don't latch them as errors. */
		if (retval == -EAGAIN || retval == -EBUSY)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}
 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	/* A resume requested while the callback ran must be honored now. */
	if (dev->power.deferred_resume) {
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
459
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	/* Return 1 to tell the caller the device was already active. */
	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			/* Ask the suspending thread to resume us afterwards. */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			/* Can't sleep here; busy-wait for the other op. */
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		/* Re-check our own state now that the parent is active. */
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	/* Callback lookup order: PM domain, device type, class, bus. */
	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		rpm_idle(dev, RPM_ASYNC);

 out:
	/* Drop the parent reference taken above (not taken when irq-safe). */
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
653
654/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200655 * pm_runtime_work - Universal runtime PM work function.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200656 * @work: Work structure used for scheduling the execution of this function.
657 *
658 * Use @work to get the device object the work is to be done for, determine what
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200659 * is to be done and execute the appropriate runtime PM function.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200660 */
661static void pm_runtime_work(struct work_struct *work)
662{
663 struct device *dev = container_of(work, struct device, power.work);
664 enum rpm_request req;
665
666 spin_lock_irq(&dev->power.lock);
667
668 if (!dev->power.request_pending)
669 goto out;
670
671 req = dev->power.request;
672 dev->power.request = RPM_REQ_NONE;
673 dev->power.request_pending = false;
674
675 switch (req) {
676 case RPM_REQ_NONE:
677 break;
678 case RPM_REQ_IDLE:
Alan Stern140a6c92010-09-25 23:35:07 +0200679 rpm_idle(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200680 break;
681 case RPM_REQ_SUSPEND:
Alan Stern140a6c92010-09-25 23:35:07 +0200682 rpm_suspend(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200683 break;
Alan Stern15bcb912010-09-25 23:35:21 +0200684 case RPM_REQ_AUTOSUSPEND:
685 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
686 break;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200687 case RPM_REQ_RESUME:
Alan Stern140a6c92010-09-25 23:35:07 +0200688 rpm_resume(dev, RPM_NOWAIT);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200689 break;
690 }
691
692 out:
693 spin_unlock_irq(&dev->power.lock);
694}
695
696/**
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200697 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
698 * @data: Device pointer passed by pm_schedule_suspend().
699 *
Alan Stern1bfee5b2010-09-25 23:35:00 +0200700 * Check if the time is right and queue a suspend request.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200701 */
702static void pm_suspend_timer_fn(unsigned long data)
703{
704 struct device *dev = (struct device *)data;
705 unsigned long flags;
706 unsigned long expires;
707
708 spin_lock_irqsave(&dev->power.lock, flags);
709
710 expires = dev->power.timer_expires;
711 /* If 'expire' is after 'jiffies' we've been called too early. */
712 if (expires > 0 && !time_after(expires, jiffies)) {
713 dev->power.timer_expires = 0;
Alan Stern15bcb912010-09-25 23:35:21 +0200714 rpm_suspend(dev, dev->power.timer_autosuspends ?
715 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200716 }
717
718 spin_unlock_irqrestore(&dev->power.lock, flags);
719}
720
/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 *
 * A @delay of zero submits an asynchronous suspend request right away.
 * Otherwise, any previously scheduled or pending request is canceled and the
 * suspend timer is (re)armed.
 *
 * Returns 0 on success or a negative error code if suspending the device is
 * not allowed (as reported by rpm_check_suspend_allowed()).
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		/* No delay requested: skip the timer entirely. */
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* timer_expires == 0 means "inactive", so avoid it if jiffies wraps. */
	dev->power.timer_expires += !dev->power.timer_expires;
	/* Plain delayed suspend, not driven by the autosuspend machinery. */
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
756
757/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200758 * __pm_runtime_idle - Entry point for runtime idle operations.
Alan Stern140a6c92010-09-25 23:35:07 +0200759 * @dev: Device to send idle notification for.
760 * @rpmflags: Flag bits.
761 *
762 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
763 * return immediately if it is larger than zero. Then carry out an idle
764 * notification, either synchronous or asynchronous.
765 *
Colin Cross311aab72011-08-08 23:39:36 +0200766 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
767 * or if pm_runtime_irq_safe() has been called.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200768 */
Alan Stern140a6c92010-09-25 23:35:07 +0200769int __pm_runtime_idle(struct device *dev, int rpmflags)
770{
771 unsigned long flags;
772 int retval;
773
Colin Cross311aab72011-08-08 23:39:36 +0200774 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
775
Alan Stern140a6c92010-09-25 23:35:07 +0200776 if (rpmflags & RPM_GET_PUT) {
777 if (!atomic_dec_and_test(&dev->power.usage_count))
778 return 0;
779 }
780
781 spin_lock_irqsave(&dev->power.lock, flags);
782 retval = rpm_idle(dev, rpmflags);
783 spin_unlock_irqrestore(&dev->power.lock, flags);
784
785 return retval;
786}
787EXPORT_SYMBOL_GPL(__pm_runtime_idle);
788
789/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200790 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
Alan Stern140a6c92010-09-25 23:35:07 +0200791 * @dev: Device to suspend.
792 * @rpmflags: Flag bits.
793 *
Alan Stern15bcb912010-09-25 23:35:21 +0200794 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
795 * return immediately if it is larger than zero. Then carry out a suspend,
796 * either synchronous or asynchronous.
Alan Stern140a6c92010-09-25 23:35:07 +0200797 *
Colin Cross311aab72011-08-08 23:39:36 +0200798 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
799 * or if pm_runtime_irq_safe() has been called.
Alan Stern140a6c92010-09-25 23:35:07 +0200800 */
801int __pm_runtime_suspend(struct device *dev, int rpmflags)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200802{
803 unsigned long flags;
804 int retval;
805
Colin Cross311aab72011-08-08 23:39:36 +0200806 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
807
Alan Stern15bcb912010-09-25 23:35:21 +0200808 if (rpmflags & RPM_GET_PUT) {
809 if (!atomic_dec_and_test(&dev->power.usage_count))
810 return 0;
811 }
812
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200813 spin_lock_irqsave(&dev->power.lock, flags);
Alan Stern140a6c92010-09-25 23:35:07 +0200814 retval = rpm_suspend(dev, rpmflags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200815 spin_unlock_irqrestore(&dev->power.lock, flags);
816
817 return retval;
818}
Alan Stern140a6c92010-09-25 23:35:07 +0200819EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200820
821/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +0200822 * __pm_runtime_resume - Entry point for runtime resume operations.
Alan Stern140a6c92010-09-25 23:35:07 +0200823 * @dev: Device to resume.
Alan Stern3f9af052010-09-25 23:34:54 +0200824 * @rpmflags: Flag bits.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200825 *
Alan Stern140a6c92010-09-25 23:35:07 +0200826 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
827 * carry out a resume, either synchronous or asynchronous.
828 *
Colin Cross311aab72011-08-08 23:39:36 +0200829 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
830 * or if pm_runtime_irq_safe() has been called.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200831 */
Alan Stern140a6c92010-09-25 23:35:07 +0200832int __pm_runtime_resume(struct device *dev, int rpmflags)
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200833{
Alan Stern140a6c92010-09-25 23:35:07 +0200834 unsigned long flags;
Alan Stern1d531c12009-12-13 20:28:30 +0100835 int retval;
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200836
Colin Cross311aab72011-08-08 23:39:36 +0200837 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
838
Alan Stern140a6c92010-09-25 23:35:07 +0200839 if (rpmflags & RPM_GET_PUT)
840 atomic_inc(&dev->power.usage_count);
841
842 spin_lock_irqsave(&dev->power.lock, flags);
843 retval = rpm_resume(dev, rpmflags);
844 spin_unlock_irqrestore(&dev->power.lock, flags);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200845
846 return retval;
847}
Alan Stern140a6c92010-09-25 23:35:07 +0200848EXPORT_SYMBOL_GPL(__pm_runtime_resume);
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +0200849
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	/*
	 * The status may only be forced while runtime PM is disabled or after
	 * a callback error; otherwise the core owns the status transitions.
	 */
	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			/* Drop this device from the parent's child count. */
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		/* Nested acquisition: dev->power.lock is already held. */
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Notify outside of the lock; pm_request_idle() takes it itself. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
929
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.  The lock
 * is temporarily released (and re-acquired) while canceling the work item and
 * while sleeping on the wait queue.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		/*
		 * cancel_work_sync() may sleep and the work function takes
		 * power.lock itself, so the lock must be dropped here.
		 */
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check the condition under the lock before sleeping. */
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			/* Drop the lock while sleeping so the operation can finish. */
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
975
/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	/* Hold a usage reference so nothing suspends the device meanwhile. */
	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	/* Satisfy a pending resume request synchronously before flushing. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	/* Drop the reference taken above without triggering an idle check. */
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1011
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	/* Already disabled: just bump the depth, no flushing needed. */
	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	/* First transition from enabled to disabled: flush everything. */
	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1060
1061/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001062 * pm_runtime_enable - Enable runtime PM of a device.
Rafael J. Wysocki5e928f72009-08-18 23:38:32 +02001063 * @dev: Device to handle.
1064 */
1065void pm_runtime_enable(struct device *dev)
1066{
1067 unsigned long flags;
1068
1069 spin_lock_irqsave(&dev->power.lock, flags);
1070
1071 if (dev->power.disable_depth > 0)
1072 dev->power.disable_depth--;
1073 else
1074 dev_warn(dev, "Unbalanced %s!\n", __func__);
1075
1076 spin_unlock_irqrestore(&dev->power.lock, flags);
1077}
1078EXPORT_SYMBOL_GPL(pm_runtime_enable);
1079
1080/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001081 * pm_runtime_forbid - Block runtime PM of a device.
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001082 * @dev: Device to handle.
1083 *
1084 * Increase the device's usage count and clear its power.runtime_auto flag,
1085 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1086 * for it.
1087 */
1088void pm_runtime_forbid(struct device *dev)
1089{
1090 spin_lock_irq(&dev->power.lock);
1091 if (!dev->power.runtime_auto)
1092 goto out;
1093
1094 dev->power.runtime_auto = false;
1095 atomic_inc(&dev->power.usage_count);
Alan Stern140a6c92010-09-25 23:35:07 +02001096 rpm_resume(dev, 0);
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001097
1098 out:
1099 spin_unlock_irq(&dev->power.lock);
1100}
1101EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1102
1103/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001104 * pm_runtime_allow - Unblock runtime PM of a device.
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001105 * @dev: Device to handle.
1106 *
1107 * Decrease the device's usage count and set its power.runtime_auto flag.
1108 */
1109void pm_runtime_allow(struct device *dev)
1110{
1111 spin_lock_irq(&dev->power.lock);
1112 if (dev->power.runtime_auto)
1113 goto out;
1114
1115 dev->power.runtime_auto = true;
1116 if (atomic_dec_and_test(&dev->power.usage_count))
Alan Stern15bcb912010-09-25 23:35:21 +02001117 rpm_idle(dev, RPM_AUTO);
Rafael J. Wysocki53823632010-01-23 22:02:51 +01001118
1119 out:
1120 spin_unlock_irq(&dev->power.lock);
1121}
1122EXPORT_SYMBOL_GPL(pm_runtime_allow);
1123
1124/**
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001125 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
Alan Stern7490e442010-09-25 23:35:15 +02001126 * @dev: Device to handle.
1127 *
1128 * Set the power.no_callbacks flag, which tells the PM core that this
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001129 * device is power-managed through its parent and has no runtime PM
1130 * callbacks of its own. The runtime sysfs attributes will be removed.
Alan Stern7490e442010-09-25 23:35:15 +02001131 */
1132void pm_runtime_no_callbacks(struct device *dev)
1133{
1134 spin_lock_irq(&dev->power.lock);
1135 dev->power.no_callbacks = 1;
1136 spin_unlock_irq(&dev->power.lock);
1137 if (device_is_registered(dev))
1138 rpm_sysfs_remove(dev);
1139}
1140EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1141
1142/**
Alan Sternc7b61de2010-12-01 00:14:42 +01001143 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1144 * @dev: Device to handle
1145 *
1146 * Set the power.irq_safe flag, which tells the PM core that the
1147 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1148 * always be invoked with the spinlock held and interrupts disabled. It also
1149 * causes the parent's usage counter to be permanently incremented, preventing
1150 * the parent from runtime suspending -- otherwise an irq-safe child might have
1151 * to wait for a non-irq-safe parent.
1152 */
1153void pm_runtime_irq_safe(struct device *dev)
1154{
1155 if (dev->parent)
1156 pm_runtime_get_sync(dev->parent);
1157 spin_lock_irq(&dev->power.lock);
1158 dev->power.irq_safe = 1;
1159 spin_unlock_irq(&dev->power.lock);
1160}
1161EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1162
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 * The "prevent" state is represented by an extra usage-count reference, which
 * is taken or dropped only on a transition between the two states so the
 * count stays balanced.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}
1199
1200/**
1201 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1202 * @dev: Device to handle.
1203 * @delay: Value of the new delay in milliseconds.
1204 *
1205 * Set the device's power.autosuspend_delay value. If it changes to negative
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001206 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1207 * changes the other way, allow runtime suspends.
Alan Stern15bcb912010-09-25 23:35:21 +02001208 */
1209void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1210{
1211 int old_delay, old_use;
1212
1213 spin_lock_irq(&dev->power.lock);
1214 old_delay = dev->power.autosuspend_delay;
1215 old_use = dev->power.use_autosuspend;
1216 dev->power.autosuspend_delay = delay;
1217 update_autosuspend(dev, old_delay, old_use);
1218 spin_unlock_irq(&dev->power.lock);
1219}
1220EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1221
1222/**
1223 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1224 * @dev: Device to handle.
1225 * @use: New value for use_autosuspend.
1226 *
Rafael J. Wysocki62052ab2011-07-06 10:52:13 +02001227 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
Alan Stern15bcb912010-09-25 23:35:21 +02001228 * suspends as needed.
1229 */
1230void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1231{
1232 int old_delay, old_use;
1233
1234 spin_lock_irq(&dev->power.lock);
1235 old_delay = dev->power.autosuspend_delay;
1236 old_use = dev->power.use_autosuspend;
1237 dev->power.use_autosuspend = use;
1238 update_autosuspend(dev, old_delay, old_use);
1239 spin_unlock_irq(&dev->power.lock);
1240}
1241EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1242
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 *
 * Newly initialized devices report status RPM_SUSPENDED with runtime PM
 * disabled (disable_depth == 1); a driver must call pm_runtime_enable()
 * to balance that initial disable.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	/* Start disabled; pm_runtime_enable() brings the depth back to zero. */
	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	/* Start time accounting from "now". */
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	/* 0 means "timer inactive"; the device pointer is the timer payload. */
	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}
1273
/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 *
 * Disable runtime PM, reset the status to RPM_SUSPENDED to match the
 * pm_runtime_init() state, and drop the parent reference taken by
 * pm_runtime_irq_safe(), if any.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	/* Release the parent pinned by pm_runtime_irq_safe(). */
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put_sync(dev->parent);
}