/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->bus->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->type->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->class->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}
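
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * rpm_idle() is normally reached through __pm_runtime_idle() below, usually
 * via the static inline wrappers assumed to live in
 * include/linux/pm_runtime.h.  A driver that has just finished a burst of
 * I/O might poke the idle path like this:
 *
 *	static void foo_io_finished(struct device *dev)
 *	{
 *		pm_runtime_put_noidle(dev);	// drop the usage count
 *		pm_request_idle(dev);		// async idle notification,
 *						// i.e. rpm_idle(dev, RPM_ASYNC)
 *	}
 */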

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        bool notify = false;
        int retval;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = 0;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        if (dev->power.timer_expires == 0)
                                notify = true;
                        dev->power.runtime_error = 0;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        if (notify)
                rpm_idle(dev, 0);

        if (parent && !parent->power.ignore_children) {
                spin_unlock_irq(&dev->power.lock);

                pm_request_idle(parent);

                spin_lock_irq(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
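
/*
 * Illustrative callback sketch (hypothetical driver code, not from this
 * file): the ->runtime_suspend() invoked above comes from the bus type,
 * device type or class.  Returning -EBUSY or -EAGAIN makes rpm_suspend()
 * roll the status back to RPM_ACTIVE without latching an error, as handled
 * in the retval branch above:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		if (foo_transfer_in_progress(chip))
 *			return -EBUSY;		// stay active, try again later
 *		foo_power_down(chip);
 *		return 0;
 *	}
 */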

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock(&dev->parent->power.lock);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's resume counter and resume it if
                 * necessary.
                 */
                parent = dev->parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
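
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * the deferred_resume handling above is what makes an asynchronous "get"
 * safe while a suspend is in flight.  Assuming the usual wrappers from
 * include/linux/pm_runtime.h, an interrupt handler can simply do:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		// If the device is mid-suspend this sets deferred_resume,
 *		// and rpm_suspend() resumes it once the callback returns.
 *		pm_runtime_get(dev);
 *		return IRQ_HANDLED;
 *	}
 */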

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
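
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * a driver that wants its device powered down a fixed time after the last
 * I/O completes can arm the timer from its completion path.  The "foo"
 * names and the 500 ms delay are made up:
 *
 *	static void foo_io_done(struct foo_chip *chip)
 *	{
 *		// Queue an asynchronous suspend request 500 ms from now;
 *		// a later resume cancels it, a new call rearms the timer.
 *		pm_schedule_suspend(chip->dev, 500);
 *	}
 */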

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
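
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * RPM_GET_PUT is how the "put" helpers reach this entry point.  Assuming
 * the inline wrappers in include/linux/pm_runtime.h map as shown, a
 * balanced pair around a transfer looks like:
 *
 *	pm_runtime_get_sync(dev);	// resume and take a reference
 *	foo_do_transfer(dev);		// hypothetical I/O
 *	pm_runtime_put(dev);		// drop the reference, then
 *					// __pm_runtime_idle(dev,
 *					//	RPM_GET_PUT | RPM_ASYNC)
 */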

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Carry out a suspend, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
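
/*
 * Illustrative mapping (assumed, not defined in this file): the resume-side
 * wrappers in include/linux/pm_runtime.h are thin inlines around this entry
 * point, roughly:
 *
 *	pm_runtime_resume(dev);		// __pm_runtime_resume(dev, 0)
 *	pm_request_resume(dev);		// __pm_runtime_resume(dev, RPM_ASYNC)
 *	pm_runtime_get_sync(dev);	// __pm_runtime_resume(dev, RPM_GET_PUT)
 *
 * Only the RPM_GET_PUT variant touches the usage count; the other two leave
 * it alone and may therefore be followed by an immediate re-suspend.
 */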

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
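
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * this helper is typically used at probe time through the
 * pm_runtime_set_active()/pm_runtime_set_suspended() wrappers, which are
 * assumed to be thin inlines around __pm_runtime_set_status().  For
 * hardware that comes up powered:
 *
 *	pm_runtime_set_active(dev);	// status := RPM_ACTIVE, parent's
 *					// unsuspended-child count is bumped
 *	pm_runtime_enable(dev);		// from now on the core may suspend it
 */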

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
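
/*
 * Illustrative usage sketch (hypothetical code, not from this file): a
 * caller about to tear a device down, which must not race with pm_wq work
 * items or in-flight callbacks, can combine the barrier with a usage-count
 * reference:
 *
 *	pm_runtime_get_noresume(dev);	// block further suspends
 *	pm_runtime_barrier(dev);	// flush requests, wait for callbacks
 *	foo_teardown(dev);		// hypothetical, now race-free
 *	pm_runtime_put_noidle(dev);
 */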

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
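
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * enable calls must balance disable calls, and the initial disable_depth of
 * 1 set in pm_runtime_init() counts as one outstanding disable.  A driver's
 * probe and remove paths therefore usually pair up like this (the
 * pm_runtime_disable() wrapper is assumed to call
 * __pm_runtime_disable(dev, true)):
 *
 *	probe:	pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);		// drops the initial disable
 *
 *	remove:	pm_runtime_disable(dev);
 */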

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
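
/*
 * Illustrative usage sketch (hypothetical driver code, not from this file):
 * this is meant for devices powered entirely through their parent, e.g. a
 * logical child device registered by a multifunction-chip driver:
 *
 *	pm_runtime_no_callbacks(&chip->child_dev);	// child_dev made up
 *	pm_runtime_set_active(&chip->child_dev);
 *	pm_runtime_enable(&chip->child_dev);
 *	// Idle/suspend/resume of the child now only propagates reference
 *	// counts to the parent; no callbacks are invoked for the child.
 */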

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
}