
PM / Runtime: Combine runtime PM entry points

This patch (as1424) combines the various public entry points for the
runtime PM routines into three simple functions: one for idle, one for
suspend, and one for resume.  A new bitflag specifies whether or not
to increment or decrement the usage_count field.

The new entry points are named __pm_runtime_idle,
__pm_runtime_suspend, and __pm_runtime_resume, to reflect that they
are trampolines.  Simultaneously, the corresponding internal routines
are renamed to rpm_idle, rpm_suspend, and rpm_resume.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Authored by Alan Stern on 2010-09-25 23:35:07 +02:00; committed by Rafael J. Wysocki
parent 1bfee5bc86
commit 140a6c9452
2 changed files with 120 additions and 140 deletions
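
As a quick reference while reading the diff, the mapping below summarizes how the existing helpers correspond to the three new entry points once this patch is applied. It is paraphrased from the inline wrappers this patch adds to include/linux/pm_runtime.h (the second file below); dev stands for any struct device pointer.

/* Old helper              -> new trampoline call it expands to */
pm_runtime_idle(dev);      /* __pm_runtime_idle(dev, 0) */
pm_runtime_suspend(dev);   /* __pm_runtime_suspend(dev, 0) */
pm_runtime_resume(dev);    /* __pm_runtime_resume(dev, 0) */
pm_request_idle(dev);      /* __pm_runtime_idle(dev, RPM_ASYNC) */
pm_request_resume(dev);    /* __pm_runtime_resume(dev, RPM_ASYNC) */
pm_runtime_get(dev);       /* __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC) */
pm_runtime_get_sync(dev);  /* __pm_runtime_resume(dev, RPM_GET_PUT) */
pm_runtime_put(dev);       /* __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) */
pm_runtime_put_sync(dev);  /* __pm_runtime_idle(dev, RPM_GET_PUT) */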

drivers/base/power/runtime.c

@@ -11,7 +11,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/jiffies.h>
 
-static int __pm_runtime_resume(struct device *dev, int rpmflags);
+static int rpm_resume(struct device *dev, int rpmflags);
 
 /**
  * update_pm_runtime_accounting - Update the time accounting of power states
@@ -107,7 +107,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
 /**
- * __pm_runtime_idle - Notify device bus type if the device can be suspended.
+ * rpm_idle - Notify device bus type if the device can be suspended.
  * @dev: Device to notify the bus type about.
  * @rpmflags: Flag bits.
  *
@@ -118,7 +118,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_idle(struct device *dev, int rpmflags)
+static int rpm_idle(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	int retval;
@@ -189,23 +189,7 @@ static int __pm_runtime_idle(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_idle - Notify device bus type if the device can be suspended.
- * @dev: Device to notify the bus type about.
- */
-int pm_runtime_idle(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_idle(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_idle);
-
-/**
- * __pm_runtime_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out run-time suspend of given device.
  * @dev: Device to suspend.
  * @rpmflags: Flag bits.
  *
@@ -220,7 +204,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_idle);
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_suspend(struct device *dev, int rpmflags)
+static int rpm_suspend(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
@@ -332,13 +316,13 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (dev->power.deferred_resume) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = -EAGAIN;
 		goto out;
 	}
 
 	if (notify)
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);
 
 	if (parent && !parent->power.ignore_children) {
 		spin_unlock_irq(&dev->power.lock);
@@ -355,23 +339,7 @@ static int __pm_runtime_suspend(struct device *dev, int rpmflags)
 }
 
 /**
- * pm_runtime_suspend - Carry out run-time suspend of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_suspend(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_suspend(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_suspend);
-
-/**
- * __pm_runtime_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out run-time resume of given device.
  * @dev: Device to resume.
  * @rpmflags: Flag bits.
  *
@@ -387,7 +355,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_suspend);
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
-static int __pm_runtime_resume(struct device *dev, int rpmflags)
+static int rpm_resume(struct device *dev, int rpmflags)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
 	struct device *parent = NULL;
@@ -469,7 +437,7 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 		 */
 		if (!parent->power.disable_depth
 		    && !parent->power.ignore_children) {
-			__pm_runtime_resume(parent, 0);
+			rpm_resume(parent, 0);
 			if (parent->power.runtime_status != RPM_ACTIVE)
 				retval = -EBUSY;
 		}
@@ -521,7 +489,7 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 	wake_up_all(&dev->power.wait_queue);
 
 	if (!retval)
-		__pm_runtime_idle(dev, RPM_ASYNC);
+		rpm_idle(dev, RPM_ASYNC);
 
  out:
 	if (parent) {
@@ -537,22 +505,6 @@ static int __pm_runtime_resume(struct device *dev, int rpmflags)
 	return retval;
 }
 
-/**
- * pm_runtime_resume - Carry out run-time resume of given device.
- * @dev: Device to suspend.
- */
-int pm_runtime_resume(struct device *dev)
-{
-	int retval;
-
-	spin_lock_irq(&dev->power.lock);
-	retval = __pm_runtime_resume(dev, 0);
-	spin_unlock_irq(&dev->power.lock);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_runtime_resume);
-
 /**
  * pm_runtime_work - Universal run-time PM work function.
  * @work: Work structure used for scheduling the execution of this function.
@@ -578,13 +530,13 @@ static void pm_runtime_work(struct work_struct *work)
 	case RPM_REQ_NONE:
 		break;
 	case RPM_REQ_IDLE:
-		__pm_runtime_idle(dev, RPM_NOWAIT);
+		rpm_idle(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_SUSPEND:
-		__pm_runtime_suspend(dev, RPM_NOWAIT);
+		rpm_suspend(dev, RPM_NOWAIT);
 		break;
 	case RPM_REQ_RESUME:
-		__pm_runtime_resume(dev, RPM_NOWAIT);
+		rpm_resume(dev, RPM_NOWAIT);
 		break;
 	}
 
@@ -592,23 +544,6 @@ static void pm_runtime_work(struct work_struct *work)
 	spin_unlock_irq(&dev->power.lock);
 }
 
-/**
- * pm_request_idle - Submit an idle notification request for given device.
- * @dev: Device to handle.
- */
-int pm_request_idle(struct device *dev)
-{
-	unsigned long flags;
-	int retval;
-
-	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_idle(dev, RPM_ASYNC);
-	spin_unlock_irqrestore(&dev->power.lock, flags);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_request_idle);
-
 /**
  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
  * @data: Device pointer passed by pm_schedule_suspend().
@@ -627,7 +562,7 @@ static void pm_suspend_timer_fn(unsigned long data)
 	/* If 'expire' is after 'jiffies' we've been called too early. */
 	if (expires > 0 && !time_after(expires, jiffies)) {
 		dev->power.timer_expires = 0;
-		__pm_runtime_suspend(dev, RPM_ASYNC);
+		rpm_suspend(dev, RPM_ASYNC);
 	}
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -646,7 +581,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	if (!delay) {
-		retval = __pm_runtime_suspend(dev, RPM_ASYNC);
+		retval = rpm_suspend(dev, RPM_ASYNC);
 		goto out;
 	}
 
@@ -669,62 +604,81 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 
 /**
- * pm_request_resume - Submit a resume request for given device.
- * @dev: Device to resume.
+ * __pm_runtime_idle - Entry point for run-time idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero. Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int pm_request_resume(struct device *dev)
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	unsigned long flags;
+	int retval;
+
+	if (rpmflags & RPM_GET_PUT) {
+		if (!atomic_dec_and_test(&dev->power.usage_count))
+			return 0;
+	}
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_idle(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Carry out a suspend, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
 {
 	unsigned long flags;
 	int retval;
 
 	spin_lock_irqsave(&dev->power.lock, flags);
-	retval = __pm_runtime_resume(dev, RPM_ASYNC);
+	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(pm_request_resume);
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 
 /**
- * __pm_runtime_get - Reference count a device and wake it up, if necessary.
- * @dev: Device to handle.
+ * __pm_runtime_resume - Entry point for run-time resume operations.
+ * @dev: Device to resume.
  * @rpmflags: Flag bits.
 *
- * Increment the usage count of the device and resume it or submit a resume
- * request for it, depending on the RPM_ASYNC flag bit.
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set.
  */
-int __pm_runtime_get(struct device *dev, int rpmflags)
+int __pm_runtime_resume(struct device *dev, int rpmflags)
 {
+	unsigned long flags;
 	int retval;
 
-	atomic_inc(&dev->power.usage_count);
-	retval = (rpmflags & RPM_ASYNC) ?
-		pm_request_resume(dev) : pm_runtime_resume(dev);
+	if (rpmflags & RPM_GET_PUT)
+		atomic_inc(&dev->power.usage_count);
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	retval = rpm_resume(dev, rpmflags);
+	spin_unlock_irqrestore(&dev->power.lock, flags);
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(__pm_runtime_get);
-
-/**
- * __pm_runtime_put - Decrement the device's usage counter and notify its bus.
- * @dev: Device to handle.
- * @rpmflags: Flag bits.
- *
- * Decrement the usage count of the device and if it reaches zero, carry out a
- * synchronous idle notification or submit an idle notification request for it,
- * depending on the RPM_ASYNC flag bit.
- */
-int __pm_runtime_put(struct device *dev, int rpmflags)
-{
-	int retval = 0;
-
-	if (atomic_dec_and_test(&dev->power.usage_count))
-		retval = (rpmflags & RPM_ASYNC) ?
-			pm_request_idle(dev) : pm_runtime_idle(dev);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(__pm_runtime_put);
 
 /**
  * __pm_runtime_set_status - Set run-time PM status of a device.
@@ -875,7 +829,7 @@ int pm_runtime_barrier(struct device *dev)
 
 	if (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME) {
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		retval = 1;
 	}
 
@@ -924,7 +878,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 		 */
 		pm_runtime_get_noresume(dev);
-		__pm_runtime_resume(dev, 0);
+		rpm_resume(dev, 0);
 		pm_runtime_put_noidle(dev);
 	}
@@ -972,7 +926,7 @@ void pm_runtime_forbid(struct device *dev)
 
 	dev->power.runtime_auto = false;
 	atomic_inc(&dev->power.usage_count);
-	__pm_runtime_resume(dev, 0);
+	rpm_resume(dev, 0);
 
  out:
 	spin_unlock_irq(&dev->power.lock);
@@ -993,7 +947,7 @@ void pm_runtime_allow(struct device *dev)
 
 	dev->power.runtime_auto = true;
 	if (atomic_dec_and_test(&dev->power.usage_count))
-		__pm_runtime_idle(dev, 0);
+		rpm_idle(dev, 0);
 
  out:
 	spin_unlock_irq(&dev->power.lock);

include/linux/pm_runtime.h

@@ -16,19 +16,17 @@
 #define RPM_ASYNC		0x01	/* Request is asynchronous */
 #define RPM_NOWAIT		0x02	/* Don't wait for concurrent
 					    state change */
+#define RPM_GET_PUT		0x04	/* Increment/decrement the
+					    usage_count */
 
 #ifdef CONFIG_PM_RUNTIME
 
 extern struct workqueue_struct *pm_wq;
 
-extern int pm_runtime_idle(struct device *dev);
-extern int pm_runtime_suspend(struct device *dev);
-extern int pm_runtime_resume(struct device *dev);
-extern int pm_request_idle(struct device *dev);
+extern int __pm_runtime_idle(struct device *dev, int rpmflags);
+extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
+extern int __pm_runtime_resume(struct device *dev, int rpmflags);
 extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
-extern int pm_request_resume(struct device *dev);
-extern int __pm_runtime_get(struct device *dev, int rpmflags);
-extern int __pm_runtime_put(struct device *dev, int rpmflags);
 extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
 extern int pm_runtime_barrier(struct device *dev);
 extern void pm_runtime_enable(struct device *dev);
@@ -77,19 +75,22 @@ static inline bool pm_runtime_suspended(struct device *dev)
 #else /* !CONFIG_PM_RUNTIME */
 
-static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; }
-static inline int pm_runtime_resume(struct device *dev) { return 0; }
-static inline int pm_request_idle(struct device *dev) { return -ENOSYS; }
+static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+	return -ENOSYS;
+}
+static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+	return 1;
+}
 static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
 {
 	return -ENOSYS;
 }
-static inline int pm_request_resume(struct device *dev) { return 0; }
-static inline int __pm_runtime_get(struct device *dev, int rpmflags)
-	{ return 1; }
-static inline int __pm_runtime_put(struct device *dev, int rpmflags)
-	{ return 0; }
 
 static inline int __pm_runtime_set_status(struct device *dev,
 					unsigned int status) { return 0; }
 static inline int pm_runtime_barrier(struct device *dev) { return 0; }
@@ -112,24 +113,49 @@ static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
 
 #endif /* !CONFIG_PM_RUNTIME */
 
+static inline int pm_runtime_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, 0);
+}
+
+static inline int pm_runtime_suspend(struct device *dev)
+{
+	return __pm_runtime_suspend(dev, 0);
+}
+
+static inline int pm_runtime_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, 0);
+}
+
+static inline int pm_request_idle(struct device *dev)
+{
+	return __pm_runtime_idle(dev, RPM_ASYNC);
+}
+
+static inline int pm_request_resume(struct device *dev)
+{
+	return __pm_runtime_resume(dev, RPM_ASYNC);
+}
+
 static inline int pm_runtime_get(struct device *dev)
 {
-	return __pm_runtime_get(dev, RPM_ASYNC);
+	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
 }
 
 static inline int pm_runtime_get_sync(struct device *dev)
 {
-	return __pm_runtime_get(dev, 0);
+	return __pm_runtime_resume(dev, RPM_GET_PUT);
 }
 
 static inline int pm_runtime_put(struct device *dev)
 {
-	return __pm_runtime_put(dev, RPM_ASYNC);
+	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
 }
 
 static inline int pm_runtime_put_sync(struct device *dev)
 {
-	return __pm_runtime_put(dev, 0);
+	return __pm_runtime_idle(dev, RPM_GET_PUT);
 }
 
 static inline int pm_runtime_set_active(struct device *dev)