Merge branches 'pm-core', 'pm-qos' and 'pm-domains'

* pm-core:
  PM / wakeirq: report a wakeup_event on dedicated wekup irq
  PM / wakeirq: Fix spurious wake-up events for dedicated wakeirqs
  PM / wakeirq: Enable dedicated wakeirq for suspend

* pm-qos:
  PM / QoS: Fix memory leak on resume_latency.notifiers
  PM / QoS: Remove unneeded linux/miscdevice.h include

* pm-domains:
  PM / Domains: Provide dummy governors if CONFIG_PM_GENERIC_DOMAINS=n
  PM / Domains: Fix asynchronous execution of *noirq() callbacks
  PM / Domains: Correct comment in irq_safe_dev_in_no_sleep_domain()
  PM / Domains: Rename functions in genpd for power on/off
Rafael J. Wysocki 2017-02-20 14:26:02 +01:00
commit 58339feae7
5 changed files with 91 additions and 61 deletions

drivers/base/power/domain.c

@@ -130,7 +130,7 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
/* Warn once for each IRQ safe dev in no sleep domain */
/* Warn once if IRQ safe dev in no sleep domain */
if (ret)
dev_warn_once(dev, "PM domain %s will not be powered off\n",
genpd->name);
@@ -201,7 +201,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
@@ -231,7 +231,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
}
static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
@@ -262,10 +262,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
}
/**
* genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
* genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
* @genpd: PM domain to power off.
*
* Queue up the execution of genpd_poweroff() unless it's already been done
* Queue up the execution of genpd_power_off() unless it's already been done
* before.
*/
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
@@ -274,14 +274,14 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
}
/**
* genpd_poweron - Restore power to a given PM domain and its masters.
* genpd_power_on - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
* @depth: nesting count for lockdep.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
struct gpd_link *link;
int ret = 0;
@@ -300,7 +300,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
genpd_sd_counter_inc(master);
genpd_lock_nested(master, depth + 1);
ret = genpd_poweron(master, depth + 1);
ret = genpd_power_on(master, depth + 1);
genpd_unlock(master);
if (ret) {
@@ -309,7 +309,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
}
}
ret = genpd_power_on(genpd, true);
ret = _genpd_power_on(genpd, true);
if (ret)
goto err;
@@ -368,14 +368,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
* genpd_poweroff - Remove power from a given PM domain.
* genpd_power_off - Remove power from a given PM domain.
* @genpd: PM domain to power down.
* @is_async: PM domain is powered down from a scheduled work
*
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
*/
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
@@ -427,13 +427,13 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call genpd_poweron() for the master yet after
* incrementing it. In that case genpd_poweron() will wait
* managed to call genpd_power_on() for the master yet after
* incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let
* the genpd_poweron() restore power for us (this shouldn't
* the genpd_power_on() restore power for us (this shouldn't
* happen very often).
*/
ret = genpd_power_off(genpd, true);
ret = _genpd_power_off(genpd, true);
if (ret)
return ret;
}
@@ -459,7 +459,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
genpd_lock(genpd);
genpd_poweroff(genpd, true);
genpd_power_off(genpd, true);
genpd_unlock(genpd);
}
@@ -578,7 +578,7 @@ static int genpd_runtime_suspend(struct device *dev)
return 0;
genpd_lock(genpd);
genpd_poweroff(genpd, false);
genpd_power_off(genpd, false);
genpd_unlock(genpd);
return 0;
@@ -618,7 +618,7 @@ static int genpd_runtime_resume(struct device *dev)
}
genpd_lock(genpd);
ret = genpd_poweron(genpd, 0);
ret = genpd_power_on(genpd, 0);
genpd_unlock(genpd);
if (ret)
@@ -658,7 +658,7 @@ err_poweroff:
if (!pm_runtime_is_irq_safe(dev) ||
(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
genpd_lock(genpd);
genpd_poweroff(genpd, 0);
genpd_power_off(genpd, 0);
genpd_unlock(genpd);
}
@@ -674,9 +674,9 @@ static int __init pd_ignore_unused_setup(char *__unused)
__setup("pd_ignore_unused", pd_ignore_unused_setup);
/**
* genpd_poweroff_unused - Power off all PM domains with no devices in use.
* genpd_power_off_unused - Power off all PM domains with no devices in use.
*/
static int __init genpd_poweroff_unused(void)
static int __init genpd_power_off_unused(void)
{
struct generic_pm_domain *genpd;
@@ -694,7 +694,7 @@ static int __init genpd_poweroff_unused(void)
return 0;
}
late_initcall(genpd_poweroff_unused);
late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
@@ -727,18 +727,20 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
}
/**
* genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its masters.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions, so it need not acquire locks (all of the "noirq" callbacks are
* executed sequentially, so it is guaranteed that it will never run twice in
* parallel).
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
* these cases the lock must be held.
*/
static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
unsigned int depth)
{
struct gpd_link *link;
@@ -751,26 +753,35 @@ static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
genpd_power_off(genpd, false);
_genpd_power_off(genpd, false);
genpd->status = GPD_STATE_POWER_OFF;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
genpd_sync_poweroff(link->master);
if (use_lock)
genpd_lock_nested(link->master, depth + 1);
genpd_sync_power_off(link->master, use_lock, depth + 1);
if (use_lock)
genpd_unlock(link->master);
}
}
/**
* genpd_sync_poweron - Synchronously power on a PM domain and its masters.
* genpd_sync_power_on - Synchronously power on a PM domain and its masters.
* @genpd: PM domain to power on.
* @use_lock: use the lock.
* @depth: nesting count for lockdep.
*
* This function is only called in "noirq" and "syscore" stages of system power
* transitions, so it need not acquire locks (all of the "noirq" callbacks are
* executed sequentially, so it is guaranteed that it will never run twice in
* parallel).
* transitions. The "noirq" callbacks may be executed asynchronously, thus in
* these cases the lock must be held.
*/
static void genpd_sync_poweron(struct generic_pm_domain *genpd)
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
unsigned int depth)
{
struct gpd_link *link;
@@ -778,11 +789,18 @@ static void genpd_sync_poweron(struct generic_pm_domain *genpd)
return;
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sync_poweron(link->master);
genpd_sd_counter_inc(link->master);
if (use_lock)
genpd_lock_nested(link->master, depth + 1);
genpd_sync_power_on(link->master, use_lock, depth + 1);
if (use_lock)
genpd_unlock(link->master);
}
genpd_power_on(genpd, false);
_genpd_power_on(genpd, false);
genpd->status = GPD_STATE_ACTIVE;
}
@@ -888,13 +906,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
return ret;
}
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
genpd_lock(genpd);
genpd->suspended_count++;
genpd_sync_poweroff(genpd);
genpd_sync_power_off(genpd, true, 0);
genpd_unlock(genpd);
return 0;
}
@@ -919,13 +934,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
genpd_sync_poweron(genpd);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
if (genpd->dev_ops.stop && genpd->dev_ops.start)
ret = pm_runtime_force_resume(dev);
@@ -1002,22 +1014,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
return -EINVAL;
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*
* At this point suspended_count == 0 means we are being run for the
* first time for the given domain in the present cycle.
*/
genpd_lock(genpd);
if (genpd->suspended_count++ == 0)
/*
* The boot kernel might put the domain into arbitrary state,
* so make it appear as powered off to genpd_sync_poweron(),
* so make it appear as powered off to genpd_sync_power_on(),
* so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
genpd_sync_poweron(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
if (genpd->dev_ops.stop && genpd->dev_ops.start)
ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1082,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
if (suspend) {
genpd->suspended_count++;
genpd_sync_poweroff(genpd);
genpd_sync_power_off(genpd, false, 0);
} else {
genpd_sync_poweron(genpd);
genpd_sync_power_on(genpd, false, 0);
genpd->suspended_count--;
}
}
@@ -2043,7 +2053,7 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->sync = genpd_dev_pm_sync;
genpd_lock(pd);
ret = genpd_poweron(pd, 0);
ret = genpd_power_on(pd, 0);
genpd_unlock(pd);
out:
return ret ? -EPROBE_DEFER : 0;
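
The domain.c changes above do two things: the low-level helpers gain a leading underscore (_genpd_power_on()/_genpd_power_off()) so the lock-taking entry points can own the plain genpd_power_on()/genpd_power_off() names, and genpd_sync_power_off()/genpd_sync_power_on() grow use_lock/depth parameters because the "noirq" callbacks may now run asynchronously, so the walk up the master chain must take each master's lock in that case. Below is a minimal userspace sketch of that walk only; pthread mutexes stand in for genpd_lock_nested(), the subdomain-count and status checks of the real code are omitted, and every name is illustrative rather than kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct domain {
	const char *name;
	pthread_mutex_t lock;
	struct domain *master;		/* single parent, for brevity */
	bool powered;
};

/* Shape of genpd_sync_power_off(): power this domain down, then recurse
 * into the master, taking its lock only when use_lock is set (the
 * asynchronous "noirq" case); the "syscore" path passes use_lock=false. */
static void sync_power_off(struct domain *d, bool use_lock)
{
	if (!d->powered)
		return;

	d->powered = false;
	printf("%s powered off\n", d->name);

	if (!d->master)
		return;

	if (use_lock)
		pthread_mutex_lock(&d->master->lock);

	sync_power_off(d->master, use_lock);

	if (use_lock)
		pthread_mutex_unlock(&d->master->lock);
}

int main(void)
{
	struct domain top = {
		.name = "top", .lock = PTHREAD_MUTEX_INITIALIZER, .powered = true,
	};
	struct domain sub = {
		.name = "sub", .lock = PTHREAD_MUTEX_INITIALIZER,
		.master = &top, .powered = true,
	};

	sync_power_off(&sub, true);	/* powers off "sub", then "top" */
	return 0;
}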

drivers/base/power/qos.c

@@ -281,7 +281,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
dev->power.qos = ERR_PTR(-ENODEV);
spin_unlock_irq(&dev->power.lock);
kfree(c->notifiers);
kfree(qos->resume_latency.notifiers);
kfree(qos);
out:
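
The one-line qos.c change fixes a leak: earlier in dev_pm_qos_constraints_destroy() the local pointer c is re-pointed from the resume_latency constraints to latency_tolerance, whose notifier list is never allocated, so the old kfree(c->notifiers) freed NULL and the resume_latency notifier head leaked on every destroy. A standalone C illustration of that stale-alias pattern, using hypothetical stand-in types rather than the kernel structs:

#include <stdlib.h>

struct constraints {
	int *notifiers;			/* stand-in for the notifier head */
};

struct dev_qos {
	struct constraints resume_latency;	/* notifiers gets allocated */
	struct constraints latency_tolerance;	/* notifiers stays NULL */
};

static void destroy(struct dev_qos *qos)
{
	struct constraints *c = &qos->resume_latency;
	/* ... flush requests against c ... */

	c = &qos->latency_tolerance;	/* c no longer aliases resume_latency */
	/* ... flush requests against c ... */

	/* The old code's free(c->notifiers) here frees NULL and leaks
	 * the allocation hanging off resume_latency. */
	free(qos->resume_latency.notifiers);	/* the fix: name the field */
	free(qos);
}

int main(void)
{
	struct dev_qos *qos = calloc(1, sizeof(*qos));

	if (!qos)
		return 1;
	qos->resume_latency.notifiers = malloc(sizeof(int));
	destroy(qos);
	return 0;
}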

drivers/base/power/wakeirq.c

@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
struct wake_irq *wirq = _wirq;
int res;
/* Maybe abort suspend? */
if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
pm_wakeup_event(wirq->dev, 0);
return IRQ_HANDLED;
}
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
@@ -183,6 +190,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
wirq->irq = irq;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
/*
* Consumer device may need to power up and restore state
* so we use a threaded irq.
@@ -312,8 +322,12 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (!wirq)
return;
if (device_may_wakeup(wirq->dev))
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
}
}
/**
@@ -328,6 +342,10 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
if (!wirq)
return;
if (device_may_wakeup(wirq->dev))
if (device_may_wakeup(wirq->dev)) {
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
disable_irq_nosync(wirq->irq);
}
}
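
Taken together, the wakeirq.c hunks make a dedicated wake IRQ usable across system suspend: dev_pm_arm_wake_irq() now also enable_irq()s a dedicated wakeirq (which may be disabled if the device was not runtime-suspended), the threaded handler reports a wakeup event when it fires while wakeup is armed (possibly aborting an in-flight suspend), and IRQ_DISABLE_UNLAZY prevents disable_irq_nosync() from leaving a lazily disabled, spuriously pending interrupt behind. The consumer-facing API is unchanged; a sketch of a typical consumer, with the driver name and IRQ resource index purely illustrative:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/pm_wakeirq.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq, err;

	/* Assume the second IRQ resource is the dedicated wakeup line */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);

	/* The PM core enables this IRQ while the device is runtime
	 * suspended and, per the changes above, also arms it across
	 * system suspend. */
	err = dev_pm_set_dedicated_wake_irq(&pdev->dev, irq);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);
	return 0;
}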

include/linux/pm_domain.h

@@ -182,6 +182,9 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
{
return -ENOTSUPP;
}
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
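
The two macros added to pm_domain.h let callers reference the stock governors without wrapping every use in #ifdef CONFIG_PM_GENERIC_DOMAINS: callers only take the governor's address, &*(ptr) is not a dereference in C, and with genpd disabled pm_genpd_init() is itself an inline stub that never touches its governor argument. A sketch of the call pattern this enables, assuming a hypothetical domain:

#include <linux/pm_domain.h>

static struct generic_pm_domain foo_pd = {
	.name = "foo",
};

static int foo_pd_setup(void)
{
	/* Builds with CONFIG_PM_GENERIC_DOMAINS=y or =n alike: when
	 * genpd is disabled, pm_genpd_init() is a stub and the dummy
	 * simple_qos_governor macro is only used for its address. */
	return pm_genpd_init(&foo_pd, &simple_qos_governor, false);
}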

include/linux/pm_qos.h

@@ -6,7 +6,6 @@
*/
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>