More power management updates for 3.4

Fixes mostly, including:
 
 * Patch series that hopefully fixes races between the freezer and request_firmware()
   and request_firmware_nowait() for good, with two cleanups from Stephen Boyd on top.
 
 * Runtime PM fix from Alan Stern preventing tasks from getting stuck indefinitely
   in the runtime PM wait queue.
 
 * Device PM QoS update from MyungJoo Ham introducing a new variant of
   pm_qos_update_request() allowing the callers to specify a timeout.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.18 (GNU/Linux)
 
 iQIcBAABAgAGBQJPdmPZAAoJEKhOf7ml8uNsvcgQAIKBya3ESVg2PbB1riIRJ0M5
 3R5ntbQ0sxa631lIoipZLP6HeN2fgTcfTqhHpr9/dtt80Zh/HbNWee4XEmkJvGOK
 UuG/Vzg2IJA2LKYbRDEALm9GwvlG8ylIrz1mWOSt77K+seyjnvCyfQsoVd5S/+sz
 bzDCwIJlV/lvtynvAMfaZ+O75XW1uYRJ6a1ABviEU4o+J7OC9UCp0h/b9c1WZqDJ
 1X0pBU0/28ZFnYnK+zuAqwJg7pua/HrC0nT/pQTRSZ0kXAgt7uuqIlpVz9HXiqzu
 TVbu3uW6FPWT0TP/iFmKMA1eiQJHLXgshECaccVOoMzIG/pqYTNbfu9BzEho3tL9
 w716ruo1JoythvnlIz4j8R2RtiE8SxTzCqGm4OHcie72VUSqduIhWgRyZOFhebUo
 xqiUSN2cyYUf9SJoeg0TSmQdutoa7vnswZgq4qjlOz39OPxHrwAe5ROXIBwoHvnz
 akmBtnabyNVsRiLe9eIH5N5C9TxHDgZwS70SMYqo1D09Qo+NTUtvSVgC/NiIjhXb
 yY3UliDqGlkUhHJ+8ydntNb39VU4L1MO0IGzEvmvfXvSIcXavGkkmd9RV9yytLEK
 1ujq99NHITzxyuF2+bNGpPQVEVH3sQgAv/doFTiEZiUHIIAy5Fmy/+ipcurslXLm
 urlq4RLG+JXgPjw4XO14
 =ligR
 -----END PGP SIGNATURE-----

Merge tag 'pm-for-3.4-part-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 - Patch series that hopefully fixes races between the freezer and
   request_firmware() and request_firmware_nowait() for good, with two
   cleanups from Stephen Boyd on top.
 - Runtime PM fix from Alan Stern preventing tasks from getting stuck
   indefinitely in the runtime PM wait queue.
 - Device PM QoS update from MyungJoo Ham introducing a new variant of
   pm_qos_update_request() allowing the callers to specify a timeout.

* tag 'pm-for-3.4-part-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / QoS: add pm_qos_update_request_timeout() API
  firmware_class: Move request_firmware_nowait() to workqueues
  firmware_class: Reorganize fw_create_instance()
  PM / Sleep: Mitigate race between the freezer and request_firmware()
  PM / Sleep: Move disabling of usermode helpers to the freezer
  PM / Hibernate: Disable usermode helpers right before freezing tasks
  firmware_class: Do not warn that system is not ready from async loads
  firmware_class: Split _request_firmware() into three functions, v2
  firmware_class: Rework usermodehelper check
  PM / Runtime: don't forget to wake up waitqueue on failure
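
The pm_qos_update_request_timeout() call mentioned above arms a delayed work item that drops the request back to PM_QOS_DEFAULT_VALUE once the timeout expires (see the pm_qos_work_fn() hunk further down). A minimal caller sketch, not taken from this series: the request object, function names and values below are hypothetical, and PM_QOS_CPU_DMA_LATENCY is only used as a convenient class to illustrate with.

#include <linux/pm_qos.h>

/* Hypothetical request; the values are illustrative only. */
static struct pm_qos_request demo_latency_req;

static void demo_init(void)
{
	/* Register the request with the default (no-constraint) value. */
	pm_qos_add_request(&demo_latency_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void demo_start_burst(void)
{
	/*
	 * Cap CPU DMA latency at 20 us for the next 5000 us only; after
	 * that, the delayed work added by this series restores
	 * PM_QOS_DEFAULT_VALUE automatically.
	 */
	pm_qos_update_request_timeout(&demo_latency_req, 20, 5000);
}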
commit 64ebe98731
Author: Linus Torvalds
Date:   2012-04-04 14:26:40 -07:00

10 changed files with 288 additions and 172 deletions

View file

@@ -16,10 +16,11 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#define to_dev(obj) container_of(obj, struct device, kobj)
@@ -81,6 +82,11 @@ enum {
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
{
return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
/* fw_lock could be moved to 'struct firmware_priv' but since it is just
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
{
struct firmware_priv *fw_priv;
struct device *f_dev;
int error;
fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
error = -ENOMEM;
goto err_out;
return ERR_PTR(-ENOMEM);
}
fw_priv->fw = firmware;
@@ -463,98 +467,80 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
f_dev->parent = device;
f_dev->class = &firmware_class;
dev_set_uevent_suppress(f_dev, true);
/* Need to pin this module until class device is destroyed */
__module_get(THIS_MODULE);
error = device_add(f_dev);
if (error) {
dev_err(device, "%s: device_register failed\n", __func__);
goto err_put_dev;
}
error = device_create_bin_file(f_dev, &firmware_attr_data);
if (error) {
dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
goto err_del_dev;
}
error = device_create_file(f_dev, &dev_attr_loading);
if (error) {
dev_err(device, "%s: device_create_file failed\n", __func__);
goto err_del_bin_attr;
}
if (uevent)
dev_set_uevent_suppress(f_dev, false);
return fw_priv;
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
err_out:
return ERR_PTR(error);
}
static void fw_destroy_instance(struct firmware_priv *fw_priv)
static struct firmware_priv *
_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
struct device *device, bool uevent, bool nowait)
{
struct device *f_dev = &fw_priv->dev;
device_remove_file(f_dev, &dev_attr_loading);
device_remove_bin_file(f_dev, &firmware_attr_data);
device_unregister(f_dev);
}
static int _request_firmware(const struct firmware **firmware_p,
const char *name, struct device *device,
bool uevent, bool nowait)
{
struct firmware_priv *fw_priv;
struct firmware *firmware;
int retval = 0;
struct firmware_priv *fw_priv;
if (!firmware_p)
return -EINVAL;
return ERR_PTR(-EINVAL);
*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
return -ENOMEM;
return ERR_PTR(-ENOMEM);
}
if (fw_get_builtin_firmware(firmware, name)) {
dev_dbg(device, "firmware: using built-in firmware %s\n", name);
return 0;
return NULL;
}
read_lock_usermodehelper();
if (WARN_ON(usermodehelper_is_disabled())) {
dev_err(device, "firmware: %s will not be loaded\n", name);
retval = -EBUSY;
goto out;
}
if (uevent)
dev_dbg(device, "firmware: requesting %s\n", name);
fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
if (IS_ERR(fw_priv)) {
retval = PTR_ERR(fw_priv);
goto out;
release_firmware(firmware);
*firmware_p = NULL;
}
return fw_priv;
}
static void _request_firmware_cleanup(const struct firmware **firmware_p)
{
release_firmware(*firmware_p);
*firmware_p = NULL;
}
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
long timeout)
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
dev_set_uevent_suppress(f_dev, true);
/* Need to pin this module until class device is destroyed */
__module_get(THIS_MODULE);
retval = device_add(f_dev);
if (retval) {
dev_err(f_dev, "%s: device_register failed\n", __func__);
goto err_put_dev;
}
retval = device_create_bin_file(f_dev, &firmware_attr_data);
if (retval) {
dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
goto err_del_dev;
}
retval = device_create_file(f_dev, &dev_attr_loading);
if (retval) {
dev_err(f_dev, "%s: device_create_file failed\n", __func__);
goto err_del_bin_attr;
}
if (uevent) {
if (loading_timeout > 0)
dev_set_uevent_suppress(f_dev, false);
dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
mod_timer(&fw_priv->timeout,
round_jiffies_up(jiffies +
loading_timeout * HZ));
round_jiffies_up(jiffies + timeout));
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
@@ -570,16 +556,13 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_priv->fw = NULL;
mutex_unlock(&fw_lock);
fw_destroy_instance(fw_priv);
out:
read_unlock_usermodehelper();
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
}
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
device_del(f_dev);
err_put_dev:
put_device(f_dev);
return retval;
}
@@ -602,7 +585,26 @@ int
request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device)
{
return _request_firmware(firmware_p, name, device, true, false);
struct firmware_priv *fw_priv;
int ret;
fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
false);
if (IS_ERR_OR_NULL(fw_priv))
return PTR_RET(fw_priv);
ret = usermodehelper_read_trylock();
if (WARN_ON(ret)) {
dev_err(device, "firmware: %s will not be loaded\n", name);
} else {
ret = _request_firmware_load(fw_priv, true,
firmware_loading_timeout());
usermodehelper_read_unlock();
}
if (ret)
_request_firmware_cleanup(firmware_p);
return ret;
}
/**
@@ -629,25 +631,39 @@ struct firmware_work {
bool uevent;
};
static int request_firmware_work_func(void *arg)
static void request_firmware_work_func(struct work_struct *work)
{
struct firmware_work *fw_work = arg;
struct firmware_work *fw_work;
const struct firmware *fw;
struct firmware_priv *fw_priv;
long timeout;
int ret;
if (!arg) {
WARN_ON(1);
return 0;
fw_work = container_of(work, struct firmware_work, work);
fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
fw_work->uevent, true);
if (IS_ERR_OR_NULL(fw_priv)) {
ret = PTR_RET(fw_priv);
goto out;
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
fw_work->uevent, true);
timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
if (timeout) {
ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
usermodehelper_read_unlock();
} else {
dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
fw_work->name);
ret = -EAGAIN;
}
if (ret)
_request_firmware_cleanup(&fw);
out:
fw_work->cont(fw, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return ret;
}
/**
@@ -673,7 +689,6 @@ request_firmware_nowait(
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
struct task_struct *task;
struct firmware_work *fw_work;
fw_work = kzalloc(sizeof (struct firmware_work), gfp);
@@ -692,15 +707,8 @@ request_firmware_nowait(
return -EFAULT;
}
task = kthread_run(request_firmware_work_func, fw_work,
"firmware/%s", name);
if (IS_ERR(task)) {
fw_work->cont(NULL, fw_work->context);
module_put(fw_work->module);
kfree(fw_work);
return PTR_ERR(task);
}
INIT_WORK(&fw_work->work, request_firmware_work_func);
schedule_work(&fw_work->work);
return 0;
}

View file

@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.suspend_time = ktime_set(0, 0);
dev->power.max_time_suspended_ns = -1;
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
if (retval == -EAGAIN || retval == -EBUSY) {
dev->power.runtime_error = 0;
@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
} else {
pm_runtime_cancel_pending(dev);
}
wake_up_all(&dev->power.wait_queue);
goto out;
}

View file

@@ -110,12 +110,29 @@ call_usermodehelper(char *path, char **argv, char **envp, int wait)
extern struct ctl_table usermodehelper_table[];
enum umh_disable_depth {
UMH_ENABLED = 0,
UMH_FREEZING,
UMH_DISABLED,
};
extern void usermodehelper_init(void);
extern int usermodehelper_disable(void);
extern void usermodehelper_enable(void);
extern bool usermodehelper_is_disabled(void);
extern void read_lock_usermodehelper(void);
extern void read_unlock_usermodehelper(void);
extern int __usermodehelper_disable(enum umh_disable_depth depth);
extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
static inline int usermodehelper_disable(void)
{
return __usermodehelper_disable(UMH_DISABLED);
}
static inline void usermodehelper_enable(void)
{
__usermodehelper_set_disable_depth(UMH_ENABLED);
}
extern int usermodehelper_read_trylock(void);
extern long usermodehelper_read_lock_wait(long timeout);
extern void usermodehelper_read_unlock(void);
#endif /* __LINUX_KMOD_H__ */
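
As a hedged sketch of how the new read-lock primitives above are meant to be used (mirroring the firmware_class changes in this series; the caller names here are made up): a synchronous path try-locks and bails out if helpers are disabled, while an asynchronous path waits with a timeout so it cannot block the freezer indefinitely.

#include <linux/kmod.h>

/* Illustrative only, not part of the patch. */
static int demo_sync_path(void)
{
	int ret = usermodehelper_read_trylock();

	if (ret)
		return ret;	/* -EAGAIN: helpers disabled, e.g. during hibernation */
	/* ... work that may end up spawning a usermode helper ... */
	usermodehelper_read_unlock();
	return 0;
}

static int demo_async_path(long timeout)
{
	timeout = usermodehelper_read_lock_wait(timeout);
	if (!timeout)
		return -EAGAIN;	/* lock not taken: helpers still disabled when time ran out */
	/* ... work ... */
	usermodehelper_read_unlock();
	return 0;
}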

View file

@@ -8,6 +8,7 @@
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>
enum {
PM_QOS_RESERVED = 0,
@@ -29,6 +30,7 @@ enum {
struct pm_qos_request {
struct plist_node node;
int pm_qos_class;
struct delayed_work work; /* for pm_qos_update_request_timeout */
};
struct dev_pm_qos_request {
@@ -73,6 +75,8 @@ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);
int pm_qos_request(int pm_qos_class);

View file

@@ -322,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work)
* land has been frozen during a system-wide hibernation or suspend operation).
* Should always be manipulated under umhelper_sem acquired for write.
*/
static int usermodehelper_disabled = 1;
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -333,33 +333,111 @@ static atomic_t running_helpers = ATOMIC_INIT(0);
*/
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
/*
* Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
* to become 'false'.
*/
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
/*
* Time to wait for running_helpers to become zero before the setting of
* usermodehelper_disabled in usermodehelper_disable() fails
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
void read_lock_usermodehelper(void)
int usermodehelper_read_trylock(void)
{
down_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
DEFINE_WAIT(wait);
int ret = 0;
void read_unlock_usermodehelper(void)
down_read(&umhelper_sem);
for (;;) {
prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
TASK_INTERRUPTIBLE);
if (!usermodehelper_disabled)
break;
if (usermodehelper_disabled == UMH_DISABLED)
ret = -EAGAIN;
up_read(&umhelper_sem);
if (ret)
break;
schedule();
try_to_freeze();
down_read(&umhelper_sem);
}
finish_wait(&usermodehelper_disabled_waitq, &wait);
return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
long usermodehelper_read_lock_wait(long timeout)
{
DEFINE_WAIT(wait);
if (timeout < 0)
return -EINVAL;
down_read(&umhelper_sem);
for (;;) {
prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
TASK_UNINTERRUPTIBLE);
if (!usermodehelper_disabled)
break;
up_read(&umhelper_sem);
timeout = schedule_timeout(timeout);
if (!timeout)
break;
down_read(&umhelper_sem);
}
finish_wait(&usermodehelper_disabled_waitq, &wait);
return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
void usermodehelper_read_unlock(void)
{
up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
/**
* usermodehelper_disable - prevent new helpers from being started
* __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
* depth: New value to assign to usermodehelper_disabled.
*
* Change the value of usermodehelper_disabled (under umhelper_sem locked for
* writing) and wakeup tasks waiting for it to change.
*/
int usermodehelper_disable(void)
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
down_write(&umhelper_sem);
usermodehelper_disabled = depth;
wake_up(&usermodehelper_disabled_waitq);
up_write(&umhelper_sem);
}
/**
* __usermodehelper_disable - Prevent new helpers from being started.
* @depth: New value to assign to usermodehelper_disabled.
*
* Set usermodehelper_disabled to @depth and wait for running helpers to exit.
*/
int __usermodehelper_disable(enum umh_disable_depth depth)
{
long retval;
if (!depth)
return -EINVAL;
down_write(&umhelper_sem);
usermodehelper_disabled = 1;
usermodehelper_disabled = depth;
up_write(&umhelper_sem);
/*
@@ -374,31 +452,10 @@ int usermodehelper_disable(void)
if (retval)
return 0;
down_write(&umhelper_sem);
usermodehelper_disabled = 0;
up_write(&umhelper_sem);
__usermodehelper_set_disable_depth(UMH_ENABLED);
return -EAGAIN;
}
/**
* usermodehelper_enable - allow new helpers to be started again
*/
void usermodehelper_enable(void)
{
down_write(&umhelper_sem);
usermodehelper_disabled = 0;
up_write(&umhelper_sem);
}
/**
* usermodehelper_is_disabled - check if new helpers are allowed to be started
*/
bool usermodehelper_is_disabled(void)
{
return usermodehelper_disabled;
}
EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
static void helper_lock(void)
{
atomic_inc(&running_helpers);

View file

@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/async.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/mount.h>
@@ -611,14 +610,10 @@ int hibernate(void)
if (error)
goto Exit;
error = usermodehelper_disable();
if (error)
goto Exit;
/* Allocate memory management structures */
error = create_basic_memory_bitmaps();
if (error)
goto Enable_umh;
goto Exit;
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
@@ -661,8 +656,6 @@ int hibernate(void)
Free_bitmaps:
free_basic_memory_bitmaps();
Enable_umh:
usermodehelper_enable();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
pm_restore_console();
@@ -777,16 +770,10 @@ static int software_resume(void)
if (error)
goto close_finish;
error = usermodehelper_disable();
error = create_basic_memory_bitmaps();
if (error)
goto close_finish;
error = create_basic_memory_bitmaps();
if (error) {
usermodehelper_enable();
goto close_finish;
}
pr_debug("PM: Preparing processes for restore.\n");
error = freeze_processes();
if (error) {
@@ -806,7 +793,6 @@ static int software_resume(void)
thaw_processes();
Done:
free_basic_memory_bitmaps();
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_restore_console();

View file

@@ -16,6 +16,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
/*
* Timeout for stopping processes
@@ -122,6 +123,10 @@ int freeze_processes(void)
{
int error;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
return error;
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
@@ -130,6 +135,7 @@ int freeze_processes(void)
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
__usermodehelper_set_disable_depth(UMH_DISABLED);
oom_killer_disable();
}
printk("\n");
@@ -187,6 +193,8 @@ void thaw_processes(void)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
usermodehelper_enable();
schedule();
printk("done.\n");
}

View file

@@ -229,6 +229,21 @@ int pm_qos_request_active(struct pm_qos_request *req)
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
*
* This cancels the timeout request by falling back to the default at timeout.
*/
static void pm_qos_work_fn(struct work_struct *work)
{
struct pm_qos_request *req = container_of(to_delayed_work(work),
struct pm_qos_request,
work);
pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
return;
}
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
@@ -286,6 +305,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_update_request_timeout - modifies an existing qos request temporarily.
* @req : handle to list element holding a pm_qos request to use
* @new_value: defines the temporal qos request
* @timeout_us: the effective duration of this qos request in usecs.
*
* After timeout_us, this qos request is cancelled automatically.
*/
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
unsigned long timeout_us)
{
if (!req)
return;
if (WARN(!pm_qos_request_active(req),
"%s called for unknown object.", __func__))
return;
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
/**
* pm_qos_remove_request - modifies an existing qos request
* @req: handle to request list element
@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);

View file

@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
@@ -102,17 +101,12 @@ static int suspend_prepare(void)
if (error)
goto Finish;
error = usermodehelper_disable();
if (error)
goto Finish;
error = suspend_freeze_processes();
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
@@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state)
static void suspend_finish(void)
{
suspend_thaw_processes();
usermodehelper_enable();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}

View file

@@ -12,7 +12,6 @@
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
@@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
sys_sync();
printk("done.\n");
error = usermodehelper_disable();
if (error)
break;
error = freeze_processes();
if (error)
usermodehelper_enable();
else
if (!error)
data->frozen = 1;
break;
@@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
pm_restore_gfp_mask();
thaw_processes();
usermodehelper_enable();
data->frozen = 0;
break;