Diffstat (limited to 'drivers/base/power')
-rw-r--r--   drivers/base/power/Makefile      |   1
-rw-r--r--   drivers/base/power/generic_ops.c | 233
-rw-r--r--   drivers/base/power/main.c        | 194
-rw-r--r--   drivers/base/power/power.h       |   6
-rw-r--r--   drivers/base/power/runtime.c     |  55
-rw-r--r--   drivers/base/power/sysfs.c       | 159
6 files changed, 619 insertions, 29 deletions
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3ce3519e8f30..89de75325cea 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_PM) += sysfs.o
obj-$(CONFIG_PM_SLEEP) += main.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
+obj-$(CONFIG_PM_OPS) += generic_ops.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 000000000000..4b29d4981253
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,233 @@
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
+ * @dev: Device to handle.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_idle(), execute it and return its error code, if nonzero.
+ * Otherwise, execute pm_runtime_suspend() for the device and return 0.
+ */
+int pm_generic_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->runtime_idle) {
+ int ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
+
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return -EINVAL.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return -EINVAL.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * If the device has not been suspended at run time, execute the
+ * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
+ * return its error code. Otherwise, return zero.
+ */
+static int __pm_generic_call(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+
+ if (!pm || pm_runtime_suspended(dev))
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ callback = pm->suspend;
+ break;
+ case PM_EVENT_FREEZE:
+ callback = pm->freeze;
+ break;
+ case PM_EVENT_HIBERNATE:
+ callback = pm->poweroff;
+ break;
+ case PM_EVENT_THAW:
+ callback = pm->thaw;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ return callback ? callback(dev) : 0;
+}
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * __pm_generic_resume - Generic resume/restore callback for subsystems.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * Execute the resume/restore callback provided by the @dev's driver, if
+ * defined. If it returns 0, change the device's runtime PM status to 'active'.
+ * Return the callback's error code.
+ */
+static int __pm_generic_resume(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+ int ret;
+
+ if (!pm)
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_RESUME:
+ callback = pm->resume;
+ break;
+ case PM_EVENT_RESTORE:
+ callback = pm->restore;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ if (!callback)
+ return 0;
+
+ ret = callback(dev);
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+#endif /* CONFIG_PM_SLEEP */
+
+struct dev_pm_ops generic_subsys_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+#endif
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+};
+EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
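
As a reading aid (not part of the patch), here is a minimal sketch of how these
helpers are meant to be wired up: a hypothetical bus type reuses
generic_subsys_pm_ops wholesale, and the generic callbacks then dispatch to
whatever dev_pm_ops the bound driver provides. All foo_* names are
illustrative, and the sketch assumes generic_subsys_pm_ops is declared in
<linux/pm.h>.

	/* Hypothetical example only; foo_* names are made up. */
	#include <linux/device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	/* Bus side: reuse the generic callbacks wholesale. */
	static struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &generic_subsys_pm_ops,
	};

	/* Driver side: pm_generic_runtime_suspend()/..._resume() reach these
	 * through dev->driver->pm, and pm_generic_suspend() skips
	 * foo_suspend() entirely if the device is already runtime-suspended. */
	static int foo_runtime_suspend(struct device *dev)
	{
		return 0;	/* put the hardware into a low-power state */
	}

	static int foo_runtime_resume(struct device *dev)
	{
		return 0;	/* bring the hardware back to full power */
	}

	static int foo_suspend(struct device *dev)
	{
		return 0;	/* system sleep entry */
	}

	static int foo_resume(struct device *dev)
	{
		/* __pm_generic_resume() marks the device active in the
		 * runtime PM framework when this returns 0 */
		return 0;
	}

	static const struct dev_pm_ops foo_driver_pm_ops = {
		.suspend	 = foo_suspend,
		.resume		 = foo_resume,
		.runtime_suspend = foo_runtime_suspend,
		.runtime_resume	 = foo_runtime_resume,
	};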
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a5142bddef41..941fcb87e52a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,6 +25,7 @@
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/async.h>
#include "../base.h"
#include "power.h"
@@ -34,14 +35,15 @@
* because children are guaranteed to be discovered after parents, and
* are inserted at the back of the list on discovery.
*
- * Since device_pm_add() may be called with a device semaphore held,
- * we must never try to acquire a device semaphore while holding
+ * Since device_pm_add() may be called with a device lock held,
+ * we must never try to acquire a device lock while holding
* dpm_list_mutex.
*/
LIST_HEAD(dpm_list);
static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
/*
* Set once the preparation of devices for a PM transition has started, reset
@@ -56,6 +58,7 @@ static bool transition_started;
void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
+ init_completion(&dev->power.completion);
pm_runtime_init(dev);
}
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev)
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
+ complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
@@ -188,6 +192,31 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
}
/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
* @ops: PM operations to choose from.
@@ -271,8 +300,9 @@ static int pm_noirq_op(struct device *dev,
ktime_t calltime, delta, rettime;
if (initcall_debug) {
- pr_info("calling %s_i+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -409,8 +439,23 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
+ if (error)
+ goto End;
}
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "EARLY type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "EARLY class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ }
+
+End:
TRACE_RESUME(error);
return error;
}
@@ -468,15 +513,19 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- down(&dev->sem);
+ dpm_wait(dev->parent, async);
+ device_lock(dev);
+
+ dev->power.status = DPM_RESUMING;
if (dev->bus) {
if (dev->bus->pm) {
@@ -509,12 +558,30 @@ static int device_resume(struct device *dev, pm_message_t state)
}
}
End:
- up(&dev->sem);
+ device_unlock(dev);
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+ put_device(dev);
+}
+
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -525,21 +592,33 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ struct device *dev;
ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.next);
+ pm_transition = state;
+
+ list_for_each_entry(dev, &dpm_list, power.entry) {
+ if (dev->power.status < DPM_OFF)
+ continue;
+ INIT_COMPLETION(dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_list)) {
+ dev = to_device(dpm_list.next);
get_device(dev);
- if (dev->power.status >= DPM_OFF) {
+ if (dev->power.status >= DPM_OFF && !is_async(dev)) {
int error;
- dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state);
+ error = device_resume(dev, state, false);
mutex_lock(&dpm_list_mtx);
if (error)
@@ -554,6 +633,7 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, NULL);
}
@@ -564,7 +644,7 @@ static void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
- down(&dev->sem);
+ device_lock(dev);
if (dev->class && dev->class->pm && dev->class->pm->complete) {
pm_dev_dbg(dev, state, "completing class ");
@@ -581,7 +661,7 @@ static void device_complete(struct device *dev, pm_message_t state)
dev->bus->pm->complete(dev);
}
- up(&dev->sem);
+ device_unlock(dev);
}
/**
@@ -670,10 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "LATE class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "LATE type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
+ }
+
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
+
+End:
return error;
}
@@ -731,16 +827,23 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static int async_error;
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state)
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
- down(&dev->sem);
+ dpm_wait_for_children(dev, async);
+ device_lock(dev);
+
+ if (async_error)
+ goto End;
if (dev->class) {
if (dev->class->pm) {
@@ -772,12 +875,44 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
+
+ if (!error)
+ dev->power.status = DPM_OFF;
+
End:
- up(&dev->sem);
+ device_unlock(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend(dev, pm_transition, true);
+ if (error) {
+ pm_dev_err(dev, pm_transition, " async", error);
+ async_error = error;
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ INIT_COMPLETION(dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend, dev);
+ return 0;
+ }
+
+ return __device_suspend(dev, pm_transition, false);
+}
+
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -790,13 +925,15 @@ static int dpm_suspend(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state);
+ error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -804,13 +941,17 @@ static int dpm_suspend(pm_message_t state)
put_device(dev);
break;
}
- dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
put_device(dev);
+ if (async_error)
+ break;
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
if (!error)
dpm_show_time(starttime, state, NULL);
return error;
@@ -828,7 +969,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
{
int error = 0;
- down(&dev->sem);
+ device_lock(dev);
if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
pm_dev_dbg(dev, state, "preparing ");
@@ -852,7 +993,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
suspend_report_result(dev->class->pm->prepare, error);
}
End:
- up(&dev->sem);
+ device_unlock(dev);
return error;
}
@@ -936,3 +1077,14 @@ void __suspend_report_result(const char *function, void *fn, int ret)
printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @dev: Device to wait for.
+ * @subordinate: Device that needs to wait for @dev.
+ */
+void device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+ dpm_wait(dev, subordinate->power.async_suspend);
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
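
A hedged sketch of how the new asynchronous path is consumed (the patch adds
the machinery but no users): a hypothetical driver opts in with
device_enable_async_suspend() and uses device_pm_wait_for_dev() for a
dependency the device hierarchy does not express. The foo_* names and the
file-scope supplier pointer are assumptions.

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Assumed to be looked up and stored elsewhere, e.g. at probe time. */
	static struct device *foo_supplier;

	static int foo_probe(struct device *dev)
	{
		/* suspend/resume this device in an async thread during system
		 * sleep transitions (subject to the pm_async_enabled switch) */
		device_enable_async_suspend(dev);
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* dev needs foo_supplier resumed first, but foo_supplier is
		 * not dev's parent, so the core cannot order the two itself */
		device_pm_wait_for_dev(dev, foo_supplier);
		/* ... reprogram the hardware ... */
		return 0;
	}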
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..c0bd03c83b9c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
#ifdef CONFIG_PM_SLEEP
-/*
- * main.c
- */
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+/* drivers/base/power/main.c */
extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index f8b044e8aef7..b0ec0e9f27e9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -229,14 +229,16 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
if (retval) {
dev->power.runtime_status = RPM_ACTIVE;
- pm_runtime_cancel_pending(dev);
-
if (retval == -EAGAIN || retval == -EBUSY) {
- notify = true;
+ if (dev->power.timer_expires == 0)
+ notify = true;
dev->power.runtime_error = 0;
+ } else {
+ pm_runtime_cancel_pending(dev);
}
} else {
dev->power.runtime_status = RPM_SUSPENDED;
+ pm_runtime_deactivate_timer(dev);
if (dev->parent) {
parent = dev->parent;
@@ -659,8 +661,6 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
- else if (dev->power.runtime_status == RPM_SUSPENDING)
- retval = -EINPROGRESS;
else if (atomic_read(&dev->power.usage_count) > 0
|| dev->power.disable_depth > 0)
retval = -EAGAIN;
@@ -1011,6 +1011,50 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
+ * pm_runtime_forbid - Block run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (!dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = false;
+ atomic_inc(&dev->power.usage_count);
+ __pm_runtime_resume(dev, false);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock run-time PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = true;
+ if (atomic_dec_and_test(&dev->power.usage_count))
+ __pm_runtime_idle(dev);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
* pm_runtime_init - Initialize run-time PM fields in given device object.
* @dev: Device object to initialize.
*/
@@ -1028,6 +1072,7 @@ void pm_runtime_init(struct device *dev)
atomic_set(&dev->power.child_count, 0);
pm_suspend_ignore_children(dev, false);
+ dev->power.runtime_auto = true;
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
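
For reference, a hedged sketch of how the new pm_runtime_forbid() and
pm_runtime_allow() pair is intended to be used (the patch adds no callers
besides the sysfs code below): bus or driver code can block runtime PM by
default, leaving it to userspace to lift the block through the power/control
attribute. foo_bus_probe() is an illustrative name.

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int foo_bus_probe(struct device *dev)
	{
		/* default to "on": bump usage_count and clear runtime_auto so
		 * the device is kept active until userspace writes "auto" to
		 * power/control, which ends up in pm_runtime_allow() */
		pm_runtime_forbid(dev);
		pm_runtime_enable(dev);
		return 0;
	}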
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..a4c33bc51257 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -4,9 +4,26 @@
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/pm_runtime.h>
+#include <asm/atomic.h>
#include "power.h"
/*
+ * control - Report/change current runtime PM setting of the device
+ *
+ * Runtime power management of a device can be blocked with the help of
+ * this attribute. All devices have one of the following two values for
+ * the power/control file:
+ *
+ * + "auto\n" to allow the device to be power managed at run time;
+ * + "on\n" to prevent the device from being power managed at run time;
+ *
+ * The default for all devices is "auto", which means that devices may be
+ * subject to automatic power management, depending on their drivers.
+ * Changing this attribute to "on" prevents the driver from power managing
+ * the device at run time. Doing that while the device is suspended causes
+ * it to be woken up.
+ *
* wakeup - Report/change current wakeup option for device
*
* Some devices support "wakeup" events, which are hardware signals
@@ -38,11 +55,61 @@
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * Asynchronous suspend and resume of the device during system-wide power
+ * state transitions can be enabled by writing "enabled" to this file.
+ * Analogously, if "disabled" is written to this file, the device will be
+ * suspended and resumed synchronously.
+ *
+ * All devices have one of the following two values for power/async:
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ * + "disabled\n" to forbid it;
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * those cases it should be safe to leave the default value.
*/
static const char enabled[] = "enabled";
static const char disabled[] = "disabled";
+#ifdef CONFIG_PM_RUNTIME
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device * dev, struct device_attribute *attr,
+ const char * buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ pm_runtime_allow(dev);
+ else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ pm_runtime_forbid(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(control, 0644, control_show, control_store);
+#endif
+
static ssize_t
wake_show(struct device * dev, struct device_attribute *attr, char * buf)
{
@@ -77,9 +144,101 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_RUNTIME
+
+static ssize_t rtpm_usagecount_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+
+static ssize_t rtpm_children_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+
+static ssize_t rtpm_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ return sprintf(buf, "disabled & forbidden\n");
+ else if (dev->power.disable_depth)
+ return sprintf(buf, "disabled\n");
+ else if (dev->power.runtime_auto == false)
+ return sprintf(buf, "forbidden\n");
+ return sprintf(buf, "enabled\n");
+}
+
+static ssize_t rtpm_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (dev->power.runtime_error)
+ return sprintf(buf, "error\n");
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ return sprintf(buf, "suspended\n");
+ case RPM_SUSPENDING:
+ return sprintf(buf, "suspending\n");
+ case RPM_RESUMING:
+ return sprintf(buf, "resuming\n");
+ case RPM_ACTIVE:
+ return sprintf(buf, "active\n");
+ }
+ return -EIO;
+}
+
+static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
+static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
+static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+
+#endif
+
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ device_async_suspend_enabled(dev) ? enabled : disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ device_enable_async_suspend(dev);
+ else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ device_disable_async_suspend(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(async, 0644, async_show, async_store);
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_control.attr,
+#endif
&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_async.attr,
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif
+#endif
NULL,
};
static struct attribute_group pm_attr_group = {
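
Finally, a hedged userspace sketch (not part of the patch) exercising the new
attributes; the sysfs path is a placeholder, and the async file is only
present when CONFIG_PM_ADVANCED_DEBUG is set.

	#include <stdio.h>

	static int write_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);	/* the kernel accepts the value with or without '\n' */
		return fclose(f);
	}

	int main(void)
	{
		/* allow runtime PM; handled by control_store() -> pm_runtime_allow() */
		write_attr("/sys/devices/.../power/control", "auto");
		/* opt the device in to async suspend/resume */
		write_attr("/sys/devices/.../power/async", "enabled");
		return 0;
	}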