@@ -122,6 +122,33 @@ u64 pm_runtime_suspended_time(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+/**
+ * pm_runtime_set_next_event - Notify PM framework of an impending event.
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ */
+int pm_runtime_set_next_event(struct device *dev, ktime_t next)
+{
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ /*
+ * Note the next pending wakeup only while the device has runtime
+ * PM enabled and @next is still in the future; else return -EINVAL.
+ */
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (!dev->power.disable_depth) {
+ if (ktime_before(ktime_get(), next)) {
+ dev->power.next_event = next;
+ ret = 0;
+ }
+ }
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_next_event);
+
/**
* pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
* @dev: Device to handle.
@@ -1380,6 +1407,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/* Update time accounting before disabling PM-runtime. */
update_pm_runtime_accounting(dev);
+ /* Reset the next wakeup for the device */
+ dev->power.next_event = KTIME_MAX;
+
if (!dev->power.disable_depth++)
__pm_runtime_barrier(dev);
@@ -1609,6 +1639,7 @@ void pm_runtime_init(struct device *dev)
dev->power.deferred_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
+ dev->power.next_event = KTIME_MAX;
dev->power.timer_expires = 0;
hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
dev->power.suspend_timer.function = pm_suspend_timer_fn;
@@ -8,6 +8,7 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -616,6 +617,7 @@ struct dev_pm_info {
u64 active_time;
u64 suspended_time;
u64 accounting_timestamp;
+ ktime_t next_event;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
@@ -59,6 +59,7 @@ extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);
+extern int pm_runtime_set_next_event(struct device *dev, ktime_t next);
/**
* pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
Some devices have a predictable interrupt pattern while executing a particular use case. An example would be the VSYNC interrupt on devices associated with displays. A 60 Hz display could cause a periodic interrupt every 16 ms. A PM domain that holds such a device could power off and on at similar intervals. But when the domain is a supplier for multiple such devices, the idle pattern for the domain is a bit muddled, but still deducible. If the next wakeup is known for all devices in a domain, it becomes easy to find out the sleep duration for the domain. By allowing drivers to notify runtime PM of their device's future wakeup, we can make better PM domain governor idle state decisions when powering off the domain. For now, let's allow updating the device's next wakeup as long as runtime PM is enabled. If the device is runtime suspended, the domain could be as well and we don't want to wake up the domain to recompute the domain idle state. As a first step, the onus is on the drivers to update their device's wakeup correctly before the device and the domain enter runtime idle. Signed-off-by: Lina Iyer <ilina@codeaurora.org> --- drivers/base/power/runtime.c | 31 +++++++++++++++++++++++++++++++ include/linux/pm.h | 2 ++ include/linux/pm_runtime.h | 1 + 3 files changed, 34 insertions(+)