@@ -74,11 +74,75 @@ static const struct genpd_lock_fns genpd_mtx_fns = {
.unlock = genpd_unlock_mtx,
};
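+/*
+ * Spinlock based lock functions, used for IRQ safe PM domains. The IRQ
+ * flags saved by spin_lock_irqsave() are stashed in the genpd so that
+ * genpd_unlock_spin() can pass them back to spin_unlock_irqrestore().
+ */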
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
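+/*
+ * A spinlock cannot be taken in an interruptible fashion, so the IRQ safe
+ * "lock_interruptible" variant simply acquires the lock and returns 0.
+ */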
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_fns genpd_spin_fns = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
#define genpd_lock(p) p->lock_fns->lock(p)
#define genpd_lock_nested(p, d) p->lock_fns->lock_nested(p, d)
#define genpd_lock_interruptible(p) p->lock_fns->lock_interruptible(p)
#define genpd_unlock(p) p->lock_fns->unlock(p)
+#define genpd_is_irq_safe(genpd) ((genpd)->flags & GENPD_FLAG_IRQ_SAFE)
+
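+/*
+ * Returns true when an IRQ safe device is attached to a non-IRQ safe PM
+ * domain. Such a domain must not be powered off on behalf of the device,
+ * since its mutex based lock cannot be taken in atomic context.
+ */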
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ return pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+}
+
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
@@ -343,7 +393,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
- if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
+ /*
+ * Do not allow the PM domain to be powered off when an IRQ safe
+ * device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
not_suspended++;
}
@@ -506,10 +561,10 @@ static int genpd_runtime_suspend(struct device *dev)
}
/*
- * If power.irq_safe is set, this routine will be run with interrupts
- * off, so it can't use mutexes.
+ * If power.irq_safe is set, this routine may be run with IRQs
+ * disabled, so power off the PM domain only if it is IRQ safe as well.
*/
- if (dev->power.irq_safe)
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
return 0;
genpd_lock(genpd);
@@ -543,8 +598,11 @@ static int genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- /* If power.irq_safe, the PM domain is never powered off. */
- if (dev->power.irq_safe) {
+ /*
+ * As we don't power off a non-IRQ safe domain which holds
+ * an IRQ safe device, we don't need to restore power to it.
+ */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
timed = false;
goto out;
}
@@ -586,7 +644,7 @@ static int genpd_runtime_resume(struct device *dev)
err_stop:
genpd_stop_dev(genpd, dev);
err_poweroff:
- if (!dev->power.irq_safe) {
+ if (!dev->power.irq_safe || genpd_is_irq_safe(genpd)) {
genpd_lock(genpd);
genpd_poweroff(genpd, 0);
genpd_unlock(genpd);
@@ -1117,6 +1176,11 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
+ /* Warn once when an IRQ safe device is added to a non-IRQ safe domain */
+ if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
genpd_lock(genpd);
if (genpd->prepared_count > 0) {
@@ -1211,6 +1275,17 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
|| genpd == subdomain)
return -EINVAL;
+ /*
+ * If the domain can be powered on/off in an IRQ safe
+ * context, ensure that the subdomain can also be
+ * powered on/off in that context.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
@@ -1375,6 +1450,21 @@ static int genpd_of_parse(struct generic_pm_domain *genpd)
return pm_genpd_of_parse_power_states(genpd);
}
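+/*
+ * IRQ safe PM domains are locked with a spinlock, so that they can be
+ * powered on/off from atomic context; all other domains keep the mutex.
+ */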
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_fns = &genpd_spin_fns;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_fns = &genpd_mtx_fns;
+ }
+}
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1394,8 +1480,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
- mutex_init(&genpd->mlock);
- genpd->lock_fns = &genpd_mtx_fns;
+ genpd_lock_init(genpd);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
@@ -1839,7 +1924,9 @@ static int pm_genpd_summary_one(struct seq_file *s,
}
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
- kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
@@ -15,9 +15,11 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic context */
#define GENPD_MAX_NUM_STATES 8 /* Number of possible low power states */
@@ -76,7 +78,13 @@ struct generic_pm_domain {
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
const struct genpd_lock_fns *lock_fns;
- struct mutex mlock;
+ union {
+ struct mutex mlock; /* used when !GENPD_FLAG_IRQ_SAFE */
+ struct {
+ spinlock_t slock; /* used when GENPD_FLAG_IRQ_SAFE */
+ unsigned long lock_flags; /* IRQ flags saved for slock */
+ };
+ };
};