@@ -26,12 +26,15 @@
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static DEFINE_MUTEX(enable_mutex_lock);
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
+static struct task_struct *enable_mutex_owner;
static int prepare_refcnt;
static int enable_refcnt;
+static int enable_mutex_refcnt;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
@@ -149,7 +152,7 @@ static void clk_prepare_unlock(void)
	mutex_unlock(&prepare_lock);
}
-static unsigned long clk_enable_lock(void)
+static unsigned long clk_enable_spin_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;
@@ -177,7 +180,7 @@ static unsigned long clk_enable_lock(void)
	return flags;
}
-static void clk_enable_unlock(unsigned long flags)
+static void clk_enable_spin_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
@@ -191,6 +194,52 @@ static void clk_enable_unlock(unsigned long flags)
	spin_unlock_irqrestore(&enable_lock, flags);
}
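+
+/*
+ * Mutex-based counterpart of enable_lock for clocks whose enable/disable
+ * ops may sleep. Reentrant for the owning task, modeled on
+ * clk_prepare_lock() above.
+ */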
+static void clk_enable_mutex_lock(void)
+{
+	if (!mutex_trylock(&enable_mutex_lock)) {
+		if (enable_mutex_owner == current) {
+			enable_mutex_refcnt++;
+			return;
+		}
+		mutex_lock(&enable_mutex_lock);
+	}
+	WARN_ON_ONCE(enable_mutex_owner != NULL);
+	WARN_ON_ONCE(enable_mutex_refcnt != 0);
+	enable_mutex_owner = current;
+	enable_mutex_refcnt = 1;
+}
+
+static void clk_enable_mutex_unlock(void)
+{
+	WARN_ON_ONCE(enable_mutex_owner != current);
+	WARN_ON_ONCE(enable_mutex_refcnt == 0);
+
+	if (--enable_mutex_refcnt)
+		return;
+	enable_mutex_owner = NULL;
+	mutex_unlock(&enable_mutex_lock);
+}
+
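+/*
+ * Take the mutex or the spinlock depending on the clock's
+ * CLK_ENABLE_MUTEX_LOCK flag; the returned IRQ flags are only
+ * meaningful on the spinlock path.
+ */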
+static unsigned long clk_enable_lock(struct clk_core *core)
+{
+	unsigned long flags = 0;
+
+	if (core && (core->flags & CLK_ENABLE_MUTEX_LOCK))
+		clk_enable_mutex_lock();
+	else
+		flags = clk_enable_spin_lock();
+
+	return flags;
+}
+
+static void clk_enable_unlock(struct clk_core *core, unsigned long flags)
+{
+	if (core && (core->flags & CLK_ENABLE_MUTEX_LOCK))
+		clk_enable_mutex_unlock();
+	else
+		clk_enable_spin_unlock(flags);
+}
+
static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
@@ -1111,9 +1160,9 @@ static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;
-	flags = clk_enable_lock();
+	flags = clk_enable_lock(core);
	clk_core_disable(core);
-	clk_enable_unlock(flags);
+	clk_enable_unlock(core, flags);
}
/**
@@ -1178,9 +1227,9 @@ static int clk_core_enable_lock(struct clk_core *core)
	unsigned long flags;
	int ret;
-	flags = clk_enable_lock();
+	flags = clk_enable_lock(core);
	ret = clk_core_enable(core);
-	clk_enable_unlock(flags);
+	clk_enable_unlock(core, flags);
	return ret;
}
@@ -1390,7 +1439,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
	if (clk_pm_runtime_get(core))
		goto unprepare_out;
-	flags = clk_enable_lock();
+	flags = clk_enable_lock(core);
	if (core->enable_count)
		goto unlock_out;
@@ -1413,7 +1462,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
	}
unlock_out:
-	clk_enable_unlock(flags);
+	clk_enable_unlock(core, flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
@@ -2042,9 +2091,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
	}
	/* update the clk tree topology */
-	flags = clk_enable_lock();
+	flags = clk_enable_lock(core);
	clk_reparent(core, parent);
-	clk_enable_unlock(flags);
+	clk_enable_unlock(core, flags);
	return old_parent;
}
@@ -2087,9 +2136,9 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
	trace_clk_set_parent_complete(core, parent);
	if (ret) {
-		flags = clk_enable_lock();
+		flags = clk_enable_lock(core);
		clk_reparent(core, old_parent);
-		clk_enable_unlock(flags);
+		clk_enable_unlock(core, flags);
		__clk_set_parent_after(core, old_parent, parent);
@@ -3388,6 +3437,7 @@ static const struct {
	ENTRY(CLK_IS_CRITICAL),
	ENTRY(CLK_OPS_PARENT_ENABLE),
	ENTRY(CLK_DUTY_CYCLE_PARENT),
+	ENTRY(CLK_ENABLE_MUTEX_LOCK),
#undef ENTRY
};
@@ -4410,9 +4460,9 @@ void clk_unregister(struct clk *clk)
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
-	flags = clk_enable_lock();
+	flags = clk_enable_lock(clk->core);
	clk->core->ops = &clk_nodrv_ops;
-	clk_enable_unlock(flags);
+	clk_enable_unlock(clk->core, flags);
	if (ops->terminate)
		ops->terminate(clk->core->hw);
@@ -32,6 +32,10 @@
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
+/*
+ * clock enable/disable ops access registers over a bus that may sleep
+ * (e.g. MDIO), so the enable lock must be a mutex rather than a spinlock
+ */
+#define CLK_ENABLE_MUTEX_LOCK BIT(14)
struct clk;
struct clk_hw;
Support clock controllers whose HW registers are accessed over an MDIO
bus: a spinlock cannot be used there because MDIO transfers sleep. Add
the flag CLK_ENABLE_MUTEX_LOCK to hint the clock framework to take a
mutex instead of the enable spinlock for such clocks.

Signed-off-by: Luo Jie <quic_luoj@quicinc.com>
---
 drivers/clk/clk.c            | 78 +++++++++++++++++++++++++++++-------
 include/linux/clk-provider.h |  4 ++
 2 files changed, 68 insertions(+), 14 deletions(-)
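
For context (not part of this patch): a provider whose gate control
register sits behind MDIO could opt in at registration time. A minimal
sketch, assuming hypothetical foo_* names; the enable/disable ops (not
shown) would perform the sleeping MDIO accesses:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_gate {
	struct clk_hw hw;
	/* MDIO bus handle used by the ops would live here */
};

/* hypothetical ops; both perform sleeping MDIO register accesses */
static int foo_mdio_gate_enable(struct clk_hw *hw);
static void foo_mdio_gate_disable(struct clk_hw *hw);

static const struct clk_ops foo_mdio_gate_ops = {
	.enable		= foo_mdio_gate_enable,
	.disable	= foo_mdio_gate_disable,
};

static struct clk_hw *foo_register_gate(struct device *dev)
{
	const struct clk_init_data init = {
		.name	= "foo_mdio_gate",
		.ops	= &foo_mdio_gate_ops,
		/* sleeping ops: ask the framework for the mutex-based lock */
		.flags	= CLK_ENABLE_MUTEX_LOCK,
	};
	struct foo_gate *gate;
	int ret;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);
	gate->hw.init = &init;

	ret = devm_clk_hw_register(dev, &gate->hw);
	return ret ? ERR_PTR(ret) : &gate->hw;
}

Note that a clock flagged this way makes clk_enable()/clk_disable()
sleep, so its consumers must not call them from atomic context.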