Some unused clocks need to stay enabled past clk_disable_unused and should
instead be disabled later, on sync_state. Provide a generic sync_state
callback for clock providers that register such clocks. From that callback,
reuse the clk_disable_unused mechanism, but pass the provider's device so
that only unused clocks belonging to that provider get disabled. Also, during
the default clk_disable_unused pass, leave a clock enabled if the driver that
registered it has the generic clk_sync_state_disable_unused callback set as
its sync_state hook.

Signed-off-by: Abel Vesa <abel.vesa@xxxxxxxxxx>
---
 drivers/clk/clk.c            | 67 +++++++++++++++++++++++++++---------
 include/linux/clk-provider.h |  1 +
 2 files changed, 52 insertions(+), 16 deletions(-)
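
For reference (not part of the patch), a minimal sketch of how a clock
provider driver would opt in to the new callback. The "foo" identifiers are
hypothetical; the point is only that a provider sets .sync_state to
clk_sync_state_disable_unused so its unused clocks are left alone at
late_initcall time and disabled once all of its consumers have probed:

#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_clk_probe(struct platform_device *pdev)
{
	/* Register this provider's clocks here, e.g. via devm_clk_hw_register(). */
	return 0;
}

static const struct of_device_id foo_clk_match[] = {
	{ .compatible = "vendor,foo-clk" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_clk_match);

static struct platform_driver foo_clk_driver = {
	.probe = foo_clk_probe,
	.driver = {
		.name = "foo-clk",
		.of_match_table = foo_clk_match,
		/*
		 * Defer disabling of this provider's unused clocks from
		 * clk_disable_unused() to sync_state.
		 */
		.sync_state = clk_sync_state_disable_unused,
	},
};
module_platform_driver(foo_clk_driver);

MODULE_LICENSE("GPL");
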
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7fc191c15507..ea55806505c0 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1218,19 +1218,31 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
-static void __init clk_unprepare_unused_subtree(struct clk_core *core)
+static void clk_unprepare_unused_subtree(struct clk_core *core,
+					 struct device *dev)
 {
 	struct clk_core *child;
 
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
+		clk_unprepare_unused_subtree(child, dev);
+
+	if (dev && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be unprepared on sync_state,
+	 * so leave as is on clk_disable_unused
+	 */
+	if (!dev && dev_has_sync_state(core->dev) &&
+	    core->dev->driver->sync_state == clk_sync_state_disable_unused)
+		return;
 
 	if (core->prepare_count)
 		return;
 
-	if (core->flags & CLK_IGNORE_UNUSED)
+	if (!dev && core->flags & CLK_IGNORE_UNUSED)
 		return;
 
 	if (clk_pm_runtime_get(core))
@@ -1248,7 +1260,8 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 	clk_pm_runtime_put(core);
 }
 
-static void __init clk_disable_unused_subtree(struct clk_core *core)
+static void clk_disable_unused_subtree(struct clk_core *core,
+				       struct device *dev)
 {
 	struct clk_core *child;
 	unsigned long flags;
@@ -1256,7 +1269,18 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 	lockdep_assert_held(&prepare_lock);
 
 	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
+		clk_disable_unused_subtree(child, dev);
+
+	if (dev && core->dev != dev)
+		return;
+
+	/*
+	 * clock will be disabled on sync_state,
+	 * so leave as is on clk_disable_unused
+	 */
+	if (!dev && dev_has_sync_state(core->dev) &&
+	    core->dev->driver->sync_state == clk_sync_state_disable_unused)
+		return;
 
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
@@ -1269,7 +1293,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 	if (core->enable_count)
 		goto unlock_out;
 
-	if (core->flags & CLK_IGNORE_UNUSED)
+	if (!dev && core->flags & CLK_IGNORE_UNUSED)
 		goto unlock_out;
 
 	/*
@@ -1302,35 +1326,46 @@ static int __init clk_ignore_unused_setup(char *__unused)
 }
 __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
-static int __init clk_disable_unused(void)
+static void __clk_disable_unused(struct device *dev)
 {
 	struct clk_core *core;
 
-	if (clk_ignore_unused) {
-		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
-	}
-
 	clk_prepare_lock();
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
+		clk_disable_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
+		clk_unprepare_unused_subtree(core, dev);
 
 	clk_prepare_unlock();
+}
+
+static int __init clk_disable_unused(void)
+{
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	__clk_disable_unused(NULL);
 
 	return 0;
 }
 late_initcall_sync(clk_disable_unused);
 
+void clk_sync_state_disable_unused(struct device *dev)
+{
+	__clk_disable_unused(dev);
+}
+EXPORT_SYMBOL_GPL(clk_sync_state_disable_unused);
+
 static int clk_core_determine_round_nolock(struct clk_core *core,
 					   struct clk_rate_request *req)
 {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 72d937c03a3e..5d3ed2b14f2c 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -679,6 +679,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
 		void __iomem *reg, u8 shift, u8 width,
 		u8 clk_divider_flags, const struct clk_div_table *table,
 		spinlock_t *lock);
+void clk_sync_state_disable_unused(struct device *dev);
 /**
  * clk_register_divider - register a divider clock with the clock framework
  * @dev: device registering this clock
--
2.34.3