Hi Stephen, On Thu, 2024-08-08 at 13:14 -0700, Stephen Boyd wrote: > > +static struct of_clk_stdout_clks { > > + bool bump_refs; > > + > > + struct mutex lock; > > + bool have_all; > > + struct clk **clks; > > + size_t n_clks; > > +} of_clk_stdout_clks = { > > This can be initdata? With the __ref wrapper you suggested below that's indeed possible. Without, it's not, and I wasn't aware of __ref. Thanks for the pointer! > > + .lock = __MUTEX_INITIALIZER(of_clk_stdout_clks.lock), > > +}; > > + > > +static int __init of_clk_bump_stdout_clocks_param(char *str) > > +{ > > + of_clk_stdout_clks.bump_refs = true; > > + return 0; > > +} > > +__setup("earlycon", of_clk_bump_stdout_clocks_param); > > +__setup_param("earlyprintk", of_clk_keep_stdout_clocks_earlyprintk, > > + of_clk_bump_stdout_clocks_param, 0); > > + > > +static void of_clk_bump_stdout_clocks(void) > > This can be __init? ditto. Cheers, Andre' > > > +{ > > + size_t n_clks; > > + > > + /* > > + * We only need to run this code if required to do so and only ever > > + * before late initcalls have run. Otherwise it'd be impossible to know > > + * when to drop the extra clock references again. > > + * > > + * This generally means that this only works if on affected platforms > > + * the clock drivers have been built-in (as opposed to being modules). > > + */ > > + if (!of_clk_stdout_clks.bump_refs) > > + return; > > + > > + n_clks = of_clk_get_parent_count(of_stdout); > > + if (!n_clks || !of_stdout) > > + return; > > + > > + mutex_lock(&of_clk_stdout_clks.lock); > > + > > + /* > > + * We only need to keep trying if we have not succeeded previously, > > + * i.e. if not all required clocks were ready during previous attempts. 
> > + */ > > + if (of_clk_stdout_clks.have_all) > > + goto out_unlock; > > + > > + if (!of_clk_stdout_clks.clks) { > > + of_clk_stdout_clks.n_clks = n_clks; > > + > > + of_clk_stdout_clks.clks = kcalloc(of_clk_stdout_clks.n_clks, > > + sizeof(*of_clk_stdout_clks.clks), > > + GFP_KERNEL); > > + if (!of_clk_stdout_clks.clks) > > + goto out_unlock; > > + } > > + > > + /* assume that this time we'll be able to grab all required clocks */ > > + of_clk_stdout_clks.have_all = true; > > + for (size_t i = 0; i < n_clks; ++i) { > > + struct clk *clk; > > + > > + /* we might have grabbed this clock in a previous attempt */ > > + if (of_clk_stdout_clks.clks[i]) > > + continue; > > + > > + clk = of_clk_get(of_stdout, i); > > + if (IS_ERR(clk)) { > > + /* retry next time if clock has not probed yet */ > > + of_clk_stdout_clks.have_all = false; > > + continue; > > + } > > + > > + if (clk_prepare_enable(clk)) { > > + clk_put(clk); > > + continue; > > + } > > + of_clk_stdout_clks.clks[i] = clk; > > + } > > + > > +out_unlock: > > + mutex_unlock(&of_clk_stdout_clks.lock); > > +} > > + > > +static int __init of_clk_drop_stdout_clocks(void) > > +{ > > + for (size_t i = 0; i < of_clk_stdout_clks.n_clks; ++i) { > > + clk_disable_unprepare(of_clk_stdout_clks.clks[i]); > > + clk_put(of_clk_stdout_clks.clks[i]); > > + } > > + > > + kfree(of_clk_stdout_clks.clks); > > + > > + /* > > + * Do not try to acquire stdout clocks after late initcalls, e.g. > > + * during further module loading, as we then wouldn't have a way to > > + * drop the references (and associated allocations) ever again. 
> > + */ > > + of_clk_stdout_clks.bump_refs = false; > > + > > + return 0; > > +} > > +late_initcall_sync(of_clk_drop_stdout_clocks); > > + > > /** > > * struct of_clk_provider - Clock provider registration structure > > * @link: Entry in global list of clock providers > > @@ -5031,6 +5156,8 @@ int of_clk_add_provider(struct device_node *np, > > > > fwnode_dev_initialized(&np->fwnode, true); > > > > + of_clk_bump_stdout_clocks(); > > This can be a wrapper function that isn't marked __init but which calls > the init function with __ref. That lets us free up as much code as > possible. We need to set a bool in of_clk_drop_stdout_clocks() that when > false doesn't call the __init functions that are wrapped though, i.e. > 'bump_refs'. Here's the structure: > > static bool bump_stdout_clks __ro_after_init = true; > > static int __init _of_clk_bump_stdout_clks(void) > { > ... > } > > static int __ref of_clk_bump_stdout_clks(void) > { > if (bump_stdout_clks) > return _of_clk_bump_stdout_clks(); > > return 0; > } > > static int __init of_clk_drop_stdout_clks(void) > { > bump_stdout_clks = false; > ... > } > late_initcall_sync(of_clk_drop_stdout_clks); > > > + > > return ret; > > } > > EXPORT_SYMBOL_GPL(of_clk_add_provider);