On 11/29/19 8:04 AM, Sebastian Andrzej Siewior wrote:
> The phandle cache code allocates memory while holding devtree_lock which
> is a raw_spinlock_t. Memory allocation (and free()) is not possible on
> RT while a raw_spinlock_t is held.
> Invoke the kfree() and kcalloc() while the lock is dropped.
>
> Cc: Rob Herring <robh+dt@xxxxxxxxxx>
> Cc: Frank Rowand <frowand.list@xxxxxxxxx>
> Cc: devicetree@xxxxxxxxxxxxxxx
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
> ---
> v1…v2:
>   - Add comments regarding memory allocation under the raw_spinlock_t
>   - use a temporary variable for the memory allocation.
>
>  drivers/of/base.c | 32 +++++++++++++++++++++++---------
>  1 file changed, 23 insertions(+), 9 deletions(-)
>
> --- a/drivers/of/base.c
> +++ b/drivers/of/base.c
> @@ -136,33 +136,37 @@ static struct device_node **phandle_cach
>  static u32 phandle_cache_mask;
>
>  /*
> - * Caller must hold devtree_lock.
> + * Caller must hold devtree_lock under which kfree() can't be used so caller
> + * needs to kfree() the returned pointer.
>   */
> -static void __of_free_phandle_cache(void)
> +static struct device_node **__of_free_phandle_cache(void)
>  {
>  	u32 cache_entries = phandle_cache_mask + 1;
>  	u32 k;
> +	struct device_node **shadow;
>
>  	if (!phandle_cache)
> -		return;
> +		return NULL;
>
>  	for (k = 0; k < cache_entries; k++)
>  		of_node_put(phandle_cache[k]);
>
> -	kfree(phandle_cache);
> +	shadow = phandle_cache;
>  	phandle_cache = NULL;
> +	return shadow;
>  }
>
>  int of_free_phandle_cache(void)
>  {
>  	unsigned long flags;
> +	struct device_node **shadow;
>
>  	raw_spin_lock_irqsave(&devtree_lock, flags);
>
> -	__of_free_phandle_cache();
> +	shadow = __of_free_phandle_cache();
>
>  	raw_spin_unlock_irqrestore(&devtree_lock, flags);
> -
> +	kfree(shadow);
>  	return 0;
>  }
>  #if !defined(CONFIG_MODULES)
> @@ -197,10 +201,12 @@ void of_populate_phandle_cache(void)
>  	u32 cache_entries;
>  	struct device_node *np;
>  	u32 phandles = 0;
> +	struct device_node **shadow;
> +	struct device_node **tmp_phandle_cache;
>
>  	raw_spin_lock_irqsave(&devtree_lock, flags);
>
> -	__of_free_phandle_cache();
> +	shadow = __of_free_phandle_cache();
>
>  	for_each_of_allnodes(np)
>  		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
> @@ -208,12 +214,19 @@ void of_populate_phandle_cache(void)
>
>  	if (!phandles)
>  		goto out;
> +	/*
> +	 * kcalloc() can't be used while a raw_spinlock_t held so it is dropped
> +	 * for the allocation.
> +	 */
> +	raw_spin_unlock_irqrestore(&devtree_lock, flags);
>
>  	cache_entries = roundup_pow_of_two(phandles);
>  	phandle_cache_mask = cache_entries - 1;
>
> -	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
> -				GFP_ATOMIC);
> +	tmp_phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
> +				    GFP_ATOMIC);
> +	raw_spin_lock_irqsave(&devtree_lock, flags);
> +	phandle_cache = tmp_phandle_cache;
>  	if (!phandle_cache)
>  		goto out;
>
> @@ -225,6 +238,7 @@ void of_populate_phandle_cache(void)
>
>  out:
>  	raw_spin_unlock_irqrestore(&devtree_lock, flags);
> +	kfree(shadow);
>  }
>
>  void __init of_core_init(void)
>

Reviewed-by: Frank Rowand <frank.rowand@xxxxxxxx>

Compiled and booted on 5.4. Did not do any more detailed testing.

-Frank
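
For reference, below is a minimal userspace sketch of the pattern the patch
applies: detach the old buffer while holding the lock, then call free() and
the allocator only after the lock has been dropped. The motivation from the
commit message is that on PREEMPT_RT the memory allocator takes sleeping
locks internally, so allocating or freeing under a raw_spinlock_t is not
allowed. All names here (demo_lock, demo_cache, and so on) are illustrative
and not from drivers/of/base.c; a pthread mutex stands in for the raw
spinlock so the sketch can actually be compiled and run.

/*
 * Sketch of the "shadow pointer" pattern: never call free()/calloc()
 * while holding the lock that protects the cache pointer. Instead,
 * detach the old buffer under the lock and free it afterwards.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int *demo_cache;		/* protected by demo_lock */

/* Caller holds demo_lock; returns the old buffer for the caller to free. */
static int *demo_cache_detach(void)
{
	int *shadow = demo_cache;

	demo_cache = NULL;
	return shadow;
}

static void demo_cache_repopulate(size_t entries)
{
	int *shadow, *tmp;

	pthread_mutex_lock(&demo_lock);
	shadow = demo_cache_detach();	/* old buffer, freed below */
	pthread_mutex_unlock(&demo_lock);

	/* Allocate with the lock dropped, mirroring the kcalloc() move. */
	tmp = calloc(entries, sizeof(*tmp));

	pthread_mutex_lock(&demo_lock);
	demo_cache = tmp;		/* publish the new buffer under the lock */
	pthread_mutex_unlock(&demo_lock);

	free(shadow);			/* free only after unlocking */
}

int main(void)
{
	demo_cache_repopulate(16);
	demo_cache_repopulate(32);	/* frees the 16-entry buffer outside the lock */
	free(demo_cache);		/* final cleanup; single-threaded here */
	return 0;
}

The detach-then-free split keeps the pointer update itself inside the
critical section, so readers never see a stale pointer, while pushing every
allocator call outside it. It also opens a window in which the lock is not
held between detaching the old cache and publishing the new one, which is
the trade-off the patch makes in of_populate_phandle_cache().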