> > @@ -1776,25 +1776,26 @@ int of_update_property(struct device_node *np,
> struct property *newprop)
> > if (!newprop->name)
> > return -EINVAL;
> >
> > - oldprop = of_find_property(np, newprop->name, NULL);
> > - if (!oldprop)
> > - return of_add_property(np, newprop);
> > -
> > raw_spin_lock_irqsave(&devtree_lock, flags);
> > - next = &np->properties;
> > - while (*next) {
> > + oldprop = __of_find_property(np, newprop->name, NULL);
> > + if (!oldprop) {
> > + /* add the node */
> > + rc = __of_add_property(np, newprop);
> > + } else {
>
> if you changed this line to:
> } else while (*next) {
> then most of the other changes go away. You don't need the separate
> while loop

Yes, I will fix this.

> and the function remains largely identical aside from moving
> the __of_find_property() into the spinlock.

But from the following code we can see that, if oldprop != NULL — meaning
that we have found it — we should just do the update later:

+++++++++++++++
oldprop = of_find_property(np, newprop->name, NULL);
if (!oldprop)
return of_add_property(np, newprop);
---------------

> > /* found the node */
> > newprop->next = oldprop->next;
> > *next = newprop;
> > oldprop->next = np->deadprops;
> > np->deadprops = oldprop;
> > - found = 1;

The reason the 'found' flag was needed is that the oldprop may be removed
after of_find_property() returns but just before the spin_lock is taken.
So using __of_find_property() and __of_add_property(), and moving them
inside the spinlock, avoids this race...
Thanks,

--
BRs,
Xiubo

> > - break;
> > }
> > - next = &(*next)->next;
> > }
> > raw_spin_unlock_irqrestore(&devtree_lock, flags);
> > +
> > if (rc)
> > return rc;
> >
> > @@ -1803,9 +1804,6 @@ int of_update_property(struct device_node *np, struct
> property *newprop)
> > sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
> > __of_add_property_sysfs(np, newprop);
> >
> > - if (!found)
> > - return -ENODEV;
> > -
> > return 0;
> > }
> >
> > --
> > 1.8.4
> >
> >

--
To unsubscribe from this list: send the line "unsubscribe devicetree" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html