On Tue, Jan 10, 2017 at 10:00:38PM +0100, Peter Zijlstra wrote: > > +static inline int add_chain_cache_classes(unsigned int prev, > > + unsigned int next, > > + unsigned int irq_context, > > + u64 chain_key) > > +{ > > + struct hlist_head *hash_head = chainhashentry(chain_key); > > + struct lock_chain *chain; > > + > > + /* > > + * Allocate a new chain entry from the static array, and add > > + * it to the hash: > > + */ > > + > > + /* > > + * We might need to take the graph lock, ensure we've got IRQs > > + * disabled to make this an IRQ-safe lock.. for recursion reasons > > + * lockdep won't complain about its own locking errors. > > + */ > > + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) > > + return 0; > > + > > + if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { > > + if (!debug_locks_off_graph_unlock()) > > + return 0; > > + > > + print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); > > + dump_stack(); > > + return 0; > > + } > > + > > + chain = lock_chains + nr_lock_chains++; > > + chain->chain_key = chain_key; > > + chain->irq_context = irq_context; > > + chain->depth = 2; > > + if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { > > + chain->base = nr_chain_hlocks; > > + nr_chain_hlocks += chain->depth; > > + chain_hlocks[chain->base] = prev - 1; > > + chain_hlocks[chain->base + 1] = next -1; > > + } > > You didn't copy this part right. There is no error path when > > nr_chain_hlocks would exceed MAX_LOCKDEP_CHAIN_HLOCKS. Oh my god! I am sorry. I missed it. Thank you, Byungchul > > > > + hlist_add_head_rcu(&chain->entry, hash_head); > > + debug_atomic_inc(chain_lookup_misses); > > + inc_chains(); > > + > > + return 1; > > +} > > + > > static inline int add_chain_cache(struct task_struct *curr, > > struct held_lock *hlock, > > u64 chain_key) > > -- > > 1.9.1 > > -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . 
Don't email: <a href="mailto:dont@xxxxxxxxx"> email@xxxxxxxxx </a>