On Sun, Aug 25, 2019 at 09:23:24PM +0800, Changbin Du wrote:
> When CONFIG_FTRACE_FUNC_PROTOTYPE is enabled, thousands of
> ftrace_func_entry instances are created. So create a dedicated
> memcache to enhance performance.
>
> Signed-off-by: Changbin Du <changbin.du@xxxxxxxxx>
> ---
>  kernel/trace/ftrace.c | 17 ++++++++++++++++-
>  1 file changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index a314f0768b2c..cfcb8dad93ea 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -94,6 +94,8 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
>  /* What to set function_trace_op to */
>  static struct ftrace_ops *set_function_trace_op;
>
> +struct kmem_cache *hash_entry_cache;
> +
>  static bool ftrace_pids_enabled(struct ftrace_ops *ops)
>  {
>  	struct trace_array *tr;
> @@ -1169,7 +1171,7 @@ static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip,
>  {
>  	struct ftrace_func_entry *entry;
>
> -	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
> +	entry = kmem_cache_alloc(hash_entry_cache, GFP_KERNEL);
>  	if (!entry)
>  		return -ENOMEM;
>
> @@ -6153,6 +6155,15 @@ void __init ftrace_init(void)
>  	if (ret)
>  		goto failed;
>
> +	hash_entry_cache = kmem_cache_create("ftrace-hash",
> +			sizeof(struct ftrace_func_entry),
> +			sizeof(struct ftrace_func_entry),
> +			0, NULL);
> +	if (!hash_entry_cache) {
> +		pr_err("failed to create ftrace hash entry cache\n");
> +		goto failed;
> +	}

Wait what; you already have them in the binary image, now you're
allocating extra memory for each of them?

Did you look at what ORC does? Is the binary search really not fast
enough?
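
To illustrate the alternative: ORC keeps its unwind data in sorted,
read-only tables emitted into the image at build time, and finds an
entry at runtime with a binary search (narrowed by a lookup table),
so nothing is allocated per entry at boot. A rough userspace sketch
of that style of lookup -- the names func_record, func_table and
lookup_func are hypothetical, not the actual ORC structures:

#include <stdio.h>

/*
 * Hypothetical sketch: find a function's record by binary-searching
 * a sorted table that lives in the image itself, instead of
 * allocating a hash entry per function.
 */
struct func_record {
	unsigned long ip;	/* function entry address (sorted key) */
	const char *proto;	/* prototype string for this function */
};

/*
 * In the kernel this table would be generated at build time into a
 * read-only section; here it is just a static sorted array.
 */
static const struct func_record func_table[] = {
	{ 0x1000, "int foo(void)" },
	{ 0x1040, "void bar(int)" },
	{ 0x10c0, "long baz(char *)" },
};

static const struct func_record *lookup_func(unsigned long ip)
{
	size_t lo = 0, hi = sizeof(func_table) / sizeof(func_table[0]);

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (func_table[mid].ip < ip)
			lo = mid + 1;
		else if (func_table[mid].ip > ip)
			hi = mid;
		else
			return &func_table[mid];
	}
	return NULL;	/* not found */
}

int main(void)
{
	const struct func_record *rec = lookup_func(0x1040);

	printf("%s\n", rec ? rec->proto : "unknown");
	return 0;
}

The trade-off being questioned: the binary search costs O(log n) per
lookup but zero runtime memory, whereas the patch above spends a slab
allocation per function to get O(1) hash lookups.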