On Thu, 19 Mar 2015 18:59:41 -0700 Alexei Starovoitov <ast@xxxxxxxxxxxx> wrote: Some nits... > --- /dev/null > +++ b/kernel/trace/bpf_trace.c > @@ -0,0 +1,123 @@ > +/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com > + * > + * This program is free software; you can redistribute it and/or > + * modify it under the terms of version 2 of the GNU General Public > + * License as published by the Free Software Foundation. > + */ > +#include <linux/kernel.h> > +#include <linux/types.h> > +#include <linux/slab.h> > +#include <linux/bpf.h> > +#include <linux/filter.h> > +#include <linux/uaccess.h> > +#include "trace.h" > + > +static DEFINE_PER_CPU(int, bpf_prog_active); > + > +/** > + * trace_call_bpf - invoke BPF program > + * @prog - BPF program > + * @ctx - opaque context pointer > + * > + * kprobe handlers execute BPF programs via this helper. > + * Can be used from static tracepoints in the future. Should also state what the expected return values are. What does a return of "1" mean? > + */ > +unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) > +{ > + unsigned int ret; > + > + if (in_nmi()) /* not supported yet */ > + return 1; > + > + preempt_disable(); > + > + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { > + /* since some bpf program is already running on this cpu, You are no longer in the net/ world. The rest of the Linux kernel coding style is: /* * multi line comments */ Only DaveM gets away with that format ;-) > + * don't call into another bpf program (same or different) > + * and don't send kprobe event into ring-buffer, > + * so return zero here > + */ > + ret = 0; > + goto out; > + } > + > + rcu_read_lock(); > + ret = BPF_PROG_RUN(prog, ctx); > + rcu_read_unlock(); > + > + out: > + __this_cpu_dec(bpf_prog_active); > + preempt_enable(); > + > + return ret; > +} > +EXPORT_SYMBOL_GPL(trace_call_bpf); The rest looks fine. For that: 
Reviewed-by: Steven Rostedt <rostedt@xxxxxxxxxxx> -- Steve -- To unsubscribe from this list: send the line "unsubscribe linux-api" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html