This patch adds netlink and ipv6_route bpfdump targets, reusing the same
seq_ops (except show() and stop()) as the existing
/proc/net/{netlink,ipv6_route} files. Since kernel modules are not
supported by bpfdump yet, the ipv6_route target is available only when
IPv6 is built-in, i.e., not compiled as a module. This restriction can be
lifted once modules are properly supported for bpfdump.

Signed-off-by: Yonghong Song <yhs@xxxxxx>
---
 include/linux/bpf.h      |  8 +++-
 kernel/bpf/dump.c        | 13 ++++++
 net/ipv6/ip6_fib.c       | 71 +++++++++++++++++++++++++++++-
 net/ipv6/route.c         | 29 +++++++++++++
 net/netlink/af_netlink.c | 94 +++++++++++++++++++++++++++++++++++++++-
 5 files changed, 210 insertions(+), 5 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 1179ca3d0230..401e5bf921a2 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1124,6 +1124,12 @@ struct bpf_dump_reg {
 	u32 target_feature;
 };
 
+struct bpf_dump_meta {
+	struct seq_file *seq;
+	u64 session_id;
+	u64 seq_num;
+};
+
 int bpf_dump_reg_target(struct bpf_dump_reg *reg_info);
 int bpf_dump_set_target_info(u32 target_fd, struct bpf_prog *prog);
 int bpf_fd_dump_create(u32 prog_fd, const char __user *dumper_name,
@@ -1131,7 +1137,7 @@ int bpf_fd_dump_create(u32 prog_fd, const char __user *dumper_name,
 int bpf_prog_dump_create(struct bpf_prog *prog);
 struct bpf_prog *bpf_dump_get_prog(struct seq_file *seq, u32 priv_data_size,
 				   u64 *session_id, u64 *seq_num, bool is_last);
-
+int bpf_dump_run_prog(struct bpf_prog *prog, void *ctx);
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
diff --git a/kernel/bpf/dump.c b/kernel/bpf/dump.c
index c6d4d64aaa8e..789b35772a81 100644
--- a/kernel/bpf/dump.c
+++ b/kernel/bpf/dump.c
@@ -487,6 +487,19 @@ struct bpf_prog *bpf_dump_get_prog(struct seq_file *seq, u32 priv_data_size,
 	return extra_data->prog;
 }
 
+int bpf_dump_run_prog(struct bpf_prog *prog, void *ctx)
+{
+	int ret;
+
+	migrate_disable();
+	rcu_read_lock();
+	ret = BPF_PROG_RUN(prog, ctx);
+	rcu_read_unlock();
+	migrate_enable();
+
+	return ret;
+}
+
 int bpf_dump_reg_target(struct bpf_dump_reg *reg_info)
 {
 	struct bpfdump_target_info *tinfo, *ptinfo;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 46ed56719476..f5a48511d233 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -2467,7 +2467,7 @@ void fib6_gc_cleanup(void)
 }
 
 #ifdef CONFIG_PROC_FS
-static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
 {
 	struct fib6_info *rt = v;
 	struct ipv6_route_iter *iter = seq->private;
@@ -2625,7 +2625,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
 	return w->node && !(w->state == FWS_U && w->node == w->root);
 }
 
-static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
 	__releases(RCU_BH)
 {
 	struct net *net = seq_file_net(seq);
@@ -2637,6 +2637,73 @@ static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
 	rcu_read_unlock_bh();
 }
 
+#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
+struct bpfdump__ipv6_route {
+	struct bpf_dump_meta *meta;
+	struct fib6_info *rt;
+};
+
+static int ipv6_route_prog_seq_show(struct bpf_prog *prog, struct seq_file *seq,
+				    u64 session_id, u64 seq_num, void *v)
+{
+	struct bpfdump__ipv6_route ctx;
+	struct bpf_dump_meta meta;
+	int ret;
+
+	meta.seq = seq;
+	meta.session_id = session_id;
+	meta.seq_num = seq_num;
+	ctx.meta = &meta;
+	ctx.rt = v;
+	ret = bpf_dump_run_prog(prog, &ctx);
+	return ret == 0 ? 0 : -EINVAL;
+}
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+	struct ipv6_route_iter *iter = seq->private;
+	u64 session_id, seq_num;
+	struct bpf_prog *prog;
+	int ret;
+
+	prog = bpf_dump_get_prog(seq, sizeof(struct ipv6_route_iter),
+				 &session_id, &seq_num, false);
+	if (!prog)
+		return ipv6_route_native_seq_show(seq, v);
+
+	ret = ipv6_route_prog_seq_show(prog, seq, session_id, seq_num, v);
+	iter->w.leaf = NULL;
+
+	return ret;
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+{
+	u64 session_id, seq_num;
+	struct bpf_prog *prog;
+
+	if (!v) {
+		prog = bpf_dump_get_prog(seq, sizeof(struct ipv6_route_iter),
+					 &session_id, &seq_num, true);
+		if (prog)
+			ipv6_route_prog_seq_show(prog, seq, session_id,
+						 seq_num, v);
+	}
+
+	ipv6_route_native_seq_stop(seq, v);
+}
+#else
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+	return ipv6_route_native_seq_show(seq, v);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+{
+	ipv6_route_native_seq_stop(seq, v);
+}
+#endif
+
 const struct seq_operations ipv6_route_seq_ops = {
 	.start = ipv6_route_seq_start,
 	.next = ipv6_route_seq_next,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 310cbddaa533..ea87d3f2c363 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -6390,10 +6390,31 @@ void __init ip6_route_init_special_entries(void)
   #endif
 }
 
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+int __init __bpfdump__ipv6_route(struct bpf_dump_meta *meta, struct fib6_info *rt)
+{
+	return 0;
+}
+#endif
+#endif
+
 int __init ip6_route_init(void)
 {
 	int ret;
 	int cpu;
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	struct bpf_dump_reg reg_info = {
+		.target = "ipv6_route",
+		.target_proto = "__bpfdump__ipv6_route",
+		.prog_ctx_type_name = "bpfdump__ipv6_route",
+		.seq_ops = &ipv6_route_seq_ops,
+		.seq_priv_size = sizeof(struct ipv6_route_iter),
+		.target_feature = BPF_DUMP_SEQ_NET_PRIVATE,
+	};
+#endif
+#endif
 
 	ret = -ENOMEM;
 	ip6_dst_ops_template.kmem_cachep =
@@ -6452,6 +6473,14 @@ int __init ip6_route_init(void)
 	if (ret)
 		goto out_register_late_subsys;
 
+#if IS_BUILTIN(CONFIG_IPV6)
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	ret = bpf_dump_reg_target(&reg_info);
+	if (ret)
+		goto out_register_late_subsys;
+#endif
+#endif
+
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5ded01ca8b20..fe9a10642c39 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2596,7 +2596,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	return __netlink_seq_next(seq);
 }
 
-static void netlink_seq_stop(struct seq_file *seq, void *v)
+static void netlink_native_seq_stop(struct seq_file *seq, void *v)
 {
 	struct nl_seq_iter *iter = seq->private;
 
@@ -2607,7 +2607,7 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
 }
 
 
-static int netlink_seq_show(struct seq_file *seq, void *v)
+static int netlink_native_seq_show(struct seq_file *seq, void *v)
 {
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -2634,6 +2634,80 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+struct bpfdump__netlink {
+	struct bpf_dump_meta *meta;
+	struct netlink_sock *sk;
+};
+
+int __init __bpfdump__netlink(struct bpf_dump_meta *meta, struct netlink_sock *sk)
+{
+	return 0;
+}
+
+static int netlink_prog_seq_show(struct bpf_prog *prog, struct seq_file *seq,
+				 u64 session_id, u64 seq_num, void *v)
+{
+	struct bpfdump__netlink ctx;
+	struct bpf_dump_meta meta;
+	int ret = 0;
+
+	meta.seq = seq;
+	meta.session_id = session_id;
+	meta.seq_num = seq_num;
+	ctx.meta = &meta;
+	ctx.sk = nlk_sk((struct sock *)v);
+	ret = bpf_dump_run_prog(prog, &ctx);
+
+	return ret == 0 ? 0 : -EINVAL;
+}
+
+static int netlink_seq_show(struct seq_file *seq, void *v)
+{
+	u64 session_id, seq_num;
+	struct bpf_prog *prog;
+
+	prog = bpf_dump_get_prog(seq, sizeof(struct nl_seq_iter),
+				 &session_id, &seq_num, false);
+	if (!prog)
+		return netlink_native_seq_show(seq, v);
+
+	if (v == SEQ_START_TOKEN)
+		return 0;
+
+	return netlink_prog_seq_show(prog, seq, session_id,
+				     seq_num - 1, v);
+}
+
+static void netlink_seq_stop(struct seq_file *seq, void *v)
+{
+	u64 session_id, seq_num;
+	struct bpf_prog *prog;
+
+	if (!v) {
+		prog = bpf_dump_get_prog(seq, sizeof(struct nl_seq_iter),
+					 &session_id, &seq_num, true);
+		if (prog) {
+			if (seq_num)
+				seq_num = seq_num - 1;
+			netlink_prog_seq_show(prog, seq, session_id,
+					      seq_num, v);
+		}
+	}
+
+	netlink_native_seq_stop(seq, v);
+}
+#else
+static int netlink_seq_show(struct seq_file *seq, void *v)
+{
+	return netlink_native_seq_show(seq, v);
+}
+static void netlink_seq_stop(struct seq_file *seq, void *v)
+{
+	netlink_native_seq_stop(seq, v);
+}
+#endif
+
 static const struct seq_operations netlink_seq_ops = {
 	.start = netlink_seq_start,
 	.next = netlink_seq_next,
@@ -2744,6 +2818,16 @@ static int __init netlink_proto_init(void)
 {
 	int i;
 	int err = proto_register(&netlink_proto, 0);
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	struct bpf_dump_reg reg_info = {
+		.target = "netlink",
+		.target_proto = "__bpfdump__netlink",
+		.prog_ctx_type_name = "bpfdump__netlink",
+		.seq_ops = &netlink_seq_ops,
+		.seq_priv_size = sizeof(struct nl_seq_iter),
+		.target_feature = BPF_DUMP_SEQ_NET_PRIVATE,
+	};
+#endif
 
 	if (err != 0)
 		goto out;
@@ -2764,6 +2848,12 @@ static int __init netlink_proto_init(void)
 		}
 	}
 
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+	err = bpf_dump_reg_target(&reg_info);
+	if (err)
+		goto out;
+#endif
+
 	netlink_add_usersock_entry();
 
 	sock_register(&netlink_family_ops);
-- 
2.24.1
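
As an illustration of how a dumper consumes the new targets, a minimal
program for the ipv6_route target could look like the sketch below. Only
the program context layout (struct bpfdump__ipv6_route and struct
bpf_dump_meta) is defined by this patch; the "dump/ipv6_route" section
name, the headers, and the user-space attach step are assumptions based on
the rest of the bpfdump series, and the sketch only counts routes because
the seq-file printing helper is introduced separately.

  /* Hypothetical dumper for the ipv6_route target (sketch only); the
   * section name below is an assumption, not defined by this patch.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  __u64 nr_routes = 0;	/* read back from user space after the dump */

  SEC("dump/ipv6_route")
  int dump_ipv6_route(struct bpfdump__ipv6_route *ctx)
  {
  	struct fib6_info *rt = ctx->rt;

  	/* The stop() path invokes the program one last time with rt == NULL
  	 * so a dumper can emit a footer; nothing to do for this sketch.
  	 */
  	if (!rt)
  		return 0;

  	nr_routes++;	/* count each fib6_info walked by the iterator */
  	return 0;	/* a non-zero return is reported as -EINVAL */
  }

  char _license[] SEC("license") = "GPL";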