Re: [PATCH bpf-next] bpf: Restrict attachment of bpf program to some tracepoints

On Tue, Dec 06, 2022 at 06:14:06PM -0800, Namhyung Kim wrote:

SNIP

> -static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
> +static void *bpf_trace_norecurse_funcs[12] = {
> +	(void *)bpf_trace_run_norecurse1,
> +	(void *)bpf_trace_run_norecurse2,
> +	(void *)bpf_trace_run_norecurse3,
> +	(void *)bpf_trace_run_norecurse4,
> +	(void *)bpf_trace_run_norecurse5,
> +	(void *)bpf_trace_run_norecurse6,
> +	(void *)bpf_trace_run_norecurse7,
> +	(void *)bpf_trace_run_norecurse8,
> +	(void *)bpf_trace_run_norecurse9,
> +	(void *)bpf_trace_run_norecurse10,
> +	(void *)bpf_trace_run_norecurse11,
> +	(void *)bpf_trace_run_norecurse12,
> +};
> +
> +static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog,
> +				void *func, void *data)
>  {
>  	struct tracepoint *tp = btp->tp;
>  
> @@ -2325,13 +2354,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
>  	if (prog->aux->max_tp_access > btp->writable_size)
>  		return -EINVAL;
>  
> -	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
> -						   prog);
> +	return tracepoint_probe_register_may_exist(tp, func, data);
>  }
>  
>  int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
>  {
> -	return __bpf_probe_register(btp, prog);
> +	return __bpf_probe_register(btp, prog, btp->bpf_func, prog);
>  }
>  
>  int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
> @@ -2339,6 +2367,33 @@ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
>  	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
>  }
>  
> +int bpf_probe_register_norecurse(struct bpf_raw_event_map *btp, struct bpf_prog *prog,
> +				 struct bpf_raw_event_data *data)
> +{
> +	void *bpf_func;
> +
> +	data->active = alloc_percpu_gfp(int, GFP_KERNEL);
> +	if (!data->active)
> +		return -ENOMEM;
> +
> +	data->prog = prog;
> +	bpf_func = bpf_trace_norecurse_funcs[btp->num_args];
> +	return __bpf_probe_register(btp, prog, bpf_func, data);

I don't think we can do that, because it won't do the arg -> u64 conversion
that the __bpf_trace_##call functions do:

	__bpf_trace_##call(void *__data, proto)                                 \
	{                                                                       \
		struct bpf_prog *prog = __data;                                 \
		CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args));  \
	}

like for the 'old_pid' arg in the sched_process_exec tracepoint:

	ffffffff811959e0 <__bpf_trace_sched_process_exec>:
	ffffffff811959e0:       89 d2                   mov    %edx,%edx
	ffffffff811959e2:       e9 a9 07 14 00          jmp    ffffffff812d6190 <bpf_trace_run3>
	ffffffff811959e7:       66 0f 1f 84 00 00 00    nopw   0x0(%rax,%rax,1)
	ffffffff811959ee:       00 00
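
at the source level that wrapper is roughly the following (hand-expanded
sketch, with CAST_TO_U64 simplified to plain casts; the prototype comes
from the sched_process_exec TP_PROTO):

	static notrace void
	__bpf_trace_sched_process_exec(void *__data, struct task_struct *p,
				       pid_t old_pid, struct linux_binprm *bprm)
	{
		struct bpf_prog *prog = __data;

		/* each arg is widened to u64 before it reaches the bpf program */
		bpf_trace_run3(prog, (u64)(unsigned long)p, (u64)old_pid,
			       (u64)(unsigned long)bprm);
	}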

without that zero-extension (the 'mov %edx,%edx' above), the bpf program
could see trash in the upper bits of any arg narrower than u64

we'd need to add a recursion-checking ('norecurse') variant for all the
__bpf_trace_##call functions
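
something like the following could keep the conversion (just a sketch, the
this_cpu counter check is illustrative and only reuses the data->active /
data->prog fields from the patch):

	static notrace void
	__bpf_trace_norecurse_sched_process_exec(void *__data, struct task_struct *p,
						 pid_t old_pid, struct linux_binprm *bprm)
	{
		struct bpf_raw_event_data *data = __data;

		/* skip nested invocations of this tracepoint on the same cpu */
		if (this_cpu_inc_return(*data->active) == 1)
			bpf_trace_run3(data->prog, (u64)(unsigned long)p,
				       (u64)old_pid, (u64)(unsigned long)bprm);
		this_cpu_dec(*data->active);
	}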

jirka



> +}
> +
> +int bpf_probe_unregister_norecurse(struct bpf_raw_event_map *btp,
> +				   struct bpf_raw_event_data *data)
> +{
> +	int err;
> +	void *bpf_func;
> +
> +	bpf_func = bpf_trace_norecurse_funcs[btp->num_args];
> +	err = tracepoint_probe_unregister(btp->tp, bpf_func, data);
> +	free_percpu(data->active);
> +
> +	return err;
> +}
> +
>  int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
>  			    u32 *fd_type, const char **buf,
>  			    u64 *probe_offset, u64 *probe_addr)
> -- 
> 2.39.0.rc0.267.gcb52ba06e7-goog
> 


