From: Joanne Koong <joannelkoong@xxxxxxxxx>

This patch adds a new helper function

void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len);

which returns a pointer to the underlying data of a dynptr. *len* must
be a statically known value. The bpf program may access the returned
data slice as a normal buffer (e.g. it can do direct reads and writes),
since the verifier associates the length with the returned pointer and
enforces that no out-of-bounds accesses occur.

This requires a few additions to the verifier. For every
reference-tracked dynptr that is initialized, we associate an id with it
and attach any data slices to that id. When a release function is called
on a dynptr (e.g. bpf_free), we invalidate all slices that correspond to
that dynptr. This ensures a slice can't be used after its dynptr has
been invalidated.

Signed-off-by: Joanne Koong <joannelkoong@xxxxxxxxx>
---
 include/linux/bpf_verifier.h   |  2 +
 include/uapi/linux/bpf.h       | 12 ++++++
 kernel/bpf/helpers.c           | 28 ++++++++++++++
 kernel/bpf/verifier.c          | 70 +++++++++++++++++++++++++++++++++-
 tools/include/uapi/linux/bpf.h | 12 ++++++
 5 files changed, 122 insertions(+), 2 deletions(-)
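For illustration only (not part of the patch): a minimal sketch of the
intended usage from a bpf program. It assumes the bpf_malloc()/bpf_free()
dynptr helpers added earlier in this series, with bpf_malloc taking
(size, dynptr). The program gets a slice, NULL-checks it, and then
accesses it directly:

	struct bpf_dynptr ptr;
	void *data;

	/* allocate a 16-byte reference-tracked dynptr */
	if (bpf_malloc(16, &ptr))
		return 0;

	/* len must be a statically known constant (here, 16) */
	data = bpf_dynptr_data(&ptr, 0, 16);
	if (data)
		/* direct reads/writes within the 16-byte slice are allowed */
		*(__u8 *)data = 42;

	bpf_free(&ptr);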
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index bc0f105148f9..4862567af5ef 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -100,6 +100,8 @@ struct bpf_reg_state {
 	 * for the purpose of tracking that it's freed.
 	 * For PTR_TO_SOCKET this is used to share which pointers retain the
 	 * same reference to the socket, to determine proper reference freeing.
+	 * For stack slots that are dynptrs, this is used to track references to
+	 * the dynptr to enforce proper reference freeing.
 	 */
 	u32 id;
 	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 16a35e46be90..c835e437cb28 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5191,6 +5191,17 @@ union bpf_attr {
  *	Return
  *		0 on success, -EINVAL if *offset* + *len* exceeds the length
  *		of *dst*'s data or if *dst* is not writeable.
+ *
+ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
+ *	Description
+ *		Get a pointer to the underlying dynptr data.
+ *
+ *		*len* must be a statically known value. The returned data slice
+ *		is invalidated whenever the dynptr is invalidated.
+ *	Return
+ *		Pointer to the underlying dynptr data, NULL if the dynptr is
+ *		read-only, if the dynptr is invalid, or if the offset and length
+ *		are out of bounds.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5392,6 +5403,7 @@ union bpf_attr {
 	FN(free),			\
 	FN(dynptr_read),		\
 	FN(dynptr_write),		\
+	FN(dynptr_data),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 7ec20e79928e..c1295fb5d9d4 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1412,6 +1412,32 @@ const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
 	.arg3_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
 };
 
+BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
+{
+	int err;
+
+	if (!ptr->data)
+		return 0;
+
+	err = bpf_dynptr_check_off_len(ptr, offset, len);
+	if (err)
+		return 0;
+
+	if (bpf_dynptr_is_rdonly(ptr))
+		return 0;
+
+	return (unsigned long)(ptr->data + ptr->offset + offset);
+}
+
+const struct bpf_func_proto bpf_dynptr_data_proto = {
+	.func		= bpf_dynptr_data,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_ALLOC_MEM_OR_NULL,
+	.arg1_type	= ARG_PTR_TO_DYNPTR,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
+};
+
 BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, u32, offset)
 {
 	int err;
@@ -1570,6 +1596,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_dynptr_read_proto;
 	case BPF_FUNC_dynptr_write:
 		return &bpf_dynptr_write_proto;
+	case BPF_FUNC_dynptr_data:
+		return &bpf_dynptr_data_proto;
 	default:
 		break;
 	}
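Again for illustration (not part of the patch), the failure mode the
verifier changes below guard against: a data slice must not outlive its
dynptr. Assuming the same bpf_malloc()/bpf_free() helpers as above, a
sketch like this is now rejected:

	struct bpf_dynptr ptr;
	__u8 *data;

	if (bpf_malloc(8, &ptr))
		return 0;

	data = bpf_dynptr_data(&ptr, 0, 8);

	/* releasing the dynptr invalidates every slice attached to it */
	bpf_free(&ptr);

	if (data)
		*data = 1;	/* rejected: slice used after bpf_free */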
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cb3bcb54d4b4..7352ffb4f9a5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -187,6 +187,11 @@ struct bpf_verifier_stack_elem {
 					  POISON_POINTER_DELTA))
 #define BPF_MAP_PTR(X)	((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
 
+/* forward declarations */
+static void release_reg_references(struct bpf_verifier_env *env,
+				   struct bpf_func_state *state,
+				   int ref_obj_id);
+
 static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
 {
 	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
@@ -523,6 +528,11 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id)
 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+static inline bool is_dynptr_ref_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_dynptr_data;
+}
+
 static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 {
 	return BPF_CLASS(insn->code) == BPF_STX &&
@@ -700,7 +710,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 {
 	struct bpf_func_state *state = cur_func(env);
 	enum bpf_dynptr_type type;
-	int spi, i, err;
+	int spi, id, i, err;
 
 	spi = get_spi(reg->off);
@@ -721,12 +731,27 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
 
 	state->stack[spi].spilled_ptr.dynptr_first_slot = true;
 
+	/* Generate an id for the dynptr if the dynptr type can be
+	 * acquired/released.
+	 *
+	 * This is used to associate data slices with dynptrs, so that
+	 * if a dynptr gets invalidated, its data slices will also be
+	 * invalidated.
+	 */
+	if (dynptr_type_refcounted(state, spi)) {
+		id = ++env->id_gen;
+		state->stack[spi].spilled_ptr.id = id;
+		state->stack[spi - 1].spilled_ptr.id = id;
+	}
+
 	return 0;
 }
 
 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
+	struct bpf_verifier_state *vstate = env->cur_state;
 	struct bpf_func_state *state = func(env, reg);
+	bool refcounted;
 	int spi, i;
 
 	spi = get_spi(reg->off);
@@ -734,6 +759,8 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
 	if (!check_spi_bounds(state, spi, BPF_DYNPTR_NR_SLOTS))
 		return -EINVAL;
 
+	refcounted = dynptr_type_refcounted(state, spi);
+
 	for (i = 0; i < BPF_REG_SIZE; i++) {
 		state->stack[spi].slot_type[i] = STACK_INVALID;
 		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
@@ -743,6 +770,15 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
 	state->stack[spi].spilled_ptr.dynptr_first_slot = 0;
 	state->stack[spi - 1].spilled_ptr.dynptr_type = 0;
 
+	/* Invalidate any slices associated with this dynptr */
+	if (refcounted) {
+		for (i = 0; i <= vstate->curframe; i++)
+			release_reg_references(env, vstate->frame[i],
+					       state->stack[spi].spilled_ptr.id);
+		state->stack[spi].spilled_ptr.id = 0;
+		state->stack[spi - 1].spilled_ptr.id = 0;
+	}
+
 	return 0;
 }
@@ -780,6 +816,19 @@ static bool check_dynptr_init(struct bpf_verifier_env *env, struct bpf_reg_state
 	return state->stack[spi].spilled_ptr.dynptr_type == expected_type;
 }
 
+static bool is_ref_obj_id_dynptr(struct bpf_func_state *state, u32 id)
+{
+	int i;
+
+	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+		if (state->stack[i].slot_type[0] == STACK_DYNPTR &&
+		    state->stack[i].spilled_ptr.id == id)
+			return true;
+	}
+
+	return false;
+}
+
 static bool stack_access_into_dynptr(struct bpf_func_state *state, int spi, int size)
 {
 	int nr_slots, i;
@@ -5585,6 +5634,14 @@ static bool id_in_stack_slot(enum bpf_arg_type arg_type)
 	return arg_type_is_dynptr(arg_type);
 }
 
+static inline u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi = get_spi(reg->off);
+
+	return state->stack[spi].spilled_ptr.id;
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 			  struct bpf_call_arg_meta *meta,
 			  const struct bpf_func_proto *fn)
@@ -7114,6 +7171,14 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		regs[BPF_REG_0].id = id;
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = id;
+	} else if (is_dynptr_ref_function(func_id)) {
+		/* Retrieve the id of the associated dynptr. */
+		int id = stack_slot_get_id(env, &regs[BPF_REG_1]);
+
+		if (id < 0)
+			return id;
+		regs[BPF_REG_0].id = id;
+		regs[BPF_REG_0].ref_obj_id = id;
 	}
 
 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
@@ -9545,7 +9610,8 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 	u32 id = regs[regno].id;
 	int i;
 
-	if (ref_obj_id && ref_obj_id == id && is_null)
+	if (ref_obj_id && ref_obj_id == id && is_null &&
+	    !is_ref_obj_id_dynptr(state, id))
 		/* regs[regno] is in the " == NULL" branch.
 		 * No one could have freed the reference state before
 		 * doing the NULL check.
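One last illustrative note on the mark_ptr_or_null_regs() change above:
a slice shares its id with its dynptr's stack slots, so a NULL check on
the slice must not free the dynptr's reference state -- NULL only means
this particular slice request failed. Bounds on a valid slice are still
enforced against the statically known *len*; e.g. a sketch like this is
rejected:

	__u8 *data = bpf_dynptr_data(&ptr, 0, 8);

	if (!data)
		return 0;
	data[8] = 1;	/* one byte past the 8-byte slice: out of bounds */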
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 16a35e46be90..c835e437cb28 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5191,6 +5191,17 @@ union bpf_attr {
  *	Return
  *		0 on success, -EINVAL if *offset* + *len* exceeds the length
  *		of *dst*'s data or if *dst* is not writeable.
+ *
+ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
+ *	Description
+ *		Get a pointer to the underlying dynptr data.
+ *
+ *		*len* must be a statically known value. The returned data slice
+ *		is invalidated whenever the dynptr is invalidated.
+ *	Return
+ *		Pointer to the underlying dynptr data, NULL if the dynptr is
+ *		read-only, if the dynptr is invalid, or if the offset and length
+ *		are out of bounds.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5392,6 +5403,7 @@ union bpf_attr {
 	FN(free),			\
 	FN(dynptr_read),		\
 	FN(dynptr_write),		\
+	FN(dynptr_data),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.30.2