This patch migrates convert_ctx_accesses to the new list patching
infrastructure. Pre-patching is used for generating the prologue,
because what we really want is to insert the prologue instructions
before the program start without touching the first insn.

Signed-off-by: Jiong Wang <jiong.wang@xxxxxxxxxxxxx>
---
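For reviewers, here is a minimal sketch of the list patching life cycle
this pass now follows: create, optionally pre-patch, patch in place,
linearize, destroy. It is illustration only; the bpf_list_insn layout
is an assumption inferred from how its fields are used in this patch
(insn, next, orig_idx), and example_pass() is a made-up name.

	/* Sketch, not part of the patch. Assumed node layout: a
	 * working copy of one insn, a next pointer, and the 1-based
	 * index of the insn in the original program (0 presumably
	 * marks an insn inserted by patching).
	 */
	struct bpf_list_insn {
		struct bpf_insn insn;
		struct bpf_list_insn *next;
		u32 orig_idx;
	};

	static int example_pass(struct bpf_verifier_env *env)
	{
		struct bpf_list_insn *list, *elem;
		int ret = 0;

		/* Expand the flat insn array into a linked list. */
		list = bpf_create_list_insn(env->prog);
		if (IS_ERR(list))
			return PTR_ERR(list);

		for (elem = list; elem; elem = elem->next) {
			/* Rewrite elem->insn in place, or replace it
			 * with a buffer via:
			 *
			 *	elem = bpf_patch_list_insn(elem, buf, cnt);
			 *
			 * which presumably returns the tail of the
			 * patched-in sequence, so the walk resumes
			 * after the new insns.
			 */
		}

		/* Rebuild a flat program from the list, then free
		 * every node, including pre-patched ones.
		 */
		env = verifier_linearize_list_insn(env, list);
		if (IS_ERR(env))
			ret = PTR_ERR(env);
		bpf_destroy_list_insn(list);
		return ret;
	}

Note how the pass below keeps elem pointing at the original first node
while moving list to the pre-patched head: the conversion walk then
never visits the prologue instructions, while bpf_destroy_list_insn()
can still free them.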
 kernel/bpf/verifier.c | 98 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 58 insertions(+), 40 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2026d64..2d16e85 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8631,41 +8631,59 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 static int convert_ctx_accesses(struct bpf_verifier_env *env)
 {
 	const struct bpf_verifier_ops *ops = env->ops;
-	int i, cnt, size, ctx_field_size, delta = 0;
-	const int insn_cnt = env->prog->len;
 	struct bpf_insn insn_buf[16], *insn;
 	u32 target_size, size_default, off;
-	struct bpf_prog *new_prog;
+	struct bpf_list_insn *list, *elem;
+	int cnt, size, ctx_field_size;
 	enum bpf_access_type type;
 	bool is_narrower_load;
+	int ret = 0;
+
+	list = bpf_create_list_insn(env->prog);
+	if (IS_ERR(list))
+		return PTR_ERR(list);
+	elem = list;
 
 	if (ops->gen_prologue || env->seen_direct_write) {
 		if (!ops->gen_prologue) {
 			verbose(env, "bpf verifier is misconfigured\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_list_ret;
 		}
 		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
 					env->prog);
 		if (cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose(env, "bpf verifier is misconfigured\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_list_ret;
 		} else if (cnt) {
-			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
-			if (!new_prog)
-				return -ENOMEM;
+			struct bpf_list_insn *new_hdr;
 
-			env->prog = new_prog;
-			delta += cnt - 1;
+			/* "gen_prologue" generates patch buffer, we want to use
+			 * pre-patch buffer because we don't want to touch the
+			 * insn/aux at start offset.
+			 */
+			new_hdr = bpf_prepatch_list_insn(list, insn_buf,
+							 cnt - 1);
+			if (IS_ERR(new_hdr)) {
+				ret = -ENOMEM;
+				goto free_list_ret;
+			}
+			/* Update list head, so new pre-patched nodes could be
+			 * freed by destroyer.
+			 */
+			list = new_hdr;
 		}
 	}
 
 	if (bpf_prog_is_dev_bound(env->prog->aux))
-		return 0;
+		goto linearize_list_ret;
 
-	insn = env->prog->insnsi + delta;
-
-	for (i = 0; i < insn_cnt; i++, insn++) {
+	for (; elem; elem = elem->next) {
 		bpf_convert_ctx_access_t convert_ctx_access;
+		struct bpf_insn_aux_data *aux;
+
+		insn = &elem->insn;
 
 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
@@ -8680,8 +8698,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		else
 			continue;
 
-		if (type == BPF_WRITE &&
-		    env->insn_aux_data[i + delta].sanitize_stack_off) {
+		aux = &env->insn_aux_data[elem->orig_idx - 1];
+		if (type == BPF_WRITE && aux->sanitize_stack_off) {
 			struct bpf_insn patch[] = {
 				/* Sanitize suspicious stack slot with zero.
 				 * There are no memory dependencies for this store,
@@ -8689,8 +8707,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 				 * constant of zero
 				 */
 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
-					   env->insn_aux_data[i + delta].sanitize_stack_off,
-					   0),
+					   aux->sanitize_stack_off, 0),
 				/* the original STX instruction will immediately
 				 * overwrite the same stack slot with appropriate value
 				 */
@@ -8698,17 +8715,15 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			};
 
 			cnt = ARRAY_SIZE(patch);
-			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
-			if (!new_prog)
-				return -ENOMEM;
-
-			delta += cnt - 1;
-			env->prog = new_prog;
-			insn = new_prog->insnsi + i + delta;
+			elem = bpf_patch_list_insn(elem, patch, cnt);
+			if (IS_ERR(elem)) {
+				ret = PTR_ERR(elem);
+				goto free_list_ret;
+			}
 			continue;
 		}
 
-		switch (env->insn_aux_data[i + delta].ptr_type) {
+		switch (aux->ptr_type) {
 		case PTR_TO_CTX:
 			if (!ops->convert_ctx_access)
 				continue;
@@ -8728,7 +8743,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			continue;
 		}
 
-		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
+		ctx_field_size = aux->ctx_field_size;
 		size = BPF_LDST_BYTES(insn);
 
 		/* If the read access is a narrower load of the field,
@@ -8744,7 +8759,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			if (type == BPF_WRITE) {
 				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
-				return -EINVAL;
+				ret = -EINVAL;
+				goto free_list_ret;
 			}
 
 			size_code = BPF_H;
@@ -8763,7 +8779,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
 		    (ctx_field_size && !target_size)) {
 			verbose(env, "bpf verifier is misconfigured\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_list_ret;
 		}
 
 		if (is_narrower_load && size < target_size) {
@@ -8786,18 +8803,19 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			}
 		}
 
-		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
-		if (!new_prog)
-			return -ENOMEM;
-
-		delta += cnt - 1;
-
-		/* keep walking new program and skip insns we just inserted */
-		env->prog = new_prog;
-		insn = new_prog->insnsi + i + delta;
+		elem = bpf_patch_list_insn(elem, insn_buf, cnt);
+		if (IS_ERR(elem)) {
+			ret = PTR_ERR(elem);
+			goto free_list_ret;
+		}
 	}
-
-	return 0;
+linearize_list_ret:
+	env = verifier_linearize_list_insn(env, list);
+	if (IS_ERR(env))
+		ret = PTR_ERR(env);
+free_list_ret:
+	bpf_destroy_list_insn(list);
+	return ret;
 }
 
 static int jit_subprogs(struct bpf_verifier_env *env)
-- 
2.7.4
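P.S. for reviewers: the aux lookup in the main loop indexes
insn_aux_data with "elem->orig_idx - 1", which reads as orig_idx being
1-based, with 0 presumably reserved for insns inserted by patching
(the walk skips those, so they never reach the lookup). A sketch of
that assumed convention; list_insn_aux() is a made-up helper name:

	/* Sketch, not part of the patch. Assumed convention:
	 * orig_idx == 0     insn inserted by patching, no aux data
	 * orig_idx == i + 1 insn was insnsi[i] in the original prog
	 */
	static struct bpf_insn_aux_data *
	list_insn_aux(struct bpf_verifier_env *env,
		      const struct bpf_list_insn *elem)
	{
		if (!elem->orig_idx)
			return NULL;
		return &env->insn_aux_data[elem->orig_idx - 1];
	}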