On 1/27/23 11:17 AM, Joanne Koong wrote:
@@ -8243,6 +8316,28 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
mark_reg_known_zero(env, regs, BPF_REG_0);
regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
regs[BPF_REG_0].mem_size = meta.mem_size;
+ if (func_id == BPF_FUNC_dynptr_data &&
+ dynptr_type == BPF_DYNPTR_TYPE_SKB) {
+ bool seen_direct_write = env->seen_direct_write;
+
+ regs[BPF_REG_0].type |= DYNPTR_TYPE_SKB;
+ if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE))
+ regs[BPF_REG_0].type |= MEM_RDONLY;
+ else
+ /*
+ * Calling may_access_direct_pkt_data() will set
+ * env->seen_direct_write to true if the skb is
+ * writable. As an optimization, we can ignore
+ * setting env->seen_direct_write.
+ *
+ * env->seen_direct_write is used by skb
+ * programs to determine whether the skb's page
+ * buffers should be cloned. Since data slice
+ * writes would only be to the head, we can skip
+ * this.
+ */
+ env->seen_direct_write = seen_direct_write;
+ }
[ ... ]
@@ -9263,17 +9361,26 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
return ret;
break;
case KF_ARG_PTR_TO_DYNPTR:
+ {
+ enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
+
if (reg->type != PTR_TO_STACK &&
reg->type != CONST_PTR_TO_DYNPTR) {
verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
return -EINVAL;
}
- ret = process_dynptr_func(env, regno, insn_idx,
- ARG_PTR_TO_DYNPTR | MEM_RDONLY);
+ if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb])
+ dynptr_arg_type |= MEM_UNINIT | DYNPTR_TYPE_SKB;
+ else
+ dynptr_arg_type |= MEM_RDONLY;
+
+ ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type,
+ meta->func_id);
if (ret < 0)
return ret;
break;
+ }
case KF_ARG_PTR_TO_LIST_HEAD:
if (reg->type != PTR_TO_MAP_VALUE &&
reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
@@ -15857,6 +15964,14 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
*cnt = 1;
+ } else if (desc->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
+ bool is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
Does env->seen_direct_write need to be restored here as well?
It seems this 'seen_direct_write' saving/restoring is needed now because
'may_access_direct_pkt_data(BPF_WRITE)' is not only called when it is actually
writing the packet. Some refactoring could help to avoid issues like this.
While at 'seen_direct_write', Alexei has also pointed out that the verifier
needs to track whether the (packet) 'slice' returned by bpf_dynptr_data() has
been written. It should be tracked in 'seen_direct_write'. Take a look at how
reg_is_pkt_pointer() and may_access_direct_pkt_data() are done in
check_mem_access(). iirc, this reg_is_pkt_pointer() part got lost somewhere in
v5 (or v4?) when bpf_dynptr_data() was changed to return a register typed
PTR_TO_MEM instead of PTR_TO_PACKET.
[ ... ]
+int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
+ struct bpf_dynptr_kern *ptr, int is_rdonly)
hmm... this exposed kfunc takes "int is_rdonly".
What if the bpf prog calls it like bpf_dynptr_from_skb(..., false) from a hook
where the packet is not writable?
+{
+ if (flags) {
+ bpf_dynptr_set_null(ptr);
+ return -EINVAL;
+ }
+
+ bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
+
+ if (is_rdonly)
+ bpf_dynptr_set_rdonly(ptr);
+
+ return 0;
+}
+
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
@@ -11607,3 +11634,28 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
return func;
}
+
+BTF_SET8_START(bpf_kfunc_check_set_skb)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb)
+BTF_SET8_END(bpf_kfunc_check_set_skb)
+
+static const struct btf_kfunc_id_set bpf_kfunc_set_skb = {
+ .owner = THIS_MODULE,
+ .set = &bpf_kfunc_check_set_skb,
+};
+
+static int __init bpf_kfunc_init(void)
+{
+ int ret;
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SK_SKB, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SOCKET_FILTER, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_OUT, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_IN, &bpf_kfunc_set_skb);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb);
+ return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb);
+}
+late_initcall(bpf_kfunc_init);