Re: [PATCH bpf-next v6] bpf: Support 64-bit pointers to kfuncs

On Thu, Apr 06, 2023 at 02:31:06PM +0200, Ilya Leoshkevich wrote:
> On Thu, 2023-04-06 at 11:44 +0200, Jiri Olsa wrote:
> > On Wed, Apr 05, 2023 at 11:34:53PM +0200, Ilya Leoshkevich wrote:
> > 
> > SNIP
> > 
> > >  
> > > +int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
> > > +                      u16 btf_fd_idx, u8 **func_addr)
> > > +{
> > > +       const struct bpf_kfunc_desc *desc;
> > > +
> > > +       desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
> > > +       if (!desc)
> > > +               return -EFAULT;
> > > +
> > > +       *func_addr = (u8 *)desc->addr;
> > > +       return 0;
> > > +}
> > > +
> > >  static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
> > >                                          s16 offset)
> > >  {
> > > @@ -2672,14 +2691,19 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
> > >                 return -EINVAL;
> > >         }
> > >  
> > > -       call_imm = BPF_CALL_IMM(addr);
> > > -       /* Check whether or not the relative offset overflows desc->imm */
> > > -       if ((unsigned long)(s32)call_imm != call_imm) {
> > > -               verbose(env, "address of kernel function %s is out of range\n",
> > > -                       func_name);
> > > -               return -EINVAL;
> > > +       if (bpf_jit_supports_far_kfunc_call()) {
> > > +               call_imm = func_id;
> > > +       } else {
> > > +               call_imm = BPF_CALL_IMM(addr);
> > 
> > we compute call_imm again in fixup_kfunc_call, seems like we could
> > store the address and the func_id in desc and have fixup_kfunc_call
> > do the insn->imm setup
> 
> We can drop this diff in fixup_kfunc_call():
> 
> -       insn->imm = desc->imm;
> +       if (!bpf_jit_supports_far_kfunc_call())
> +               insn->imm = BPF_CALL_IMM(desc->addr);
> 
> in order to avoid duplicating the imm calculation logic, but I'm not
> sure if we want to move the entire desc->imm setup there.
> 
> For example, fixup_kfunc_call() considers kfunc_tab const, which is a
> nice property that I think is worth keeping.
> 
> Another option would be to drop desc->imm, but having it is very
> convenient for doing lookups the same way on all architectures. 

ok, I see..  so should we do the following in fixup_kfunc_call:

	if (!bpf_jit_supports_far_kfunc_call())
		insn->imm = desc->imm;

by default there's func_id in insn->imm
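
fwiw, a rough sketch (untested) of how an arch JIT that supports far
kfunc calls would then consume the func_id left in insn->imm, using the
bpf_get_kfunc_addr() helper from the patch; I'm assuming the JIT has
prog and insn at hand, and the actual call emission is arch specific:

	/* insn->imm carries the kfunc BTF id and insn->off the BTF fd
	 * index, so the full 64-bit address can be resolved at JIT time
	 */
	u8 *func_addr;
	int err;

	err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &func_addr);
	if (err)
		return err;
	/* ... emit an indirect/far call to func_addr ... */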

jirka

> 
> > > +               /* Check whether the relative offset overflows desc->imm */
> > > +               if ((unsigned long)(s32)call_imm != call_imm) {
> > > +                       verbose(env, "address of kernel function %s is out of range\n",
> > > +                               func_name);
> > > +                       return -EINVAL;
> > > +               }
> > >         }
> > >  
> > > +
> > 
> > nit, extra line
> 
> Ouch. Thanks for spotting this.
> 
> > 
> > >         if (bpf_dev_bound_kfunc_id(func_id)) {
> > >                 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
> > >                 if (err)
> > > @@ -2690,6 +2714,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
> > >         desc->func_id = func_id;
> > >         desc->imm = call_imm;
> > >         desc->offset = offset;
> > > +       desc->addr = addr;
> > >         err = btf_distill_func_proto(&env->log, desc_btf,
> > >                                      func_proto, func_name,
> > >                                      &desc->func_model);
> > > @@ -2699,19 +2724,19 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
> > >         return err;
> > >  }
> > >  
> > > -static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
> > > +static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
> > >  {
> > >         const struct bpf_kfunc_desc *d0 = a;
> > >         const struct bpf_kfunc_desc *d1 = b;
> > >  
> > > -       if (d0->imm > d1->imm)
> > > -               return 1;
> > > -       else if (d0->imm < d1->imm)
> > > -               return -1;
> > > +       if (d0->imm != d1->imm)
> > > +               return d0->imm < d1->imm ? -1 : 1;
> > > +       if (d0->offset != d1->offset)
> > > +               return d0->offset < d1->offset ? -1 : 1;
> > >         return 0;
> > >  }
> > >  
> > 
> > SNIP
> > 
> > > +/* replace a generic kfunc with a specialized version if necessary */
> > > +static void fixup_kfunc_desc(struct bpf_verifier_env *env,
> > > +                            struct bpf_kfunc_desc *desc)
> > > +{
> > > +       struct bpf_prog *prog = env->prog;
> > > +       u32 func_id = desc->func_id;
> > > +       u16 offset = desc->offset;
> > > +       bool seen_direct_write;
> > > +       void *xdp_kfunc;
> > > +       bool is_rdonly;
> > > +
> > > +       if (bpf_dev_bound_kfunc_id(func_id)) {
> > > +               xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
> > > +               if (xdp_kfunc) {
> > > +                       desc->addr = (unsigned long)xdp_kfunc;
> > > +                       return;
> > > +               }
> > > +               /* fallback to default kfunc when not supported by netdev */
> > > +       }
> > > +
> > > +       if (offset)
> > > +               return;
> > > +
> > > +       if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
> > > +               seen_direct_write = env->seen_direct_write;
> > > +               is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
> > > +
> > > +               if (is_rdonly)
> > > +                       desc->addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
> > > +
> > > +               /* restore env->seen_direct_write to its original value, since
> > > +                * may_access_direct_pkt_data mutates it
> > > +                */
> > > +               env->seen_direct_write = seen_direct_write;
> > > +       }
> > 
> > could we do this directly in add_kfunc_call?
> 
> Initially I thought that it wasn't possible, because
> may_access_direct_pkt_data() may depend on data gathered during
> verification. But on a second look that's simply not the case, so this
> code can indeed be moved to add_kfunc_call().
> 
> > 
> > thanks,
> > jirka
> 
> [...]
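
fwiw, re doing the specialization directly in add_kfunc_call(): a rough
sketch (untested, names taken from the quoted hunk; the xdp_kfunc and
seen_direct_write locals would need to be declared there) of how the
addr could be adjusted while the desc is being filled in, instead of in
a separate fixup_kfunc_desc() pass:

	/* in add_kfunc_call(), after resolving addr and before
	 * desc->addr = addr: swap in a specialized kfunc where needed
	 */
	if (bpf_dev_bound_kfunc_id(func_id)) {
		xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, func_id);
		if (xdp_kfunc)
			addr = (unsigned long)xdp_kfunc;
		/* otherwise fall back to the default kfunc */
	} else if (!offset &&
		   func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
		seen_direct_write = env->seen_direct_write;
		if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE))
			addr = (unsigned long)bpf_dynptr_from_skb_rdonly;
		/* may_access_direct_pkt_data() mutates seen_direct_write,
		 * so restore it
		 */
		env->seen_direct_write = seen_direct_write;
	}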


