Re: [RFC PATCH v2 04/13] bpf: Define a few bpf_link_ops for BPF_TRACE_MAP

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Good catch, applied both changes. The expectation is to remove only
one
 program. Theoretically an app could link the same program to the same
map
twice, in which case close()ing one link should not detach both
programs.

I opted to also apply the _safe() suffix mostly as a matter of convention.

-       struct bpf_map_trace_prog *cur_prog;
+       struct bpf_map_trace_prog *cur_prog, *tmp;
        struct bpf_map_trace_progs *progs;

        progs = map_trace_link->map->trace_progs;
        mutex_lock(&progs->mutex);
-       list_for_each_entry(cur_prog, &progs->progs[trace_type].list, list) {
+       list_for_each_entry_safe(cur_prog, tmp, &progs->progs[trace_type].list,
+                                list) {
                if (cur_prog->prog == link->prog) {
                        progs->length[trace_type] -= 1;
                        list_del_rcu(&cur_prog->list);
                        kfree_rcu(cur_prog, rcu);
+                       break;
                }
        }


On Wed, Sep 29, 2021 at 5:26 PM Eric Dumazet <eric.dumazet@xxxxxxxxx> wrote:
>
>
>
> On 9/29/21 4:59 PM, Joe Burton wrote:
> > From: Joe Burton <jevburton@xxxxxxxxxx>
> >
> > Define release, dealloc, and update_prog for the new tracing programs.
> > Updates are protected by a single global mutex.
> >
> > Signed-off-by: Joe Burton <jevburton@xxxxxxxxxx>
> > ---
> >  kernel/bpf/map_trace.c | 71 ++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 71 insertions(+)
> >
> > diff --git a/kernel/bpf/map_trace.c b/kernel/bpf/map_trace.c
> > index 7776b8ccfe88..35906d59ba3c 100644
> > --- a/kernel/bpf/map_trace.c
> > +++ b/kernel/bpf/map_trace.c
> > @@ -14,6 +14,14 @@ struct bpf_map_trace_target_info {
> >  static struct list_head targets = LIST_HEAD_INIT(targets);
> >  static DEFINE_MUTEX(targets_mutex);
> >
> > +struct bpf_map_trace_link {
> > +     struct bpf_link link;
> > +     struct bpf_map *map;
> > +     struct bpf_map_trace_target_info *tinfo;
> > +};
> > +
> > +static DEFINE_MUTEX(link_mutex);
> > +
> >  int bpf_map_trace_reg_target(const struct bpf_map_trace_reg *reg_info)
> >  {
> >       struct bpf_map_trace_target_info *tinfo;
> > @@ -77,3 +85,66 @@ int bpf_map_initialize_trace_progs(struct bpf_map *map)
> >       return 0;
> >  }
> >
> > +static void bpf_map_trace_link_release(struct bpf_link *link)
> > +{
> > +     struct bpf_map_trace_link *map_trace_link =
> > +                     container_of(link, struct bpf_map_trace_link, link);
> > +     enum bpf_map_trace_type trace_type =
> > +                     map_trace_link->tinfo->reg_info->trace_type;
> > +     struct bpf_map_trace_prog *cur_prog;
> > +     struct bpf_map_trace_progs *progs;
> > +
> > +     progs = map_trace_link->map->trace_progs;
> > +     mutex_lock(&progs->mutex);
> > +     list_for_each_entry(cur_prog, &progs->progs[trace_type].list, list) {
>
> You might consider using list_for_each_entry_safe(), or ...
>
> > +             if (cur_prog->prog == link->prog) {
> > +                     progs->length[trace_type] -= 1;
> > +                     list_del_rcu(&cur_prog->list);
> > +                     kfree_rcu(cur_prog, rcu);
>
> or add a break; if you do not expect to find multiple entries.
>
> > +             }
> > +     }
> > +     mutex_unlock(&progs->mutex);
> > +     bpf_map_put_with_uref(map_trace_link->map);
> > +}
> > +
> > +static void bpf_map_trace_link_dealloc(struct bpf_link *link)
> > +{
> > +     struct bpf_map_trace_link *map_trace_link =
> > +                     container_of(link, struct bpf_map_trace_link, link);
> > +
> > +     kfree(map_trace_link);
> > +}
> > +
> > +static int bpf_map_trace_link_replace(struct bpf_link *link,
> > +                                   struct bpf_prog *new_prog,
> > +                                   struct bpf_prog *old_prog)
> > +{
> > +     int ret = 0;
> > +
> > +     mutex_lock(&link_mutex);
> > +     if (old_prog && link->prog != old_prog) {
> > +             ret = -EPERM;
> > +             goto out_unlock;
> > +     }
> > +
> > +     if (link->prog->type != new_prog->type ||
> > +         link->prog->expected_attach_type != new_prog->expected_attach_type ||
> > +         link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
> > +             ret = -EINVAL;
> > +             goto out_unlock;
> > +     }
> > +
> > +     old_prog = xchg(&link->prog, new_prog);
> > +     bpf_prog_put(old_prog);
> > +
> > +out_unlock:
> > +     mutex_unlock(&link_mutex);
> > +     return ret;
> > +}
> > +
> > +static const struct bpf_link_ops bpf_map_trace_link_ops = {
> > +     .release = bpf_map_trace_link_release,
> > +     .dealloc = bpf_map_trace_link_dealloc,
> > +     .update_prog = bpf_map_trace_link_replace,
> > +};
> > +
> >



[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux