return 0;
}
@@ -491,7 +494,6 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
*(unsigned long *)(udata + moff) = prog->aux->id;
}
- refcount_set(&kvalue->refcnt, 1);
bpf_map_inc(map);
set_memory_rox((long)st_map->image, 1);
@@ -536,8 +538,7 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
switch (prev_state) {
case BPF_STRUCT_OPS_STATE_INUSE:
st_map->st_ops->unreg(&st_map->kvalue.data);
- if (refcount_dec_and_test(&st_map->kvalue.refcnt))
- bpf_map_put(map);
+ bpf_map_put(map);
return 0;
case BPF_STRUCT_OPS_STATE_TOBEFREE:
return -EINPROGRESS;
@@ -582,6 +583,38 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
bpf_map_area_free(st_map);
}
+static void bpf_struct_ops_map_free_wq(struct rcu_head *head)
+{
+ struct bpf_struct_ops_map *st_map;
+
+ st_map = container_of(head, struct bpf_struct_ops_map, rcu);
+
+ /* bpf_map_free_deferred() should not be called in an RCU callback. */
+ INIT_WORK(&st_map->map.work, bpf_map_free_deferred);
+ queue_work(system_unbound_wq, &st_map->map.work);
+}
+
+static void bpf_struct_ops_map_free_rcu(struct bpf_map *map)
+{
+ struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+
+ /* Wait for an RCU grace period, then post the map_free
+ * work to the system_unbound_wq workqueue to free resources.
+ *
+ * A struct_ops's function may switch to another struct_ops.
+ *
+ * For example, bpf_tcp_cc_x->init() may switch to
+ * another tcp_cc_y by calling
+ * setsockopt(TCP_CONGESTION, "tcp_cc_y").
+ * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
+ * and its refcount may reach 0, which then frees its
+ * trampoline image while tcp_cc_x is still running.
+ *
+ * Thus, an RCU grace period is needed here.
+ */
+ call_rcu(&st_map->rcu, bpf_struct_ops_map_free_wq);
+}
+
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
@@ -646,6 +679,7 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_alloc_check = bpf_struct_ops_map_alloc_check,
.map_alloc = bpf_struct_ops_map_alloc,
.map_free = bpf_struct_ops_map_free,
+ .map_free_rcu = bpf_struct_ops_map_free_rcu,