On Thu, May 4, 2023 at 1:05 PM Alexei Starovoitov <alexei.starovoitov@xxxxxxxxx> wrote:
>
> On Tue, May 02, 2023 at 04:06:13PM -0700, Andrii Nakryiko wrote:
> > }
> >
> > -static struct bpf_map *array_map_alloc(union bpf_attr *attr)
> > +static u32 array_index_mask(u32 max_entries)
> > {
> > -	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
> > -	int numa_node = bpf_map_attr_numa_node(attr);
> > -	u32 elem_size, index_mask, max_entries;
> > -	bool bypass_spec_v1 = bpf_bypass_spec_v1();
>
> static inline bool bpf_bypass_spec_v1(void)
> {
> 	return perfmon_capable();
> }
>
> > +	/* unprivileged is OK, but we still record if we had CAP_BPF */
> > +	unpriv = !bpf_capable();
>
> map->unpriv flag makes sense as !CAP_BPF,
> but it's not equivalent to bpf_bypass_spec_v1.
>

argh, right, it's perfmon_capable() :(

what do you propose? do bpf_capable and perfmon_capable fields for each
map separately? or keep unpriv and add perfmon_capable separately? or
any better ideas?..

> > 		break;
> > 	default:
> > 		WARN(1, "unsupported map type %d", map_type);
> > 		return -EPERM;
> > 	}
> >
> > +	/* ARRAY-like maps have special sizing provisions for mitigating Spectre v1 */
> > +	if (unpriv) {
> > +		switch (map_type) {
> > +		case BPF_MAP_TYPE_ARRAY:
> > +		case BPF_MAP_TYPE_PERCPU_ARRAY:
> > +		case BPF_MAP_TYPE_PROG_ARRAY:
> > +		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
> > +		case BPF_MAP_TYPE_CGROUP_ARRAY:
> > +		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
> > +			err = bpf_array_adjust_for_spec_v1(attr);
> > +			if (err)
> > +				return err;
> > +			break;
> > +		}
> > +	}
> > +
> > 	map = ops->map_alloc(attr);
> > 	if (IS_ERR(map))
> > 		return PTR_ERR(map);
> > 	map->ops = ops;
> > 	map->map_type = map_type;
> > +	map->unpriv = unpriv;
> >
> > 	err = bpf_obj_name_cpy(map->name, attr->map_name,
> > 			       sizeof(attr->map_name));
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index ff4a8ab99f08..481aaf189183 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -8731,11 +8731,9 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
> > 	}
> >
> > 	if (!BPF_MAP_PTR(aux->map_ptr_state))
> > -		bpf_map_ptr_store(aux, meta->map_ptr,
> > -				  !meta->map_ptr->bypass_spec_v1);
> > +		bpf_map_ptr_store(aux, meta->map_ptr, meta->map_ptr->unpriv);
> > 	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
> > -		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
> > -				  !meta->map_ptr->bypass_spec_v1);
> > +		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, meta->map_ptr->unpriv);
> > 	return 0;
> > }
> >
> > --
> > 2.34.1
> >
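
To make the first option above a bit more concrete, a very rough
(untested) sketch, field names made up for the sake of discussion:
record both capability snapshots at map creation time and let each
user pick the one it actually cares about:

/* struct bpf_map in include/linux/bpf.h: */
	/* snapshot of creator's capabilities at map creation time */
	bool bpf_capable;	/* had CAP_BPF (or CAP_SYS_ADMIN) */
	bool perfmon_capable;	/* had CAP_PERFMON, i.e. bpf_bypass_spec_v1() */

/* in map_create(), instead of the single unpriv flag: */
	map->bpf_capable = bpf_capable();
	map->perfmon_capable = perfmon_capable();

record_func_map() would then key off !map->perfmon_capable, since
Spectre v1 bypass is what matters there, while the ARRAY sizing
provisions could keep using !map->bpf_capable. Just one possibility,
not wedded to it.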