On Tue, Mar 4, 2025 at 6:10 PM Emil Tsalapatis <emil@xxxxxxxxxxxxxxx> wrote:
>
> Signed-off-by: Emil Tsalapatis (Meta) <emil@xxxxxxxxxxxxxxx>

The commit log cannot be empty.

> ---
>  kernel/bpf/cpumask.c | 27 +++++++++++++++++++++++++++
>  1 file changed, 27 insertions(+)
>
> diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
> index cfa1c18e3a48..e4e4109b72ad 100644
> --- a/kernel/bpf/cpumask.c
> +++ b/kernel/bpf/cpumask.c
> @@ -420,6 +420,32 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
>  	return cpumask_weight(cpumask);
>  }
>
> +/**
> + * bpf_cpumask_fill() - Populate the CPU mask from the contents of
> + * a BPF memory region.
> + *
> + * @cpumask: The cpumask being populated.
> + * @src: The BPF memory holding the bit pattern.
> + * @src__sz: Length of the BPF memory region in bytes.
> + *
> + */
> +__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
> +{
> +	unsigned long source = (unsigned long)src;
> +
> +	/* The memory region must be large enough to populate the entire CPU mask. */
> +	if (src__sz < bitmap_size(nr_cpu_ids))
> +		return -EACCES;
> +
> +	/* The input region must be aligned to the nearest long. */
> +	if (!IS_ALIGNED(source, sizeof(long)))
> +		return -EINVAL;

Add !IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) so we don't
penalize good archs.

> +
> +	bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
> +
> +	return 0;
> +}
> +
>  __bpf_kfunc_end_defs();
>
>  BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
> @@ -448,6 +474,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
> +BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
>  BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
>
>  static const struct btf_kfunc_id_set cpumask_kfunc_set = {
> --
> 2.47.1
>
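
Concretely, something like this (a minimal, untested sketch of the
suggested change, showing only the alignment check):

	/* Reject unaligned input only on archs where unaligned loads are slow. */
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    !IS_ALIGNED(source, sizeof(long)))
		return -EINVAL;

That keeps the strict alignment requirement on architectures that need
it, while archs that select CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS can
accept any source pointer without the extra -EINVAL failure mode.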