Initialize and free each element in a btf_field array based on the
values of nelems and size in btf_field. The value of nelems is the
length of the flattened array for nested arrays.

Signed-off-by: Kui-Feng Lee <thinker.li@xxxxxxxxx>
---
 include/linux/bpf.h  |  7 +++++++
 kernel/bpf/syscall.c | 39 ++++++++++++++++++++++++---------------
 2 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cab479925dfd..b25dd498b737 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -390,6 +390,9 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
 
 static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
 {
+	u32 elem_size;
+	int i;
+
 	memset(addr, 0, field->size);
 
 	switch (field->type) {
@@ -400,6 +403,10 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
 		RB_CLEAR_NODE((struct rb_node *)addr);
 		break;
 	case BPF_LIST_HEAD:
+		elem_size = field->size / field->nelems;
+		for (i = 0; i < field->nelems; i++, addr += elem_size)
+			INIT_LIST_HEAD((struct list_head *)addr);
+		break;
 	case BPF_LIST_NODE:
 		INIT_LIST_HEAD((struct list_head *)addr);
 		break;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7d392ec83655..cdabb673d358 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -672,6 +672,8 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
 		const struct btf_field *field = &fields[i];
 		void *field_ptr = obj + field->offset;
 		void *xchgd_field;
+		u32 elem_size = field->size / field->nelems;
+		int j;
 
 		switch (fields[i].type) {
 		case BPF_SPIN_LOCK:
@@ -680,35 +682,42 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
 			bpf_timer_cancel_and_free(field_ptr);
 			break;
 		case BPF_KPTR_UNREF:
-			WRITE_ONCE(*(u64 *)field_ptr, 0);
+			for (j = 0; j < field->nelems; j++, field_ptr += elem_size)
+				WRITE_ONCE(*(u64 *)field_ptr, 0);
 			break;
 		case BPF_KPTR_REF:
 		case BPF_KPTR_PERCPU:
-			xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
-			if (!xchgd_field)
-				break;
-
-			if (!btf_is_kernel(field->kptr.btf)) {
+			if (!btf_is_kernel(field->kptr.btf))
 				pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
 									   field->kptr.btf_id);
-				migrate_disable();
-				__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
-						    pointee_struct_meta->record : NULL,
-						    fields[i].type == BPF_KPTR_PERCPU);
-				migrate_enable();
-			} else {
-				field->kptr.dtor(xchgd_field);
+
+			for (j = 0; j < field->nelems; j++, field_ptr += elem_size) {
+				xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
+				if (!xchgd_field)
+					continue;
+
+				if (!btf_is_kernel(field->kptr.btf)) {
+					migrate_disable();
+					__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
+							    pointee_struct_meta->record : NULL,
+							    fields[i].type == BPF_KPTR_PERCPU);
+					migrate_enable();
+				} else {
+					field->kptr.dtor(xchgd_field);
+				}
 			}
 			break;
 		case BPF_LIST_HEAD:
 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
 				continue;
-			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
+			for (j = 0; j < field->nelems; j++, field_ptr += elem_size)
+				bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
 			break;
 		case BPF_RB_ROOT:
 			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
 				continue;
-			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
+			for (j = 0; j < field->nelems; j++, field_ptr += elem_size)
+				bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
 			break;
 		case BPF_LIST_NODE:
 		case BPF_RB_NODE:
-- 
2.34.1