> On Oct 13, 2021, at 12:33 AM, Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx> wrote:
> 
> This uses the bpf_kallsyms_lookup_name helper added in previous patches
> to relocate typeless ksyms. The return value ENOENT can be ignored, and
> the value written to 'res' can be directly stored to the insn, as it is
> overwritten to 0 on lookup failure. For repeating symbols, we can simply
> copy the previously populated bpf_insn.
> 
> Also, we need to take care to not close fds for typeless ksym_desc, so
> reuse the 'off' member's space to add a marker for typeless ksym and use
> that to skip them in cleanup_relos.
> 
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>

[...]

> }
> 
> +/* Expects:
> + * BPF_REG_8 - pointer to instruction
> + */
> +static void emit_relo_ksym_typeless(struct bpf_gen *gen,
> +				    struct ksym_relo_desc *relo, int insn)

This function duplicates quite a bit of logic from emit_relo_ksym_btf().
I guess we can somehow reuse the code here. Say, we pull the changes from
3/8 in first to handle the weak case, and then extend that function to
also handle typeless ksyms. Would this work? (Rough sketch of one option
below the quoted function.)

> +{
> +	struct ksym_desc *kdesc;
> +
> +	kdesc = get_ksym_desc(gen, relo);
> +	if (!kdesc)
> +		return;
> +	/* try to copy from existing ldimm64 insn */
> +	if (kdesc->ref > 1) {
> +		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
> +			       kdesc->insn + offsetof(struct bpf_insn, imm));
> +		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
> +			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
> +		goto log;
> +	}
> +	/* remember insn offset, so we can copy ksym addr later */
> +	kdesc->insn = insn;
> +	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
> +	kdesc->typeless = true;
> +	emit_bpf_kallsyms_lookup_name(gen, relo);
> +	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
> +	emit_check_err(gen);
> +	/* store lower half of addr into insn[insn_idx].imm */
> +	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
> +	/* store upper half of addr into insn[insn_idx + 1].imm */
> +	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
> +	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
> +		  sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
> +log:
> +	if (!gen->log_level)
> +		return;
> +	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
> +			      offsetof(struct bpf_insn, imm)));
> +	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
> +			      offsetof(struct bpf_insn, imm)));
> +	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=0 w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
> +		   relo->is_weak, relo->name, kdesc->ref);
> +	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
> +	debug_regs(gen, BPF_REG_9, -1, " var t=0 w=%d (%s:count=%d): insn.reg",
> +		   relo->is_weak, relo->name, kdesc->ref);
> [...]
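Something along these lines is what I had in mind (a completely untested
sketch; the helper name emit_ksym_relo_log() and the relo->is_typeless
field are made up here, assuming the typeless flag recorded via
bpf_gen__record_extern() is kept on the ksym_relo_desc):

static void emit_ksym_relo_log(struct bpf_gen *gen,
			       struct ksym_relo_desc *relo, int ref)
{
	if (!gen->log_level)
		return;
	/* BPF_REG_8 is still expected to point at the ldimm64 insn here */
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9,
		   " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8,
			      offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

Then both emit_relo_ksym_btf() and emit_relo_ksym_typeless() could end
with a single emit_ksym_relo_log(gen, relo, kdesc->ref) call instead of
each repeating the LDX + debug_regs tail.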
> +++ b/tools/lib/bpf/libbpf.c
> @@ -6355,17 +6355,14 @@ static int bpf_program__record_externs(struct bpf_program *prog)
>  		case RELO_EXTERN_VAR:
>  			if (ext->type != EXT_KSYM)
>  				continue;
> -			if (!ext->ksym.type_id) {
> -				pr_warn("typeless ksym %s is not supported yet\n",
> -					ext->name);
> -				return -ENOTSUP;
> -			}
> -			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
> +			bpf_gen__record_extern(obj->gen_loader, ext->name,
> +					       ext->is_weak, !ext->ksym.type_id,
>  					       BTF_KIND_VAR, relo->insn_idx);
>  			break;
>  		case RELO_EXTERN_FUNC:
> -			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
> -					       BTF_KIND_FUNC, relo->insn_idx);
> +			bpf_gen__record_extern(obj->gen_loader, ext->name,
> +					       ext->is_weak, 0, BTF_KIND_FUNC,

nit: Prefer using "false" for bool arguments (e.g. the call shown after
the quote).

> +					       relo->insn_idx);
>  			break;
>  		default:
>  			continue;
> -- 
> 2.33.0
> 
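I.e. for the RELO_EXTERN_FUNC case, something like this (same call as in
the patch, just with the new typeless argument spelled as a bool):

	bpf_gen__record_extern(obj->gen_loader, ext->name,
			       ext->is_weak, false, BTF_KIND_FUNC,
			       relo->insn_idx);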