On 14/9/23 00:36, Leon Hwang wrote: > It's unnecessary to check imm32 in both 'if' and 'else'. > > It's better to check it first. > > Meanwhile, refactor the code for 'offs' calculation. > > Signed-off-by: Leon Hwang <hffilwlqm@xxxxxxxxx> > --- > arch/x86/net/bpf_jit_comp.c | 16 +++++++--------- > 1 file changed, 7 insertions(+), 9 deletions(-) > > diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c > index 2846c21d75bfa..f06e9a48afe52 100644 > --- a/arch/x86/net/bpf_jit_comp.c > +++ b/arch/x86/net/bpf_jit_comp.c > @@ -1629,17 +1629,15 @@ st: if (is_imm8(insn->off)) > case BPF_JMP | BPF_CALL: { > int offs; > > + if (!imm32) > + return -EINVAL; > + > func = (u8 *) __bpf_call_base + imm32; > - if (tail_call_reachable) { > + if (tail_call_reachable) > RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); > - if (!imm32) > - return -EINVAL; > - offs = 7 + x86_call_depth_emit_accounting(&prog, func); > - } else { > - if (!imm32) > - return -EINVAL; > - offs = x86_call_depth_emit_accounting(&prog, func); > - } > + > + offs = (tail_call_reachable ? 7 : 0); This 7 is the byte count of the RESTORE_TAIL_CALL_CNT instructions. I'll replace the magic number with a named constant, RESTORE_TAIL_CALL_CNT_INSN_SIZE, in the v2 patch. Thanks, Leon > + offs += x86_call_depth_emit_accounting(&prog, func); > if (emit_call(&prog, func, image + addrs[i - 1] + offs)) > return -EINVAL; > break; > > base-commit: e4f30c666b4933dcd140d5110073aa01a69d2b39