On 9/14/23 2:35 PM, Leon Hwang wrote:
It's unnecessary to check 'imm32' in both the 'if' and the 'else' branches;
it's better to check it once, up front.
Meanwhile, refactor the 'offs' calculation.
v1 -> v2:
* Add '#define RESTORE_TAIL_CALL_CNT_INSN_SIZE 7'.
Signed-off-by: Leon Hwang <hffilwlqm@xxxxxxxxx>
---
arch/x86/net/bpf_jit_comp.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 2846c21d75bfa..fe0393c7e7b68 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1025,6 +1025,7 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack) \
EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
+#define RESTORE_TAIL_CALL_CNT_INSN_SIZE 7
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
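A side note on where the 7 in the new constant comes from: RESTORE_TAIL_CALL_CNT() expands to EMIT3_off32(), i.e. three opcode/ModRM bytes (0x48 0x8B 0x85 for "mov rax, qword ptr [rbp + disp32]") followed by a 4-byte displacement, so 3 + 4 = 7 bytes of emitted instruction. A minimal sketch of how that relationship could be documented next to the define (my own suggestion, not part of the patch; assumes static_assert from <linux/build_bug.h> is usable here):

	/*
	 * mov rax, qword ptr [rbp + disp32] is encoded as
	 *   48 8b 85 xx xx xx xx
	 * i.e. REX.W prefix + opcode + ModRM (3 bytes) plus a 32-bit
	 * displacement, hence 7 bytes total.
	 */
	static_assert(RESTORE_TAIL_CALL_CNT_INSN_SIZE == 3 + 4,
		      "RESTORE_TAIL_CALL_CNT() emits a 7-byte mov");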
@@ -1629,17 +1630,16 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_CALL: {
int offs;
+ if (!imm32)
+ return -EINVAL;
+
func = (u8 *) __bpf_call_base + imm32;
- if (tail_call_reachable) {
+ if (tail_call_reachable)
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
- if (!imm32)
- return -EINVAL;
- offs = 7 + x86_call_depth_emit_accounting(&prog, func);
- } else {
- if (!imm32)
- return -EINVAL;
- offs = x86_call_depth_emit_accounting(&prog, func);
- }
+
+ offs = (tail_call_reachable ?
+ RESTORE_TAIL_CALL_CNT_INSN_SIZE : 0);
+ offs += x86_call_depth_emit_accounting(&prog, func);
I don't think this makes anything significantly better; I would rather keep
it as is.
if (emit_call(&prog, func, image + addrs[i - 1] + offs))
return -EINVAL;
break;
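For reference, this is how the whole BPF_CALL case would read with the change applied, reassembled from the quoted hunk (no new code beyond what the patch adds; the trailing brace closing the case block is implied by the existing code):

		case BPF_JMP | BPF_CALL: {
			int offs;

			if (!imm32)
				return -EINVAL;

			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable)
				RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);

			offs = (tail_call_reachable ?
				RESTORE_TAIL_CALL_CNT_INSN_SIZE : 0);
			offs += x86_call_depth_emit_accounting(&prog, func);
			if (emit_call(&prog, func, image + addrs[i - 1] + offs))
				return -EINVAL;
			break;
		}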
base-commit: cbb1dbcd99b0ae74c45c4c83c6d213c12c31785c