Patch #9 is simpler if fewer places modify the loop_entry field.
The loop deleted by this patch does not affect correctness; it is only
a performance optimization. However, measurements on selftests and
sched_ext programs show that this optimization is unnecessary:
- at most 2 steps are done in get_loop_entry();
- most of the time 0 or 1 steps are done in get_loop_entry().

Measured using "do-not-submit" patches from here:
https://github.com/eddyz87/bpf/tree/get-loop-entry-hungup

Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
---
 kernel/bpf/verifier.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 02f60b8683b5..3c3f33d90fc0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1818,13 +1818,12 @@ static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_sta
  * and cur's loop entry has to be updated (case A), handle this in
  * update_branch_counts();
  * - use st->branch > 0 as a signal that st is in the current DFS path;
- * - handle cases B and C in is_state_visited();
- * - update topmost loop entry for intermediate states in get_loop_entry().
+ * - handle cases B and C in is_state_visited().
  */
 static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_env *env,
 						 struct bpf_verifier_state *st)
 {
-	struct bpf_verifier_state *topmost = st->loop_entry, *old;
+	struct bpf_verifier_state *topmost = st->loop_entry;
 	u32 steps = 0;
 
 	while (topmost && topmost->loop_entry) {
@@ -1835,14 +1834,6 @@ static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_env *env,
 		}
 		topmost = topmost->loop_entry;
 	}
-	/* Update loop entries for intermediate states to avoid this
-	 * traversal in future get_loop_entry() calls.
-	 */
-	while (st && st->loop_entry != topmost) {
-		old = st->loop_entry;
-		st->loop_entry = topmost;
-		st = old;
-	}
 	return topmost;
 }
 
-- 
2.48.1
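
For context on the removed loop: it effectively performed path compression
on the loop_entry chain, repointing intermediate states at the topmost entry
so that later get_loop_entry() calls finish in one step. Below is a minimal,
self-contained userspace sketch of that idea; struct toy_state and the toy_*
helpers are hypothetical stand-ins for bpf_verifier_state and get_loop_entry()
(the dfs_depth sanity check is omitted), and only the chain walk plus the
removed compression pass mirror the logic shown in the diff above.

/* Minimal userspace sketch (not kernel code). */
#include <assert.h>
#include <stddef.h>

struct toy_state {
	struct toy_state *loop_entry;	/* innermost known loop entry, or NULL */
};

/* What remains after this patch: walk the loop_entry chain to its top. */
static struct toy_state *toy_get_loop_entry(struct toy_state *st)
{
	struct toy_state *topmost = st->loop_entry;

	while (topmost && topmost->loop_entry)
		topmost = topmost->loop_entry;
	return topmost;
}

/* What the deleted loop added: after finding the topmost entry, repoint
 * every intermediate state directly at it, so later lookups take one step.
 */
static struct toy_state *toy_get_loop_entry_compressing(struct toy_state *st)
{
	struct toy_state *topmost = toy_get_loop_entry(st), *old;

	while (st && st->loop_entry != topmost) {
		old = st->loop_entry;
		st->loop_entry = topmost;
		st = old;
	}
	return topmost;
}

int main(void)
{
	struct toy_state a = { NULL };
	struct toy_state b = { .loop_entry = &a };
	struct toy_state c = { .loop_entry = &b };

	assert(toy_get_loop_entry(&c) == &a);
	assert(toy_get_loop_entry_compressing(&c) == &a);
	assert(c.loop_entry == &a);	/* chain was shortened in place */
	return 0;
}

Since the measurements cited above show the chain rarely exceeds one or two
links, dropping the compression pass simplifies the code without a measurable
cost.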