[PATCH bpf-next v9 01/10] bpf: Check stack depth limit after visiting all subprogs

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Check stack depth limit after all subprogs are visited. Note that if
private stack is enabled, the only stack size restriction is for a single
subprog with size less than or equal to MAX_BPF_STACK.

In subsequent patches, in function check_max_stack_depth(), there could
be a flip from enabling private stack to disabling private stack due to
a potential nested bpf subprog run. Moving the stack depth limit check to
after all subprogs have been visited ensures that the check is not missed
in such flipping cases.

The useless 'continue' statement in the loop in function
check_max_stack_depth() is also removed.

Signed-off-by: Yonghong Song <yonghong.song@xxxxxxxxx>
---
 kernel/bpf/verifier.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ba800c7611e3..ed8f70e51141 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6032,7 +6032,8 @@ static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth)
  * Since recursion is prevented by check_cfg() this algorithm
  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
  */
-static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
+static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx,
+					 int *subtree_depth, int *depth_frame)
 {
 	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
@@ -6070,10 +6071,9 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
 		return -EACCES;
 	}
 	depth += round_up_stack_depth(env, subprog[idx].stack_depth);
-	if (depth > MAX_BPF_STACK) {
-		verbose(env, "combined stack size of %d calls is %d. Too large\n",
-			frame + 1, depth);
-		return -EACCES;
+	if (depth > MAX_BPF_STACK && !*subtree_depth) {
+		*subtree_depth = depth;
+		*depth_frame = frame + 1;
 	}
 continue_func:
 	subprog_end = subprog[idx + 1].start;
@@ -6173,15 +6173,19 @@ static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
 static int check_max_stack_depth(struct bpf_verifier_env *env)
 {
 	struct bpf_subprog_info *si = env->subprog_info;
-	int ret;
+	int ret, subtree_depth = 0, depth_frame;
 
 	for (int i = 0; i < env->subprog_cnt; i++) {
 		if (!i || si[i].is_async_cb) {
-			ret = check_max_stack_depth_subprog(env, i);
+			ret = check_max_stack_depth_subprog(env, i, &subtree_depth, &depth_frame);
 			if (ret < 0)
 				return ret;
 		}
-		continue;
+	}
+	if (subtree_depth > MAX_BPF_STACK) {
+		verbose(env, "combined stack size of %d calls is %d. Too large\n",
+			depth_frame, subtree_depth);
+		return -EACCES;
 	}
 	return 0;
 }
-- 
2.43.5





[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux