Re: [bpf-next PATCH 2/3] bpf: sk_msg helpers for probe_* and *current_task*

On 5/13/20 12:24 PM, John Fastabend wrote:
Often it is useful when applying policy to know something about the
task. If the administrator has CAP_SYS_ADMIN rights then they can
use kprobe + sk_msg and link the two programs together to accomplish
this. However, this is a bit clunky and also means we have to call
both the sk_msg program and the kprobe program, when we could use a
single program and avoid passing metadata through sk_msg/skb, the
socket, etc.

To accomplish this, add probe_* helpers to sk_msg programs, guarded
by a CAP_SYS_ADMIN check. The newly supported helpers are the following:

  BPF_FUNC_get_current_task
  BPF_FUNC_current_task_under_cgroup
  BPF_FUNC_probe_read_user
  BPF_FUNC_probe_read_kernel
  BPF_FUNC_probe_read
  BPF_FUNC_probe_read_user_str
  BPF_FUNC_probe_read_kernel_str
  BPF_FUNC_probe_read_str

I think this is a good idea. But this will require the bpf program
to be GPLed; that is probably okay. As for capabilities, it is
CAP_SYS_ADMIN now; in the future it may be CAP_PERFMON.

Also, do we want to remove BPF_FUNC_probe_read and
BPF_FUNC_probe_read_str from the list? Since we are introducing
these helpers to a new program type, we could deprecate those two
helpers right away.

The new helpers will also be subject to the new security lockdown
rules, which may have an impact on networking bpf programs on
particular setups.
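
To make the use case concrete, a single sk_msg verdict program built
on the new helpers could look roughly like the sketch below (my
illustration, not taken from the patch). It assumes a CO-RE build
(vmlinux.h) and a loader running with CAP_SYS_ADMIN; the program name
and the comm-based "envoy" policy are made up. Note the GPL license,
required because the probe_* protos are gpl_only:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sk_msg verdict program using the newly exposed helpers. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* probe_* helpers are gpl_only, so the program must be GPL */
char _license[] SEC("license") = "GPL";

SEC("sk_msg")
int msg_task_policy(struct sk_msg_md *msg)
{
	struct task_struct *task;
	char comm[16] = {};

	/* BPF_FUNC_get_current_task: raw pointer to the sending task */
	task = (struct task_struct *)bpf_get_current_task();

	/* BPF_FUNC_probe_read_kernel_str: safely copy current->comm */
	if (bpf_probe_read_kernel_str(comm, sizeof(comm), task->comm) < 0)
		return SK_DROP;

	/* Hypothetical policy: only a task named "envoy" may send */
	if (comm[0] == 'e' && comm[1] == 'n' && comm[2] == 'v' &&
	    comm[3] == 'o' && comm[4] == 'y' && comm[5] == '\0')
		return SK_PASS;

	return SK_DROP;
}

This would replace the kprobe + sk_msg pair described in the commit
message with one program and no metadata passing.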


Signed-off-by: John Fastabend <john.fastabend@xxxxxxxxx>
---
  kernel/trace/bpf_trace.c |   16 ++++++++--------
  net/core/filter.c        |   34 ++++++++++++++++++++++++++++++++++
  2 files changed, 42 insertions(+), 8 deletions(-)

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d961428..abe6721 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -147,7 +147,7 @@ BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
  	return ret;
  }
-static const struct bpf_func_proto bpf_probe_read_user_proto = {
+const struct bpf_func_proto bpf_probe_read_user_proto = {
  	.func		= bpf_probe_read_user,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -167,7 +167,7 @@ BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
  	return ret;
  }
-static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+const struct bpf_func_proto bpf_probe_read_user_str_proto = {
  	.func		= bpf_probe_read_user_str,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -198,7 +198,7 @@ BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
  	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
  }
-static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+const struct bpf_func_proto bpf_probe_read_kernel_proto = {
  	.func		= bpf_probe_read_kernel,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -213,7 +213,7 @@ BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
  	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
  }
-static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+const struct bpf_func_proto bpf_probe_read_compat_proto = {
  	.func		= bpf_probe_read_compat,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -253,7 +253,7 @@ BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
  	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
  }
-static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
  	.func		= bpf_probe_read_kernel_str,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -268,7 +268,7 @@ BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
  	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
  }
-static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
+const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
  	.func		= bpf_probe_read_compat_str,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -874,7 +874,7 @@ BPF_CALL_0(bpf_get_current_task)
  	return (long) current;
  }
-static const struct bpf_func_proto bpf_get_current_task_proto = {
+const struct bpf_func_proto bpf_get_current_task_proto = {
  	.func		= bpf_get_current_task,
  	.gpl_only	= true,
  	.ret_type	= RET_INTEGER,
@@ -895,7 +895,7 @@ BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
  	return task_under_cgroup_hierarchy(current, cgrp);
  }
-static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
+const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
  	.func           = bpf_current_task_under_cgroup,
  	.gpl_only       = false,
  	.ret_type       = RET_INTEGER,
diff --git a/net/core/filter.c b/net/core/filter.c
index 45b4a16..d1c4739 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6362,6 +6362,15 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
  const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
+const struct bpf_func_proto bpf_current_task_under_cgroup_proto __weak;
+const struct bpf_func_proto bpf_get_current_task_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_proto __weak;
+const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
+const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
+const struct bpf_func_proto bpf_probe_read_compat_proto __weak;
+const struct bpf_func_proto bpf_probe_read_compat_str_proto __weak;
+
  static const struct bpf_func_proto *
  sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  {
@@ -6397,6 +6406,31 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
  		return &bpf_get_cgroup_classid_curr_proto;
  #endif
  	default:
+		break;
+	}
+
+	if (!capable(CAP_SYS_ADMIN))
+		return bpf_base_func_proto(func_id);
+
+	/* All helpers below are for CAP_SYS_ADMIN only */
+	switch (func_id) {
+	case BPF_FUNC_get_current_task:
+		return &bpf_get_current_task_proto;
+	case BPF_FUNC_current_task_under_cgroup:
+		return &bpf_current_task_under_cgroup_proto;
+	case BPF_FUNC_probe_read_user:
+		return &bpf_probe_read_user_proto;
+	case BPF_FUNC_probe_read_kernel:
+		return &bpf_probe_read_kernel_proto;
+	case BPF_FUNC_probe_read:
+		return &bpf_probe_read_compat_proto;
+	case BPF_FUNC_probe_read_user_str:
+		return &bpf_probe_read_user_str_proto;
+	case BPF_FUNC_probe_read_kernel_str:
+		return &bpf_probe_read_kernel_str_proto;
+	case BPF_FUNC_probe_read_str:
+		return &bpf_probe_read_compat_str_proto;
+	default:
  		return bpf_base_func_proto(func_id);

If we can get a consensus here, I think we could even fold all
these bpf helpers (get_current_task, ..., probe_read_kernel_str)
into bpf_base_func_proto(), so that any bpf program type, including
the other networking types, can use them. (A rough sketch of this
follows after the diff.)
Any concerns?

  	}
  }
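
Purely for discussion, here is a rough sketch (mine, not part of the
patch) of what that folding could look like in kernel/bpf/helpers.c,
keeping the privileged gate in one place. It leaves out the legacy
probe_read/probe_read_str compat helpers per the comment above, and
whether the gate stays capable(CAP_SYS_ADMIN) or becomes CAP_PERFMON
is still open:

/* Sketch only: existing unprivileged cases of bpf_base_func_proto()
 * are elided, and the *_proto declarations from bpf_trace.c would
 * still need to be made visible here; only the privileged tail is shown.
 */
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	/* ... existing unprivileged helpers (map ops, ktime, etc.) ... */
	default:
		break;
	}

	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel:
		return &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return &bpf_probe_read_kernel_str_proto;
	default:
		return NULL;
	}
}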



