[PATCH bpf-next 02/12] bpf: Move bpf_prog_start_time to linux/bpf.h

Move bpf_prog_start_time to include/linux/bpf.h and make it
globally available.

It will be used by other program types in the following changes.
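
As an illustration only (not part of this patch), a caller outside the
trampoline code could pair the now-global helper with
bpf_prog_update_prog_stats(); the function name example_run_prog() below
is made up for this sketch:

  /* Hypothetical dispatch path: run time is accounted only when
   * the bpf_stats_enabled_key static branch is enabled.
   */
  static u32 example_run_prog(struct bpf_prog *prog, const void *ctx)
  {
  	u64 start = bpf_prog_start_time(); /* BPF_PROG_NO_START_TIME if stats are off */
  	u32 ret = bpf_prog_run(prog, ctx);

  	bpf_prog_update_prog_stats(prog, start);
  	return ret;
  }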

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
 include/linux/bpf.h     | 20 ++++++++++++++++++++
 kernel/bpf/trampoline.c | 12 ------------
 2 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05eece17a989..23a73f52c7bc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -29,6 +29,9 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/static_call.h>
 #include <linux/memcontrol.h>
+#include <linux/sched/clock.h>
+
+DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -2460,6 +2463,18 @@ static inline bool has_current_bpf_ctx(void)
 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
 void notrace bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start);
 
+static __always_inline u64 notrace bpf_prog_start_time(void)
+{
+	u64 start = BPF_PROG_NO_START_TIME;
+
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+		start = sched_clock();
+		if (unlikely(!start))
+			start = BPF_PROG_NO_START_TIME;
+	}
+	return start;
+}
+
 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
 		     enum bpf_dynptr_type type, u32 offset, u32 size);
 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
@@ -2702,6 +2717,11 @@ static void bpf_prog_update_prog_stats(struct bpf_prog *prog, u64 start)
 {
 }
 
+static inline u64 notrace bpf_prog_start_time(void)
+{
+	return 0;
+}
+
 static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
 {
 }
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index a6528e847fae..ed5b014f9532 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -819,18 +819,6 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	mutex_unlock(&trampoline_mutex);
 }
 
-static __always_inline u64 notrace bpf_prog_start_time(void)
-{
-	u64 start = BPF_PROG_NO_START_TIME;
-
-	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
-		start = sched_clock();
-		if (unlikely(!start))
-			start = BPF_PROG_NO_START_TIME;
-	}
-	return start;
-}
-
 /* The logic is similar to bpf_prog_run(), but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
-- 
2.41.0




