When an rstat flush is ongoing for a cgroup, also flush bpf stats by
running any attached rstat flush programs.

Signed-off-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
---
 include/linux/bpf-rstat.h |  6 ++++++
 kernel/bpf/rstat.c        | 21 +++++++++++++++++++++
 kernel/cgroup/rstat.c     |  2 ++
 3 files changed, 29 insertions(+)

diff --git a/include/linux/bpf-rstat.h b/include/linux/bpf-rstat.h
index 23cad23b5fc2..55e000fe0f47 100644
--- a/include/linux/bpf-rstat.h
+++ b/include/linux/bpf-rstat.h
@@ -12,6 +12,8 @@
 int bpf_rstat_link_attach(const union bpf_attr *attr,
 			  struct bpf_prog *prog);
 
+void bpf_rstat_flush(struct cgroup *cgrp, int cpu);
+
 #else /* defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_CGROUPS) */
 
 static inline int bpf_rstat_link_attach(const union bpf_attr *attr,
@@ -20,6 +22,10 @@ static inline int bpf_rstat_link_attach(const union bpf_attr *attr,
 	return -ENOTSUPP;
 }
 
+static inline void bpf_rstat_flush(struct cgroup *cgrp, int cpu)
+{
+}
+
 #endif /* defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_CGROUPS) */
 
 #endif /* _BPF_RSTAT */
diff --git a/kernel/bpf/rstat.c b/kernel/bpf/rstat.c
index 5f529002d4b9..e96bc080f4b9 100644
--- a/kernel/bpf/rstat.c
+++ b/kernel/bpf/rstat.c
@@ -164,3 +164,24 @@ int bpf_rstat_link_attach(const union bpf_attr *attr,
 
 	return bpf_link_settle(&link_primer);
 }
+
+void bpf_rstat_flush(struct cgroup *cgrp, int cpu)
+{
+	struct bpf_rstat_flusher *flusher;
+	struct bpf_rstat_flush_ctx ctx = {
+		.cgrp = cgrp,
+		.parent = cgroup_parent(cgrp),
+		.cpu = cpu,
+	};
+
+	rcu_read_lock();
+	migrate_disable();
+	spin_lock(&bpf_rstat_flushers_lock);
+
+	list_for_each_entry(flusher, &bpf_rstat_flushers, list)
+		(void) bpf_prog_run(flusher->prog, &ctx);
+
+	spin_unlock(&bpf_rstat_flushers_lock);
+	migrate_enable();
+	rcu_read_unlock();
+}
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 24b5c2ab5598..0285d496e807 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -2,6 +2,7 @@
 #include "cgroup-internal.h"
 
 #include <linux/sched/cputime.h>
+#include <linux/bpf-rstat.h>
 
 static DEFINE_SPINLOCK(cgroup_rstat_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
@@ -168,6 +169,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
 		struct cgroup_subsys_state *css;
 
 		cgroup_base_stat_flush(pos, cpu);
+		bpf_rstat_flush(pos, cpu);
 
 		rcu_read_lock();
 		list_for_each_entry_rcu(css, &pos->rstat_css_list,
-- 
2.36.0.550.gb090851708-goog