On 6/27/24 17:18, Jesper Dangaard Brouer wrote:
Signed-off-by: Jesper Dangaard Brouer <hawk@xxxxxxxxxx>
---
kernel/cgroup/rstat.c | 40 ++++++++++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index fb8b49437573..2a42be3a9bb3 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -279,17 +279,30 @@ __bpf_hook_end();
* value -1 is used when obtaining the main lock else this is the CPU
* number processed last.
*/
-static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
+static inline bool __cgroup_rstat_trylock(struct cgroup *cgrp, int cpu_in_loop)
+{
+ bool locked;
+
+ locked = spin_trylock_irq(&cgroup_rstat_lock);
+ if (!locked)
+ trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, !locked);
+
+ return locked;
+}
+
+static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop,
+ bool check_contention)
__acquires(&cgroup_rstat_lock)
{
- bool contended;
+ bool locked = false;

- contended = !spin_trylock_irq(&cgroup_rstat_lock);
- if (contended) {
- trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
+ if (check_contention)
+ locked = __cgroup_rstat_trylock(cgrp, cpu_in_loop);
+
+ if (!locked)
spin_lock_irq(&cgroup_rstat_lock);
- }
- trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
+
+ trace_cgroup_rstat_locked(cgrp, cpu_in_loop, !locked);
}

static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
@@ -328,7 +341,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
__cgroup_rstat_unlock(cgrp, cpu);
if (!cond_resched())
cpu_relax();
- __cgroup_rstat_lock(cgrp, cpu);
+ __cgroup_rstat_lock(cgrp, cpu, true);
}
}
}
@@ -348,9 +361,16 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
*/
__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
+ bool locked;
+
might_sleep();
- __cgroup_rstat_lock(cgrp, -1);
+ locked = __cgroup_rstat_trylock(cgrp, -1);
+ if (!locked) {
+ /* Opportunity to detect an ongoing flush */
+ __cgroup_rstat_lock(cgrp, -1, false);
+ }
+
cgroup_rstat_flush_locked(cgrp);
__cgroup_rstat_unlock(cgrp, -1);
}
@@ -368,7 +388,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
__acquires(&cgroup_rstat_lock)
{
might_sleep();
- __cgroup_rstat_lock(cgrp, -1);
+ __cgroup_rstat_lock(cgrp, -1, true);
cgroup_rstat_flush_locked(cgrp);
}
Would it be cleaner to add a "bool *flushed" output parameter to
__cgroup_rstat_lock(), so that the caller can respond differently
depending on whether the flushed flag is set? That way you wouldn't
need to expose a separate trylock() API. Also, your commit log is empty.
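
Something along the lines of the following untested sketch is what I
have in mind; the "contended" parameter name and the caller-side
handling are illustrative only, not a worked-out patch:

static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop,
				       bool *contended)
	__acquires(&cgroup_rstat_lock)
{
	bool locked;

	/* Try the fast path first; fall back to blocking on contention. */
	locked = spin_trylock_irq(&cgroup_rstat_lock);
	if (!locked) {
		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, true);
		spin_lock_irq(&cgroup_rstat_lock);
	}
	if (contended)
		*contended = !locked;

	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, !locked);
}

__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
{
	bool contended;

	might_sleep();
	__cgroup_rstat_lock(cgrp, -1, &contended);
	/*
	 * 'contended' tells the caller another flusher held the lock,
	 * i.e. a flush may already be in progress, without needing a
	 * separate trylock helper.
	 */
	cgroup_rstat_flush_locked(cgrp);
	__cgroup_rstat_unlock(cgrp, -1);
}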
Cheers,
Longman