On Thu, Apr 27, 2023 at 4:28 PM Namhyung Kim <namhyung@xxxxxxxxx> wrote:
>
> On Thu, Apr 27, 2023 at 3:15 PM Andrii Nakryiko
> <andrii.nakryiko@xxxxxxxxx> wrote:
> > Ok, I didn't manage to force the compiler to behave as long as the
> > `&rq_old->lock` pattern was used. So I went for a different approach.
> > This works:
>
> Thanks! It works for me too!
>
> Can I use this patch with your Co-developed-by tag?

Of course.

> Thanks,
> Namhyung
>
> >
> > $ git diff
> > diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > index 8911e2a077d8..8d3cfbb3cc65 100644
> > --- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > +++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > @@ -418,32 +418,32 @@ int contention_end(u64 *ctx)
> >
> >  extern struct rq runqueues __ksym;
> >
> > -struct rq__old {
> > +struct rq___old {
> >  	raw_spinlock_t lock;
> >  } __attribute__((preserve_access_index));
> >
> > -struct rq__new {
> > +struct rq___new {
> >  	raw_spinlock_t __lock;
> >  } __attribute__((preserve_access_index));
> >
> >  SEC("raw_tp/bpf_test_finish")
> >  int BPF_PROG(collect_lock_syms)
> >  {
> > -	__u64 lock_addr;
> > +	__u64 lock_addr, lock_off;
> >  	__u32 lock_flag;
> >
> > +	if (bpf_core_field_exists(struct rq___new, __lock))
> > +		lock_off = offsetof(struct rq___new, __lock);
> > +	else
> > +		lock_off = offsetof(struct rq___old, lock);
> > +
> >  	for (int i = 0; i < MAX_CPUS; i++) {
> >  		struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);
> > -		struct rq__new *rq_new = (void *)rq;
> > -		struct rq__old *rq_old = (void *)rq;
> >
> >  		if (rq == NULL)
> >  			break;
> >
> > -		if (bpf_core_field_exists(rq_new->__lock))
> > -			lock_addr = (__u64)&rq_new->__lock;
> > -		else
> > -			lock_addr = (__u64)&rq_old->lock;
> > +		lock_addr = (__u64)(void *)rq + lock_off;
> >  		lock_flag = LOCK_CLASS_RQLOCK;
> >  		bpf_map_update_elem(&lock_syms, &lock_addr,
> >  				    &lock_flag, BPF_ANY);
> >  	}
> >
> > > Thanks,
> > > Namhyung
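
For anyone following along, the fix leans on two BPF CO-RE conventions: libbpf strips everything from a triple underscore onward when matching a local type against kernel BTF, so struct rq___old and struct rq___new are both "flavors" of the kernel's struct rq (the double-underscore rq__old/rq__new spellings in the old code did not get that treatment), and bpf_core_field_exists() is resolved at load time, so only the branch matching the running kernel is ever taken. Below is a minimal standalone sketch of the same pattern, applied to another field renamed in v5.14 (task_struct.state became task_struct.__state); the file, program, and flavor names are illustrative, not part of the patch, and it assumes a typical vmlinux.h + libbpf build:

/* core_flavor_sketch.bpf.c: illustrative sketch, not part of the patch. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* libbpf strips the "___old"/"___new" suffixes, so both local types are
 * matched against the kernel's struct task_struct, whichever field the
 * running kernel's BTF actually has. */
struct task_struct___old {
	long state;
} __attribute__((preserve_access_index));

struct task_struct___new {
	unsigned int __state;
} __attribute__((preserve_access_index));

SEC("raw_tp/sched_switch")
int BPF_PROG(report_state_offset)
{
	__u64 off;

	/* Resolved against kernel BTF at load time; the non-matching
	 * offsetof() relocation lands on a dead branch, mirroring the
	 * lock_off logic in the patch above. */
	if (bpf_core_field_exists(struct task_struct___new, __state))
		off = offsetof(struct task_struct___new, __state);
	else
		off = offsetof(struct task_struct___old, state);

	bpf_printk("task state field offset: %llu", off);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Computing a plain offset this way avoids taking the address of a possibly-missing field through a preserve_access_index pointer, which is exactly the address computation the compiler kept hoisting out from behind the guard in the earlier `&rq_old->lock` version.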