FWIW, this is the output from faddr2line for Linux 6.8.11:

$ scripts/faddr2line --list vmlinux blk_try_enter_queue+0xc/0x75
blk_try_enter_queue+0xc/0x75:

__ref_is_percpu at include/linux/percpu-refcount.h:174 (discriminator 2)
  169          * READ_ONCE() is required when fetching it.
  170          *
  171          * The dependency ordering from the READ_ONCE() pairs
  172          * with smp_store_release() in __percpu_ref_switch_to_percpu().
  173          */
 >174<        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
  175
  176         /*
  177          * Theoretically, the following could test just ATOMIC; however,
  178          * then we'd have to mask off DEAD separately as DEAD may be
  179          * visible without ATOMIC if we race with percpu_ref_kill(). DEAD

(inlined by) percpu_ref_tryget_live_rcu at include/linux/percpu-refcount.h:282 (discriminator 2)
  277         unsigned long __percpu *percpu_count;
  278         bool ret = false;
  279
  280         WARN_ON_ONCE(!rcu_read_lock_held());
  281
 >282<        if (likely(__ref_is_percpu(ref, &percpu_count))) {
  283                 this_cpu_inc(*percpu_count);
  284                 ret = true;
  285         } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
  286                 ret = atomic_long_inc_not_zero(&ref->data->count);
  287         }

(inlined by) blk_try_enter_queue at block/blk.h:43 (discriminator 2)
  38   void submit_bio_noacct_nocheck(struct bio *bio);
  39
  40   static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
  41   {
  42           rcu_read_lock();
 >43<          if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
  44                   goto fail;
  45
  46           /*
  47            * The code that increments the pm_only counter must ensure that the
  48            * counter is globally visible before the queue is unfrozen.
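
For what it's worth, this kind of decoding needs a vmlinux built with debug info; if faddr2line isn't handy, the same lookup can be done by hand with addr2line once the symbol+offset is turned into an absolute address. A rough sketch (the address is left as a placeholder, not a real value):

$ nm vmlinux | grep ' blk_try_enter_queue$'      # symbol's start address in vmlinux
$ addr2line -f -i -e vmlinux <start address + 0xc>
  # -f prints function names and -i walks the inlined call chain, so this
  # reports the same __ref_is_percpu -> percpu_ref_tryget_live_rcu ->
  # blk_try_enter_queue frames with their file:line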