> +/*
> + * hpref_hp_get: Obtain a reference to a stable object, protected either
> + *               by hazard pointer (fast-path) or using reference
> + *               counter as fall-back.
> + */
> +static inline
> +bool hpref_hp_get(struct hpref_node **node_p, struct hpref_ctx *ctx)
> +{
> +	int cpu = rseq_current_cpu_raw();
> +	struct hpref_percpu_slots *cpu_slots = rseq_percpu_ptr(hpref_percpu_slots, cpu);
> +	struct hpref_slot *slot = &cpu_slots->slots[cpu_slots->current_slot];
> +	bool use_refcount = false;
> +	struct hpref_node *node, *node2;
> +	unsigned int next_slot;
> +
> +retry:
> +	node = uatomic_load(node_p, CMM_RELAXED);
> +	if (!node)
> +		return false;
> +	/* Use rseq to try setting current slot hp. Store B. */
> +	if (rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
> +			(intptr_t *) &slot->node, (intptr_t) NULL,
> +			(intptr_t) node, cpu)) {
> +		slot = &cpu_slots->slots[HPREF_EMERGENCY_SLOT];

Can @cpu possibly change at this point? If it can, it seems @cpu and
@cpu_slots should be updated first.

> +		use_refcount = true;
> +		/*
> +		 * This may busy-wait for another reader using the
> +		 * emergency slot to transition to refcount.
> +		 */
> +		caa_cpu_relax();
> +		goto retry;
> +	}
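
Roughly, what I have in mind is re-reading the CPU and the per-CPU slots
in that failure branch, along these lines (just a sketch against the
identifiers already in this patch; I may well be missing an rseq-level
reason why keeping the stale @cpu is harmless here):

	if (rseq_load_cbne_store__ptr(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
			(intptr_t *) &slot->node, (intptr_t) NULL,
			(intptr_t) node, cpu)) {
		/*
		 * The rseq critical section may have failed because the
		 * task migrated after @cpu was read. Re-read the current
		 * CPU and its slots so that the emergency slot (and the
		 * retried store) target the CPU we are actually running
		 * on rather than the old CPU's slots.
		 */
		cpu = rseq_current_cpu_raw();
		cpu_slots = rseq_percpu_ptr(hpref_percpu_slots, cpu);
		slot = &cpu_slots->slots[HPREF_EMERGENCY_SLOT];
		use_refcount = true;
		/*
		 * This may busy-wait for another reader using the
		 * emergency slot to transition to refcount.
		 */
		caa_cpu_relax();
		goto retry;
	}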