On Tue, Mar 05, 2024 at 08:57:19PM +0100, Uladzislau Rezki (Sony) wrote:
> Fix the race below by not releasing a wait-head from the
> GP kthread, as doing so can lead to the wait-head being
> reused while a worker can still access it, thus executing
> newly added callbacks too early.
>
> CPU 0                                CPU 1
> -----                                -----
>
> // wait_tail == HEAD1
> rcu_sr_normal_gp_cleanup() {
>     // has passed SR_MAX_USERS_WAKE_FROM_GP
>     wait_tail->next = next;
>     // done_tail = HEAD1
>     smp_store_release(&rcu_state.srs_done_tail, wait_tail);
>     queue_work() {
>         test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
>         __queue_work()
>     }
> }
>
>                                      set_work_pool_and_clear_pending()
>                                      rcu_sr_normal_gp_cleanup_work() {
> // new GP, wait_tail == HEAD2
> rcu_sr_normal_gp_cleanup() {
>     // executes all completions, but stops at HEAD1
>     wait_tail->next = HEAD1;
>     // done_tail = HEAD2
>     smp_store_release(&rcu_state.srs_done_tail, wait_tail);
>     queue_work() {
>         test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
>         __queue_work()
>     }
> }
>                                          // done = HEAD2
>                                          done = smp_load_acquire(&rcu_state.srs_done_tail);
>                                          // head = HEAD1
>                                          head = done->next;
>                                          done->next = NULL;
>                                          llist_for_each_safe() {
>                                              // completes all callbacks, releases HEAD1
>                                          }
>                                      }
>                                      // Process second queue
>                                      set_work_pool_and_clear_pending()
>                                      rcu_sr_normal_gp_cleanup_work() {
>                                          // done = HEAD2
>                                          done = smp_load_acquire(&rcu_state.srs_done_tail);
>
> // new GP, wait_tail == HEAD3
> rcu_sr_normal_gp_cleanup() {
>     // Finds HEAD2 with ->next == NULL at the end
>     rcu_sr_put_wait_head(HEAD2)
>     ...
>
> // A few more GPs later
> rcu_sr_normal_gp_init() {
>     HEAD2 = rcu_sr_get_wait_head();
>     llist_add(HEAD2, &rcu_state.srs_next);
>                                          // head == rcu_state.srs_next
>                                          head = done->next;
>                                          done->next = NULL;
>                                          llist_for_each_safe() {
>                                              // EXECUTE CALLBACKS TOO EARLY!!!
>                                          }
>                                      }
>
> Reported-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
> Fixes: 05a10b921000 ("rcu: Support direct wake-up of synchronize_rcu() users")
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>

Reviewed-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
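
For anyone trying to follow the ownership rule the fix enforces, below is a
minimal, untested userspace sketch. The names (gp_publish, cleanup_worker,
struct wait_head) are hypothetical, and C11 atomics stand in for the kernel's
smp_store_release()/smp_load_acquire(). It is not the patch itself, only the
discipline it establishes: the GP-thread side publishes the done tail but
never frees a wait head; the cleanup worker is the sole context that detaches
and releases completed heads, and the published tail survives as the boundary
for the next batch, so it can never be recycled while a worker still walks it.

/*
 * Hypothetical userspace model of the "only the cleanup worker
 * releases a wait head" rule. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wait_head {
	struct wait_head *next;	/* links completed requests, oldest last */
	int id;
};

/* Published boundary between already-handled and pending batches. */
static _Atomic(struct wait_head *) done_tail;

/* GP-thread side: publish a new batch, but never free a wait head. */
static void gp_publish(struct wait_head *wh)
{
	/* Pairs with the acquire load in cleanup_worker() below. */
	atomic_store_explicit(&done_tail, wh, memory_order_release);
}

/* Worker side: the sole owner of releases, so reuse cannot race. */
static void *cleanup_worker(void *arg)
{
	struct wait_head *done, *head, *next;

	(void)arg;
	done = atomic_load_explicit(&done_tail, memory_order_acquire);
	if (!done)
		return NULL;

	/* Detach everything behind the published tail ... */
	head = done->next;
	done->next = NULL;

	/* ... and only then run callbacks and free the heads. */
	for (; head; head = next) {
		next = head->next;
		printf("completing request %d\n", head->id);
		free(head);	/* the only place a detached head dies */
	}
	return NULL;
}

int main(void)
{
	struct wait_head *tail = calloc(1, sizeof(*tail));
	struct wait_head *old = calloc(1, sizeof(*old));
	pthread_t worker;

	old->id = 1;
	tail->id = 2;
	tail->next = old;	/* HEAD2 -> HEAD1, as in the diagram */

	gp_publish(tail);
	pthread_create(&worker, NULL, cleanup_worker, NULL);
	pthread_join(worker, NULL);

	/* The published tail outlives the batch; reclaim at teardown. */
	free(tail);
	return 0;
}

The broken sequence in the diagram is precisely a violation of the last
comment: the GP kthread put HEAD2 back into the free pool while a worker
still held a stale pointer through it, so a later rcu_sr_normal_gp_init()
could hand the same head out again and expose not-yet-completed callbacks.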