From ed528d05b7800d651ef5e36de37732a06b456462 Mon Sep 17 00:00:00 2001
From: Akira Yokosawa <akiyks@xxxxxxxxx>
Date: Sat, 5 Jan 2019 00:15:47 +0900
Subject: [PATCH 2/2] datastruct/hash: Annotate racy accesses with READ_ONCE/WRITE_ONCE

ht_get_bucket() runs concurrently with hashtab_resize().  As a
defensive coding measure against compiler optimizations, accesses to
(*htp)->ht_resize_cur, (*htp)->ht_new, and (*htp)->ht_idx should be
annotated with READ_ONCE()/WRITE_ONCE().

Signed-off-by: Akira Yokosawa <akiyks@xxxxxxxxx>
---
 CodeSamples/datastruct/hash/hash_resize.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/CodeSamples/datastruct/hash/hash_resize.c b/CodeSamples/datastruct/hash/hash_resize.c
index 57c75c1..e1d92a6 100644
--- a/CodeSamples/datastruct/hash/hash_resize.c
+++ b/CodeSamples/datastruct/hash/hash_resize.c
@@ -137,13 +137,13 @@ ht_get_bucket(struct ht **htp, void *key, long *b, int *i)
 
 	htbp = ht_get_bucket_single(*htp, key, b);	//\lnlbl{call_single}
 							//\fcvexclude
-	if (*b <= (*htp)->ht_resize_cur) {		//\lnlbl{resized}
+	if (*b <= READ_ONCE((*htp)->ht_resize_cur)) {	//\lnlbl{resized}
 		smp_mb(); /* order ->ht_resize_cur before ->ht_new. */
-		*htp = (*htp)->ht_new;			//\lnlbl{newtable}
+		*htp = READ_ONCE((*htp)->ht_new);	//\lnlbl{newtable}
 		htbp = ht_get_bucket_single(*htp, key, b); //\lnlbl{newbucket}
 	}
 	if (i)						//\lnlbl{chk_i}
-		*i = (*htp)->ht_idx;			//\lnlbl{set_idx}
+		*i = READ_ONCE((*htp)->ht_idx);		//\lnlbl{set_idx}
 	return htbp;					//\lnlbl{return}
 }							//\lnlbl{e}
 //\end{snippet}
@@ -301,10 +301,10 @@ int hashtab_resize(struct hashtab *htp_master,
 		spin_unlock(&htp_master->ht_lock);	//\lnlbl{rel_nomem}
 		return -ENOMEM;				//\lnlbl{ret_nomem}
 	}
-	htp->ht_new = htp_new;				//\lnlbl{set_newtbl}
+	WRITE_ONCE(htp->ht_new, htp_new);		//\lnlbl{set_newtbl}
 	synchronize_rcu();				//\lnlbl{sync_rcu}
 	idx = htp->ht_idx;				//\lnlbl{get_curidx}
-	htp_new->ht_idx = !idx;
+	WRITE_ONCE(htp_new->ht_idx, !idx);
 	for (i = 0; i < htp->ht_nbuckets; i++) {	//\lnlbl{loop:b}
 		htbp = &htp->ht_bkt[i];			//\lnlbl{get_oldcur}
 		spin_lock(&htbp->htb_lock);		//\lnlbl{acq_oldcur}
@@ -315,7 +315,7 @@ int hashtab_resize(struct hashtab *htp_master,
 			spin_unlock(&htbp_new->htb_lock);
 		}					//\lnlbl{loop_list:e}
 		smp_mb(); /* Fill new buckets before claiming them. */
-		htp->ht_resize_cur = i;			//\lnlbl{update_resize}
+		WRITE_ONCE(htp->ht_resize_cur, i);	//\lnlbl{update_resize}
 		spin_unlock(&htbp->htb_lock);		//\lnlbl{rel_oldcur}
 	}						//\lnlbl{loop:e}
 	rcu_assign_pointer(htp_master->ht_cur, htp_new); //\lnlbl{rcu_assign}
-- 
2.7.4
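
Illustrative note (not part of the patch): the motivation above is that a
plain C load or store to a variable another thread is concurrently updating
may be torn, fused, re-loaded, or hoisted by the compiler, whereas
READ_ONCE()/WRITE_ONCE() force a single full-width access.  Below is a
minimal standalone sketch of that usage.  The macro definitions mirror the
usual volatile-cast style (as in the Linux kernel and perfbook's CodeSamples
api.h), and the names resize_cur, resizer, and reader are hypothetical
stand-ins rather than code from hash_resize.c.

#include <pthread.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel-style macros (assumed definitions;
 * the real CodeSamples build gets these from its api.h).  The volatile
 * cast makes the compiler emit exactly one load or store. */
#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) do { *(volatile typeof(x) *)&(x) = (v); } while (0)

static long resize_cur = -1;	/* hypothetical stand-in for ->ht_resize_cur */

static void *resizer(void *arg)	/* concurrently advances resize_cur */
{
	long i;

	(void)arg;
	for (i = 0; i < 1000; i++)
		WRITE_ONCE(resize_cur, i);	/* racy store: annotate it */
	return NULL;
}

static void *reader(void *arg)	/* concurrently samples resize_cur */
{
	long snap;

	(void)arg;
	snap = READ_ONCE(resize_cur);		/* racy load: annotate it */
	printf("resize progressed past bucket %ld\n", snap);
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&w, NULL, resizer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Note that READ_ONCE()/WRITE_ONCE() only constrain the compiler on the
annotated access; in the patch itself, the smp_mb() calls are still what
order ->ht_resize_cur against ->ht_new and the bucket fills.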