xchg() on a bool is causing issues on riscv and arm32, which do not
implement xchg() for single-byte operands, so make monitor_todo an int
instead. Please squash this fix into the -rcu dev branch to resolve the
issue.

Fixes: -rcu dev commit 3cbd3aa7d9c7bdf ("rcu/tree: Add basic support for kfree_rcu() batching")
Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
---
 kernel/rcu/tree.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4f7c3096d786..33192a58b39a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2717,7 +2717,7 @@ struct kfree_rcu_cpu {
 	 * is busy, ->head just continues to grow and we retry flushing later.
 	 */
 	struct delayed_work monitor_work;
-	bool monitor_todo;	/* Is a delayed work pending execution? */
+	int monitor_todo;	/* Is a delayed work pending execution? */
 };
 
 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
@@ -2790,7 +2790,7 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
 	/* Previous batch that was queued to RCU did not get free yet, let us
 	 * try again soon.
 	 */
-	if (!xchg(&krcp->monitor_todo, true))
+	if (!xchg(&krcp->monitor_todo, 1))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 	spin_unlock_irqrestore(&krcp->lock, flags);
 }
@@ -2806,7 +2806,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
 						 monitor_work.work);
 
 	spin_lock_irqsave(&krcp->lock, flags);
-	if (xchg(&krcp->monitor_todo, false))
+	if (xchg(&krcp->monitor_todo, 0))
 		kfree_rcu_drain_unlock(krcp, flags);
 	else
 		spin_unlock_irqrestore(&krcp->lock, flags);
@@ -2858,7 +2858,7 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	krcp->head = head;
 
 	/* Schedule monitor for timely drain after KFREE_DRAIN_JIFFIES. */
-	if (!xchg(&krcp->monitor_todo, true))
+	if (!xchg(&krcp->monitor_todo, 1))
 		schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
 	spin_unlock(&krcp->lock);
 
-- 
2.23.0.rc1.153.gdeed80330f-goog
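
[Not part of the patch: below is a minimal standalone userspace sketch of
the 0/1 flag handshake the fix switches to. The kernel's xchg() is only
approximated here with GCC's __atomic_exchange_n() builtin, and the
function names (schedule_monitor, kfree_rcu_monitor_sketch) are made up
for illustration; they are not the kernel's functions.]

	/* Sketch: model monitor_todo as an int toggled with atomic exchange. */
	#include <stdio.h>

	static int monitor_todo;	/* 0: idle, 1: monitor work pending */

	static void schedule_monitor(void)
	{
		/* Only the caller that flips 0 -> 1 schedules the (pretend) work. */
		if (!__atomic_exchange_n(&monitor_todo, 1, __ATOMIC_SEQ_CST))
			printf("monitor scheduled\n");
		else
			printf("monitor already pending\n");
	}

	static void kfree_rcu_monitor_sketch(void)
	{
		/* Clear the flag; drain only if work was actually pending. */
		if (__atomic_exchange_n(&monitor_todo, 0, __ATOMIC_SEQ_CST))
			printf("draining batched requests\n");
	}

	int main(void)
	{
		schedule_monitor();		/* first call schedules */
		schedule_monitor();		/* second call sees it pending */
		kfree_rcu_monitor_sketch();	/* drains and clears the flag */
		return 0;
	}

The point of the pattern is that only the caller observing the 0 -> 1
transition queues the delayed work, so concurrent callers cannot queue it
twice, and the monitor side clears the flag with the same exchange so a
spurious run drains nothing; widening the flag to an int keeps that
pattern on architectures without a single-byte xchg().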