[ Sasha's backport helper bot ]

Hi,

✅ All tests passed successfully. No issues detected.
No action required from the submitter.

The upstream commit SHA1 provided is correct: dfd3df31c9db752234d7d2e09bef2aeabb643ce4

Status in newer kernel trees:
6.13.y | Not found

Note: The patch differs from the upstream commit:

---
1:  dfd3df31c9db7 ! 1:  ec5a5cc49c69d mm/slab/kvfree_rcu: Switch to WQ_MEM_RECLAIM wq
    @@ Metadata
     ## Commit message ##
        mm/slab/kvfree_rcu: Switch to WQ_MEM_RECLAIM wq

    +    commit dfd3df31c9db752234d7d2e09bef2aeabb643ce4 upstream.
    +
        Currently kvfree_rcu() APIs use a system workqueue which is
        "system_unbound_wq" to driver RCU machinery to reclaim a memory.

    @@ Commit message
        Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
        Reviewed-by: Joel Fernandes <joelagnelf@xxxxxxxxxx>
        Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
    +    Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>

    - ## mm/slab_common.c ##
    -@@ mm/slab_common.c: module_param(rcu_min_cached_objs, int, 0444);
    - static int rcu_delay_page_cache_fill_msec = 5000;
    - module_param(rcu_delay_page_cache_fill_msec, int, 0444);
    + ## kernel/rcu/tree.c ##
    +@@ kernel/rcu/tree.c: void call_rcu(struct rcu_head *head, rcu_callback_t func)
    + }
    + EXPORT_SYMBOL_GPL(call_rcu);

    +static struct workqueue_struct *rcu_reclaim_wq;
    +
     /* Maximum number of jiffies to wait before draining a batch. */
     #define KFREE_DRAIN_JIFFIES (5 * HZ)
     #define KFREE_N_BATCHES 2

    -@@ mm/slab_common.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
    +@@ kernel/rcu/tree.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
     if (delayed_work_pending(&krcp->monitor_work)) {
     delay_left = krcp->monitor_work.timer.expires - jiffies;
     if (delay < delay_left)

    @@ mm/slab_common.c: __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
     }

     static void
    -@@ mm/slab_common.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
    +@@ kernel/rcu/tree.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
     // "free channels", the batch can handle. Break
     // the loop since it is done with this CPU thus
     // queuing an RCU work is _always_ success here.

    @@ mm/slab_common.c: kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
     WARN_ON_ONCE(!queued);
     break;
     }

    -@@ mm/slab_common.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
    +@@ kernel/rcu/tree.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
     if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
         !atomic_xchg(&krcp->work_in_progress, 1)) {
     if (atomic_read(&krcp->backoff_page_cache_fill)) {

    @@ mm/slab_common.c: run_page_cache_worker(struct kfree_rcu_cpu *krcp)
     &krcp->page_cache_work,
     msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
     } else {

    -@@ mm/slab_common.c: void __init kvfree_rcu_init(void)
    +@@ kernel/rcu/tree.c: static void __init kfree_rcu_batch_init(void)
     int i, j;
     struct shrinker *kfree_rcu_shrinker;
---

Results of testing on various branches:

| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.12.y       | Success     | Success    |
| stable/linux-6.13.y       | Success     | Success    |
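
For context on the change itself: the patch moves kvfree_rcu() reclaim work off system_unbound_wq onto a dedicated workqueue created with WQ_MEM_RECLAIM, so the work keeps a rescuer thread and can make forward progress under memory pressure. Below is a minimal sketch of that pattern, not the exact upstream hunks (which the range-diff above elides); the rcu_reclaim_wq name matches the declaration visible in the diff, while the init function name, the workqueue name string, and the queueing call site are illustrative only.

#include <linux/workqueue.h>

static struct workqueue_struct *rcu_reclaim_wq;

static int __init kvfree_rcu_wq_init(void)
{
	/*
	 * WQ_MEM_RECLAIM attaches a rescuer thread to the workqueue, so
	 * queued items can still run when the system is too low on memory
	 * to spawn new kworker threads.
	 */
	rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim",
					 WQ_MEM_RECLAIM, 0);
	if (!rcu_reclaim_wq)
		return -ENOMEM;

	return 0;
}

/*
 * Call sites then queue onto the dedicated workqueue instead of
 * system_unbound_wq, for example:
 *
 *	queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay);
 */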