Patch "bpf, cpumap: Make sure kthread is running before map update returns" has been added to the 6.4-stable tree

This is a note to let you know that I've just added the patch titled

    bpf, cpumap: Make sure kthread is running before map update returns

to the 6.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     bpf-cpumap-make-sure-kthread-is-running-before-map-u.patch
and it can be found in the queue-6.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 19ec523419b1f9d6a169dadd970e20071b8947f1
Author: Hou Tao <houtao1@xxxxxxxxxx>
Date:   Sat Jul 29 17:51:06 2023 +0800

    bpf, cpumap: Make sure kthread is running before map update returns
    
    [ Upstream commit 640a604585aa30f93e39b17d4d6ba69fcb1e66c9 ]
    
    The following warning was reported when running xdp_redirect_cpu
    with stress mode enabled and some RT threads:
    
      ------------[ cut here ]------------
      WARNING: CPU: 4 PID: 65 at kernel/bpf/cpumap.c:135
      CPU: 4 PID: 65 Comm: kworker/4:1 Not tainted 6.5.0-rc2+ #1
      Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
      Workqueue: events cpu_map_kthread_stop
      RIP: 0010:put_cpu_map_entry+0xda/0x220
      ......
      Call Trace:
       <TASK>
       ? show_regs+0x65/0x70
       ? __warn+0xa5/0x240
       ......
       ? put_cpu_map_entry+0xda/0x220
       cpu_map_kthread_stop+0x41/0x60
       process_one_work+0x6b0/0xb80
       worker_thread+0x96/0x720
       kthread+0x1a5/0x1f0
       ret_from_fork+0x3a/0x70
       ret_from_fork_asm+0x1b/0x30
       </TASK>
    
    The root cause is the same as in commit 436901649731 ("bpf: cpumap: Fix
    memory leak in cpu_map_update_elem"). The kthread is stopped
    prematurely by kthread_stop() in cpu_map_kthread_stop(), so kthread()
    never gets to call cpu_map_kthread_run() at all, yet the XDP program
    has already queued some frames or skbs into the ptr_ring. So when
    __cpu_map_ring_cleanup() checks the ptr_ring, it finds that it was not
    emptied and reports a warning.
    
    An alternative fix would be to use __cpu_map_ring_cleanup() to drop
    these pending frames or skbs when kthread_stop() returns -EINTR, but
    that may confuse users, because these frames or skbs have already been
    handled correctly by the XDP program. So instead of dropping them,
    just make sure the per-cpu kthread is running before
    __cpu_map_entry_alloc() returns.
    
    After applying the fix, the error handling for kthread_stop() becomes
    unnecessary because it will always return 0, so just remove it.
    
    Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP")
    Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
    Reviewed-by: Pu Lehui <pulehui@xxxxxxxxxx>
    Acked-by: Jesper Dangaard Brouer <hawk@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20230729095107.1722450-2-houtao@xxxxxxxxxxxxxxx
    Signed-off-by: Martin KaFai Lau <martin.lau@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
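
For illustration only (this is not part of the patch or of the kernel
sources): below is a minimal user-space analogue of the handshake the
patch adds, using pthreads in place of kernel completions. The creator
does not return until the worker has actually started, so a stop
request issued right after creation cannot land before the worker has
had a chance to drain pending work. All names here (worker_ctx,
worker_run, worker_create) are hypothetical.

    /* Build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdbool.h>

    struct worker_ctx {
            pthread_mutex_t lock;
            pthread_cond_t  running_cond;
            bool            running;  /* analogue of rcpu->kthread_running */
    };

    static void *worker_run(void *data)
    {
            struct worker_ctx *ctx = data;

            /* Analogue of complete(&rcpu->kthread_running): announce that
             * the worker is live before entering its processing loop.
             */
            pthread_mutex_lock(&ctx->lock);
            ctx->running = true;
            pthread_cond_signal(&ctx->running_cond);
            pthread_mutex_unlock(&ctx->lock);

            /* ... process queued work until told to stop ... */
            return NULL;
    }

    static int worker_create(struct worker_ctx *ctx, pthread_t *tid)
    {
            pthread_mutex_init(&ctx->lock, NULL);
            pthread_cond_init(&ctx->running_cond, NULL);
            ctx->running = false;

            if (pthread_create(tid, NULL, worker_run, ctx))
                    return -1;

            /* Analogue of wait_for_completion(&rcpu->kthread_running):
             * do not return until the worker has really started, so a
             * stop issued right after creation cannot arrive "too early".
             */
            pthread_mutex_lock(&ctx->lock);
            while (!ctx->running)
                    pthread_cond_wait(&ctx->running_cond, &ctx->lock);
            pthread_mutex_unlock(&ctx->lock);
            return 0;
    }

    int main(void)
    {
            struct worker_ctx ctx;
            pthread_t tid;

            if (worker_create(&ctx, &tid) == 0)
                    pthread_join(tid, NULL);
            return 0;
    }

In the patch below, complete(&rcpu->kthread_running) at the top of
cpu_map_kthread_run() plays the role of the signal, and
wait_for_completion(&rcpu->kthread_running) in __cpu_map_entry_alloc()
plays the role of the wait loop.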

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 6ae02be7a48e3..7eeb200251640 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
+#include <linux/completion.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -73,6 +74,7 @@ struct bpf_cpu_map_entry {
 	struct rcu_head rcu;
 
 	struct work_struct kthread_stop_wq;
+	struct completion kthread_running;
 };
 
 struct bpf_cpu_map {
@@ -153,7 +155,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 static void cpu_map_kthread_stop(struct work_struct *work)
 {
 	struct bpf_cpu_map_entry *rcpu;
-	int err;
 
 	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
 
@@ -163,14 +164,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
 	rcu_barrier();
 
 	/* kthread_stop will wake_up_process and wait for it to complete */
-	err = kthread_stop(rcpu->kthread);
-	if (err) {
-		/* kthread_stop may be called before cpu_map_kthread_run
-		 * is executed, so we need to release the memory related
-		 * to rcpu.
-		 */
-		put_cpu_map_entry(rcpu);
-	}
+	kthread_stop(rcpu->kthread);
 }
 
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
@@ -298,11 +292,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	return nframes;
 }
 
-
 static int cpu_map_kthread_run(void *data)
 {
 	struct bpf_cpu_map_entry *rcpu = data;
 
+	complete(&rcpu->kthread_running);
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/* When kthread gives stop order, then rcpu have been disconnected
@@ -467,6 +461,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 		goto free_ptr_ring;
 
 	/* Setup kthread */
+	init_completion(&rcpu->kthread_running);
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
 					       "cpumap/%d/map:%d", cpu,
 					       map->id);
@@ -480,6 +475,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
 	kthread_bind(rcpu->kthread, cpu);
 	wake_up_process(rcpu->kthread);
 
+	/* Make sure kthread has been running, so kthread_stop() will not
+	 * stop the kthread prematurely and all pending frames or skbs
+	 * will be handled by the kthread before kthread_stop() returns.
+	 */
+	wait_for_completion(&rcpu->kthread_running);
+
 	return rcpu;
 
 free_prog:


