[PATCH bpf-next 2/3] bpf: Enable preemption after irq_work_raise() in unit_free{_rcu}()

From: Hou Tao <houtao1@xxxxxxxxxx>

Both unit_free() and unit_free_rcu() invoke irq_work_raise() to free
objects back into the slab allocator. The invocation may be preempted
by unit_alloc(), and when that happens unit_alloc() may unexpectedly
return NULL, as shown in the following scenario:

task A                          task B

unit_free()
  // high_watermark = 48
  // free_cnt = 49 after free
  irq_work_raise()
    // mark irq work as IRQ_WORK_PENDING
    irq_work_claim()

                                // task B preempts task A
                                unit_alloc()
                                  // free_cnt = 48 after alloc

                                // does unit_alloc() 32 times
                                ......
                                // free_cnt = 16

                                unit_alloc()
                                  // free_cnt = 15 after alloc
                                  // irq work is already PENDING,
                                  // so just return
                                  irq_work_raise()

                                // does unit_alloc() 15 times
                                ......
                                // free_cnt = 0

                                unit_alloc()
                                  // free_cnt = 0 before alloc
                                  return NULL
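
The key point is that once task A has claimed the irq work, every later
irq_work_raise() on that CPU is a no-op until the work actually runs,
so nothing refills the free list while task B drains it. A minimal
userspace model of that claim semantics (illustration only; pending,
raise_work() and the counter below are stand-ins, not the kernel's
irq_work API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;	/* stand-in for IRQ_WORK_PENDING */

/* stand-in for irq_work_raise()/irq_work_claim(): the first caller
 * claims the work, every later call is a no-op until the work runs
 */
static bool raise_work(void)
{
	bool unclaimed = false;

	return atomic_compare_exchange_strong(&pending, &unclaimed, true);
}

int main(void)
{
	int free_cnt = 49;

	/* task A: claims the work, then is "preempted" before the
	 * raise takes effect, so the irq work never gets queued
	 */
	printf("task A raise: %s\n", raise_work() ? "claimed" : "no-op");

	/* task B: drains the per-CPU free list; its own raise attempt
	 * is a no-op because the work is still marked pending
	 */
	while (free_cnt > 0)
		free_cnt--;
	printf("task B raise: %s, free_cnt=%d, unit_alloc() returns NULL\n",
	       raise_work() ? "claimed" : "no-op", free_cnt);

	return 0;
}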

Fix it by disabling preemption before doing the free and enabling
preemption only after irq_work_raise() completes, so that no task on
the same CPU can run in the window between irq_work_claim() and the
raise taking effect.

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
 kernel/bpf/memalloc.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 83f8913ebb0a..a4b089876383 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -769,6 +769,7 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 	 */
 	c->tgt = *(struct bpf_mem_cache **)llnode;
 
+	preempt_disable_notrace();
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);
@@ -788,6 +789,7 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 	if (cnt > c->high_watermark)
 		/* free few objects from current cpu into global kmalloc pool */
 		irq_work_raise(c);
+	preempt_enable_notrace();
 }
 
 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
@@ -797,6 +799,7 @@ static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
 
 	c->tgt = *(struct bpf_mem_cache **)llnode;
 
+	preempt_disable_notrace();
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		if (__llist_add(llnode, &c->free_by_rcu))
@@ -809,6 +812,7 @@ static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
 
 	if (!atomic_read(&c->call_rcu_in_progress))
 		irq_work_raise(c);
+	preempt_enable_notrace();
 }
 
 /* Called from BPF program or from sys_bpf syscall.
-- 
2.29.2