Patch "perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()" has been added to the 5.18-stable tree

This is a note to let you know that I've just added the patch titled

    perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()

to the 5.18-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     perf-core-fix-data-race-between-perf_event_set_outpu.patch
and it can be found in the queue-5.18 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 6f77ac046a2324c27b4ec103062ce991b8ccd3e2
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date:   Tue Jul 5 15:07:26 2022 +0200

    perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
    
    [ Upstream commit 68e3c69803dada336893640110cb87221bb01dcf ]
    
    Yang Jihong reported a race between perf_event_set_output() and
    perf_mmap_close():
    
            CPU1                                    CPU2
    
            perf_mmap_close(e2)
              if (atomic_dec_and_test(&e2->rb->mmap_count)) // 1 -> 0
                detach_rest = true
    
                                                    ioctl(e1, IOC_SET_OUTPUT, e2)
                                                      perf_event_set_output(e1, e2)
    
              ...
              list_for_each_entry_rcu(e, &e2->rb->event_list, rb_entry)
                ring_buffer_attach(e, NULL);
                // e1 isn't yet added and
                // therefore not detached
    
                                                        ring_buffer_attach(e1, e2->rb)
                                                          list_add_rcu(&e1->rb_entry,
                                                                       &e2->rb->event_list)
    
    After this, e1 is attached to an unmapped rb and a subsequent
    perf_mmap() will loop forever:
    
            again:
                    mutex_lock(&event->mmap_mutex);
                    if (event->rb) {
                            ...
                            if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
                                    ...
                                    mutex_unlock(&event->mmap_mutex);
                                    goto again;
                            }
                    }
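
    In userspace terms, this window is reached by racing an munmap() of
    one event's ring buffer against an ioctl() redirecting a second
    event into that same buffer. A rough sketch of the triggering
    sequence (illustrative only, not a reliable reproducer; the two
    calls marked below would have to run concurrently on separate
    threads to actually hit the race):

            #include <linux/perf_event.h>
            #include <string.h>
            #include <sys/ioctl.h>
            #include <sys/mman.h>
            #include <sys/syscall.h>
            #include <sys/types.h>
            #include <unistd.h>

            static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                       int cpu, int group_fd, unsigned long flags)
            {
                    return syscall(SYS_perf_event_open, attr, pid, cpu,
                                   group_fd, flags);
            }

            int main(void)
            {
                    struct perf_event_attr attr;
                    size_t len = 2 * 4096; /* header page + one data page */
                    void *buf;
                    int e1, e2;

                    memset(&attr, 0, sizeof(attr));
                    attr.size = sizeof(attr);
                    attr.type = PERF_TYPE_SOFTWARE;
                    attr.config = PERF_COUNT_SW_CPU_CLOCK;
                    attr.disabled = 1;

                    e1 = perf_event_open(&attr, 0, -1, -1, 0);
                    e2 = perf_event_open(&attr, 0, -1, -1, 0);

                    /* give e2 a ring buffer; rb->mmap_count is now 1 */
                    buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_SHARED, e2, 0);

                    /*
                     * Race these two from separate threads: munmap()
                     * drops the last mmap reference and runs
                     * perf_mmap_close(e2), while SET_OUTPUT attaches
                     * e1 to the same, now dying, buffer.
                     */
                    munmap(buf, len);                         /* CPU1 */
                    ioctl(e1, PERF_EVENT_IOC_SET_OUTPUT, e2); /* CPU2 */

                    return 0;
            }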
    
    The loop in perf_mmap_close() holds e2->mmap_mutex, while the attach
    in perf_event_set_output() holds e1->mmap_mutex. As such, there is no
    serialization to avoid this race.
    
    Change perf_event_set_output() to take both e1->mmap_mutex and
    e2->mmap_mutex to alleviate that problem. Additionally, have the loop
    in perf_mmap() detach the rb directly; this avoids having to wait for
    the concurrent perf_mmap_close() to get around to doing it to make
    progress.
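
    Taking two mutexes of the same class is only safe if everyone
    acquires them in a consistent order; the patch uses the usual
    order-by-address idiom for that. A minimal userspace analogue with
    pthreads (the name lock_double() is illustrative, not the kernel
    code):

            #include <pthread.h>

            /*
             * Always take the lower-addressed mutex first, so two
             * threads locking the same pair with swapped arguments
             * can never deadlock ABBA-style.
             */
            static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
            {
                    if (b < a) {
                            pthread_mutex_t *tmp = a;
                            a = b;
                            b = tmp;
                    }
                    pthread_mutex_lock(a);
                    pthread_mutex_lock(b);
            }

    The kernel variant is the existing mutex_lock_double() helper, which
    the patch below moves above perf_event_set_output(); it takes the
    second lock with mutex_lock_nested(b, SINGLE_DEPTH_NESTING) so that
    lockdep accepts nesting two locks of the same lock class.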
    
    Fixes: 9bb5d40cd93c ("perf: Fix mmap() accounting hole")
    Reported-by: Yang Jihong <yangjihong1@xxxxxxxxxx>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Tested-by: Yang Jihong <yangjihong1@xxxxxxxxxx>
    Link: https://lkml.kernel.org/r/YsQ3jm2GR38SW7uD@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 950b25c3f210..82238406f5f5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6254,10 +6254,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
 			/*
-			 * Raced against perf_mmap_close() through
-			 * perf_event_set_output(). Try again, hope for better
-			 * luck.
+			 * Raced against perf_mmap_close(); remove the
+			 * event and try again.
 			 */
+			ring_buffer_attach(event, NULL);
 			mutex_unlock(&event->mmap_mutex);
 			goto again;
 		}
@@ -11826,14 +11826,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
 	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
-	if (!output_event)
+	if (!output_event) {
+		mutex_lock(&event->mmap_mutex);
 		goto set;
+	}
 
 	/* don't allow circular references */
 	if (event == output_event)
@@ -11871,8 +11882,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	    event->pmu != output_event->pmu)
 		goto out;
 
+	/*
+	 * Hold both mmap_mutex to serialize against perf_mmap_close().  Since
+	 * output_event is already on rb->event_list, and the list iteration
+	 * restarts after every removal, it is guaranteed this new event is
+	 * observed *OR* if output_event is already removed, it's guaranteed we
+	 * observe !rb->mmap_count.
+	 */
+	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-	mutex_lock(&event->mmap_mutex);
 	/* Can't redirect output if we've got an active mmap() */
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
@@ -11882,6 +11900,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 		rb = ring_buffer_get(output_event);
 		if (!rb)
 			goto unlock;
+
+		/* did we race against perf_mmap_close() */
+		if (!atomic_read(&rb->mmap_count)) {
+			ring_buffer_put(rb);
+			goto unlock;
+		}
 	}
 
 	ring_buffer_attach(event, rb);
@@ -11889,20 +11913,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
+	if (output_event)
+		mutex_unlock(&output_event->mmap_mutex);
 
 out:
 	return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-	if (b < a)
-		swap(a, b);
-
-	mutex_lock(a);
-	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
 	bool nmi_safe = false;


