Commit 2b18593
Merge tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fix from Borislav Petkov:

 - A single data race fix on the perf event cleanup path to avoid
   endless loops due to insufficient locking

* tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
2 parents: 59c80f0 + 68e3c69
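The core of the fix below is that perf_event_set_output() now takes both events' mmap_mutex at once, so perf_mmap_close() can no longer tear the ring buffer down between the lookup and the attach. Taking two locks of the same class requires a fixed acquisition order to avoid an ABBA deadlock; the patch's mutex_lock_double() orders them by address. A minimal user-space sketch of the same idiom, assuming POSIX threads (lock_double/unlock_double are illustrative names, not kernel APIs):

    #include <pthread.h>

    /*
     * Acquire two mutexes of the same class without risking an ABBA
     * deadlock: every caller takes the lower-addressed one first.
     */
    static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            pthread_mutex_t *tmp;

            if (b < a) {                    /* order the pair by address */
                    tmp = a;
                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(a);          /* lower address first ... */
            pthread_mutex_lock(b);          /* ... higher address second */
    }

    static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            pthread_mutex_unlock(a);        /* unlock order doesn't matter */
            pthread_mutex_unlock(b);
    }

The kernel version additionally uses mutex_lock_nested(b, SINGLE_DEPTH_NESTING) for the second lock so that lockdep accepts a second acquisition within the same lock class.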


kernel/events/core.c

Lines changed: 31 additions & 14 deletions
@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
 			/*
-			 * Raced against perf_mmap_close() through
-			 * perf_event_set_output(). Try again, hope for better
-			 * luck.
+			 * Raced against perf_mmap_close(); remove the
+			 * event and try again.
 			 */
+			ring_buffer_attach(event, NULL);
 			mutex_unlock(&event->mmap_mutex);
 			goto again;
 		}
@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 		goto out;
 	}
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
 	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
-	if (!output_event)
+	if (!output_event) {
+		mutex_lock(&event->mmap_mutex);
 		goto set;
+	}
 
 	/* don't allow circular references */
 	if (event == output_event)
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	    event->pmu != output_event->pmu)
 		goto out;
 
+	/*
+	 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+	 * output_event is already on rb->event_list, and the list iteration
+	 * restarts after every removal, it is guaranteed this new event is
+	 * observed *OR* if output_event is already removed, it's guaranteed we
+	 * observe !rb->mmap_count.
+	 */
+	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-	mutex_lock(&event->mmap_mutex);
 	/* Can't redirect output if we've got an active mmap() */
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
@@ -11881,27 +11899,26 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 		rb = ring_buffer_get(output_event);
 		if (!rb)
 			goto unlock;
+
+		/* did we race against perf_mmap_close() */
+		if (!atomic_read(&rb->mmap_count)) {
+			ring_buffer_put(rb);
+			goto unlock;
+		}
 	}
 
 	ring_buffer_attach(event, rb);
 
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
+	if (output_event)
+		mutex_unlock(&output_event->mmap_mutex);
 
 out:
 	return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-	if (b < a)
-		swap(a, b);
-
-	mutex_lock(a);
-	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
 	bool nmi_safe = false;

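Two details in the patch are worth calling out. First, perf_mmap() now detaches the event (ring_buffer_attach(event, NULL)) when it loses the race, so its "goto again" retry can make progress instead of spinning forever on a buffer whose mmap_count already hit zero. Second, perf_event_set_output() rechecks rb->mmap_count after ring_buffer_get(): taking a reference only pins the buffer's memory, it does not prove the buffer is still mapped, and attaching to a buffer that perf_mmap_close() is already draining could re-add an event the teardown iteration never terminates on. A sketch of that back-off pattern (illustrative C11, not the kernel implementation; buffer and try_attach are hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct buffer {
            atomic_int refcount;    /* plays the role of rb->refcount */
            atomic_int mmap_count;  /* plays the role of rb->mmap_count */
    };

    /* Called with the owner's lock held, as in perf_event_set_output(). */
    static bool try_attach(struct buffer *buf)
    {
            atomic_fetch_add(&buf->refcount, 1);         /* ring_buffer_get() */

            /* Did teardown (perf_mmap_close()) already win the race? */
            if (atomic_load(&buf->mmap_count) == 0) {
                    atomic_fetch_sub(&buf->refcount, 1); /* ring_buffer_put() */
                    return false;   /* back off; caller returns an error */
            }
            return true;            /* safe to attach to this buffer */
    }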