@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
 		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
 			/*
-			 * Raced against perf_mmap_close() through
-			 * perf_event_set_output(). Try again, hope for better
-			 * luck.
+			 * Raced against perf_mmap_close(); remove the
+			 * event and try again.
 			 */
+			ring_buffer_attach(event, NULL);
 			mutex_unlock(&event->mmap_mutex);
 			goto again;
 		}
@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
 	struct perf_buffer *rb = NULL;
 	int ret = -EINVAL;
 
-	if (!output_event)
+	if (!output_event) {
+		mutex_lock(&event->mmap_mutex);
 		goto set;
+	}
 
 	/* don't allow circular references */
 	if (event == output_event)
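
Note on the hunk above: mutex_lock_double() is moved ahead of perf_event_set_output() so the latter can take both mmap_mutex instances. It avoids ABBA deadlock between two tasks passing the same pair of mutexes in opposite order by always locking the lower-addressed mutex first; mutex_lock_nested(b, SINGLE_DEPTH_NESTING) then tells lockdep that the nested acquisition of a second lock of the same class is intentional. Below is a minimal userspace sketch of the same address-ordering idiom using POSIX mutexes; lock_pair(), unlock_pair() and struct account are illustrative names, not kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct account {
	pthread_mutex_t lock;
	long balance;
};

/* Always lock the lower-addressed mutex first, mirroring mutex_lock_double(). */
static void lock_pair(struct account *a, struct account *b)
{
	if ((uintptr_t)b < (uintptr_t)a) {
		struct account *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct account *a, struct account *b)
{
	/* Unlock order does not affect correctness. */
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

static void transfer(struct account *from, struct account *to, long amount)
{
	/* Safe even if another thread concurrently calls transfer(to, from, ...). */
	lock_pair(from, to);
	from->balance -= amount;
	to->balance += amount;
	unlock_pair(from, to);
}

int main(void)
{
	struct account a = { .lock = PTHREAD_MUTEX_INITIALIZER, .balance = 100 };
	struct account b = { .lock = PTHREAD_MUTEX_INITIALIZER, .balance = 0 };

	transfer(&a, &b, 25);
	printf("a=%ld b=%ld\n", a.balance, b.balance);
	return 0;
}

Because unlock order is irrelevant for mutex correctness, the patch can simply drop event->mmap_mutex and then output_event->mmap_mutex at the end of perf_event_set_output(), as the last hunk shows.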
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 	    event->pmu != output_event->pmu)
 		goto out;
 
+	/*
+	 * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+	 * output_event is already on rb->event_list, and the list iteration
+	 * restarts after every removal, it is guaranteed this new event is
+	 * observed *OR* if output_event is already removed, it's guaranteed we
+	 * observe !rb->mmap_count.
+	 */
+	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-	mutex_lock(&event->mmap_mutex);
 	/* Can't redirect output if we've got an active mmap() */
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
@@ -11881,27 +11899,26 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 		rb = ring_buffer_get(output_event);
 		if (!rb)
 			goto unlock;
+
+		/* did we race against perf_mmap_close() */
+		if (!atomic_read(&rb->mmap_count)) {
+			ring_buffer_put(rb);
+			goto unlock;
+		}
 	}
 
 	ring_buffer_attach(event, rb);
 
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
+	if (output_event)
+		mutex_unlock(&output_event->mmap_mutex);
 
 out:
 	return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-	if (b < a)
-		swap(a, b);
-
-	mutex_lock(a);
-	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
 	bool nmi_safe = false;
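
Note on the re-check added above: ring_buffer_get() only pins the buffer's memory, so after taking that reference the code verifies, under the mmap_mutex it now holds, that the buffer still has live mappings, and drops the reference again if perf_mmap_close() got there first. Below is a standalone sketch of that take-a-reference-then-revalidate pattern; struct buf, buf_get()/buf_put(), the users counter and attach() are illustrative stand-ins, not the perf API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct buf {
	atomic_int refcount;	/* keeps the memory alive */
	atomic_int users;	/* live mappings; 0 means teardown in progress */
};

static struct buf *buf_get(struct buf *b)
{
	atomic_fetch_add(&b->refcount, 1);
	return b;
}

static void buf_put(struct buf *b)
{
	if (atomic_fetch_sub(&b->refcount, 1) == 1)
		free(b);
}

static pthread_mutex_t attach_lock = PTHREAD_MUTEX_INITIALIZER;

/* Attach *slot to b only if b still has live users; mirrors the rb->mmap_count re-check. */
static bool attach(struct buf **slot, struct buf *b)
{
	bool ok = false;

	pthread_mutex_lock(&attach_lock);
	b = buf_get(b);			/* memory cannot vanish from here on */
	if (atomic_load(&b->users) == 0) {
		buf_put(b);		/* raced with teardown: back off */
	} else {
		*slot = b;		/* publish; caller drops the ref on detach */
		ok = true;
	}
	pthread_mutex_unlock(&attach_lock);
	return ok;
}

int main(void)
{
	struct buf *slot = NULL;
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	atomic_init(&b->refcount, 1);	/* creator's reference */
	atomic_init(&b->users, 1);	/* one live "mapping" */

	if (attach(&slot, b))
		buf_put(slot);		/* detach: drop the attach reference */
	buf_put(b);			/* drop the creator's reference; frees b */
	return 0;
}

Taking the reference before the check is what makes the sequence safe: even if the object is being torn down concurrently, its memory stays valid long enough to read the liveness counter and back out.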