@@ -380,7 +380,6 @@ enum event_type_t {
 
 /*
  * perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
 
 static void perf_sched_delayed(struct work_struct *work);
@@ -389,7 +388,6 @@ static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
 static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;
 
-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -844,9 +842,16 @@ static void perf_cgroup_switch(struct task_struct *task)
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_cgroup *cgrp;
 
-	cgrp = perf_cgroup_from_task(task, NULL);
+	/*
+	 * cpuctx->cgrp is set when the first cgroup event enabled,
+	 * and is cleared when the last cgroup event disabled.
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
 
 	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+	cgrp = perf_cgroup_from_task(task, NULL);
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
 
@@ -3631,8 +3636,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(next);
+	perf_cgroup_switch(next);
 }
 
 static bool perf_less_group_idx(const void *l, const void *r)
@@ -4974,15 +4978,6 @@ static void unaccount_pmu_sb_event(struct perf_event *event)
 	detach_sb_event(event);
 }
 
-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 static DEFINE_SPINLOCK(nr_freq_lock);
 #endif
@@ -5048,8 +5043,6 @@ static void unaccount_event(struct perf_event *event)
 			schedule_delayed_work(&perf_sched_work, HZ);
 	}
 
-	unaccount_event_cpu(event, event->cpu);
-
 	unaccount_pmu_sb_event(event);
 }
 
@@ -11679,15 +11672,6 @@ static void account_pmu_sb_event(struct perf_event *event)
 	attach_sb_event(event);
 }
 
-static void account_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
 static void account_freq_event_nohz(void)
 {
@@ -11775,8 +11759,6 @@ static void account_event(struct perf_event *event)
 	}
 enabled:
 
-	account_event_cpu(event, event->cpu);
-
 	account_pmu_sb_event(event);
 }
 
@@ -12339,12 +12321,12 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
-	/* Do we allow access to perf_event_open(2) ? */
-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
 
-	err = perf_copy_attr(attr_uptr, &attr);
+	/* Do we allow access to perf_event_open(2) ? */
+	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
 		return err;
 
@@ -12689,7 +12671,8 @@ SYSCALL_DEFINE5(perf_event_open,
 	return event_fd;
 
 err_context:
-	/* event->pmu_ctx freed by free_event() */
+	put_pmu_ctx(event->pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_locked:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
@@ -12802,6 +12785,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 err_pmu_ctx:
 	put_pmu_ctx(pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_unlock:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
@@ -12822,13 +12806,11 @@ static void __perf_pmu_remove(struct perf_event_context *ctx,
 
 	perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
 		perf_remove_from_context(event, 0);
-		unaccount_event_cpu(event, cpu);
 		put_pmu_ctx(event->pmu_ctx);
 		list_add(&event->migrate_entry, events);
 
 		for_each_sibling_event(sibling, event) {
 			perf_remove_from_context(sibling, 0);
-			unaccount_event_cpu(sibling, cpu);
 			put_pmu_ctx(sibling->pmu_ctx);
 			list_add(&sibling->migrate_entry, events);
 		}
@@ -12847,7 +12829,6 @@ static void __perf_pmu_install_event(struct pmu *pmu,
 
 	if (event->state >= PERF_EVENT_STATE_OFF)
 		event->state = PERF_EVENT_STATE_INACTIVE;
-	account_event_cpu(event, cpu);
 	perf_install_in_context(ctx, event, cpu);
 }
 
@@ -13231,7 +13212,7 @@ inherit_event(struct perf_event *parent_event,
 	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
 	if (IS_ERR(pmu_ctx)) {
 		free_event(child_event);
-		return NULL;
+		return ERR_CAST(pmu_ctx);
 	}
 	child_event->pmu_ctx = pmu_ctx;
 
@@ -13742,8 +13723,7 @@ static int __perf_cgroup_move(void *info)
 	struct task_struct *task = info;
 
 	preempt_disable();
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(task);
+	perf_cgroup_switch(task);
 	preempt_enable();
 
 	return 0;
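
Note on the hunks above: the per-CPU perf_cgroup_events counter existed only so callers such as __perf_event_task_sched_out() and __perf_cgroup_move() could skip perf_cgroup_switch() when no cgroup events were active on the CPU. The diff folds that check into perf_cgroup_switch() itself, using cpuctx->cgrp (set by the first enabled cgroup event, cleared when the last one is disabled) as the signal, which makes the account_event_cpu()/unaccount_event_cpu() bookkeeping redundant. Below is a minimal standalone userspace sketch of that shape, not kernel code; every name in it (cpu_ctx, cgroup_switch, ...) is illustrative only.

/*
 * Sketch: the switch routine bails out early on a NULL cgroup pointer,
 * replacing an external "are there any cgroup events here?" counter.
 */
#include <stdio.h>

struct cgroup { const char *name; };

struct cpu_ctx {
	/* set when the first cgroup event is enabled, cleared by the last */
	struct cgroup *cgrp;
};

static void cgroup_switch(struct cpu_ctx *ctx, struct cgroup *next)
{
	if (ctx->cgrp == NULL)	/* no cgroup events on this CPU: cheap exit */
		return;
	if (ctx->cgrp == next)	/* already tracking the right cgroup */
		return;
	printf("switch %s -> %s\n", ctx->cgrp->name, next->name);
	ctx->cgrp = next;
}

int main(void)
{
	struct cgroup a = { "A" }, b = { "B" };
	struct cpu_ctx ctx = { 0 };

	cgroup_switch(&ctx, &b);	/* no-op: ctx.cgrp is NULL */
	ctx.cgrp = &a;			/* first cgroup event enabled */
	cgroup_switch(&ctx, &b);	/* prints "switch A -> B" */
	return 0;
}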