@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			      enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task);
+			     enum event_type_t event_type);
 
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
@@ -801,10 +800,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
-	struct perf_cgroup *cgrp;
+	struct perf_event_context *ctx = &cpuctx->ctx;
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
 	struct perf_cgroup_info *info;
 	struct cgroup_subsys_state *css;
 
@@ -813,10 +812,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	 * ensure we do not access cgroup data
 	 * unless we have the cgroup pinned (css_get)
 	 */
-	if (!task || !ctx->nr_cgroups)
+	if (!cgrp)
 		return;
 
-	cgrp = perf_cgroup_from_task(task, ctx);
+	WARN_ON_ONCE(!ctx->nr_cgroups);
 
 	for (css = &cgrp->css; css; css = css->parent) {
 		cgrp = container_of(css, struct perf_cgroup, css);
@@ -869,14 +868,14 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			WARN_ON_ONCE(cpuctx->cgrp);
 			/*
 			 * set cgrp before ctxsw in to allow
-			 * event_filter_match() to not have to pass
-			 * task around
+			 * perf_cgroup_set_timestamp() in ctx_sched_in()
+			 * to not have to pass task around
 			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 			 * because cgorup events are only per-cpu
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task,
 							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+			cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1118,8 +1117,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
 }
 
@@ -2713,8 +2711,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task);
+	     enum event_type_t event_type);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			       struct perf_event_context *ctx,
@@ -2730,15 +2727,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-				struct perf_event_context *ctx,
-				struct task_struct *task)
+				struct perf_event_context *ctx)
 {
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 }
 
 /*
@@ -2788,7 +2784,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	else if (ctx_event_type & EVENT_PINNED)
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }
@@ -3011,7 +3007,7 @@ static void __perf_event_enable(struct perf_event *event,
 		return;
 
 	if (!event_filter_match(event)) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3020,7 +3016,7 @@ static void __perf_event_enable(struct perf_event *event,
 	 * then don't put it on unless the group is on.
 	 */
 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3865,8 +3861,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task)
+	     enum event_type_t event_type)
 {
 	int is_active = ctx->is_active;
 
@@ -3878,7 +3873,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (is_active ^ EVENT_TIME) {
 		/* start ctx time */
 		__update_context_time(ctx, false);
-		perf_cgroup_set_timestamp(task, ctx);
+		perf_cgroup_set_timestamp(cpuctx);
 		/*
 		 * CPU-release for the below ->is_active store,
 		 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3904,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task)
+			     enum event_type_t event_type)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	ctx_sched_in(ctx, cpuctx, event_type, task);
+	ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3950,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	perf_event_sched_in(cpuctx, ctx, task);
+	perf_event_sched_in(cpuctx, ctx);
 
 	if (cpuctx->sched_cb_usage && pmu->sched_task)
 		pmu->sched_task(cpuctx->task_ctx, true);
@@ -4267,7 +4261,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (cpu_event)
 		rotate_ctx(&cpuctx->ctx, cpu_event);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4333,7 @@ static void perf_event_enable_on_exec(int ctxn)
 		clone_ctx = unclone_ctx(ctx);
 		ctx_resched(cpuctx, ctx, event_type);
 	} else {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 	}
 
 	perf_ctx_unlock(cpuctx, ctx);