
Commit a082771

Chengming Zhou authored and Peter Zijlstra committed
perf/core: Don't pass task around when ctx sched in
The current code passes the task around for ctx_sched_in(), only to get the perf_cgroup of the task, then updates the timestamp of it and its ancestors and sets them to active.

But we can use cpuctx->cgrp to get the active perf_cgroup and its ancestors, since cpuctx->cgrp has been set before ctx_sched_in().

This patch removes the task argument from ctx_sched_in() and cleans up the related code.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220329154523.86438-2-zhouchengming@bytedance.com
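In short, the change moves the cgroup lookup from the scheduled-in task to the already-populated cpuctx. A condensed before/after sketch of perf_cgroup_set_timestamp(), abridged from the diff below (the timestamp-update loop over the cgroup ancestors is elided):

	/* Before: callers had to thread a task_struct through ctx_sched_in()
	 * just so this helper could resolve the task's perf_cgroup. */
	static inline void
	perf_cgroup_set_timestamp(struct task_struct *task,
				  struct perf_event_context *ctx)
	{
		struct perf_cgroup *cgrp;

		if (!task || !ctx->nr_cgroups)
			return;

		cgrp = perf_cgroup_from_task(task, ctx);
		/* ... walk cgrp->css and its ancestors, updating timestamps ... */
	}

	/* After: perf_cgroup_switch() has already stored the active cgroup in
	 * cpuctx->cgrp before ctx_sched_in() runs, so read it from there. */
	static inline void
	perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
	{
		struct perf_event_context *ctx = &cpuctx->ctx;
		struct perf_cgroup *cgrp = cpuctx->cgrp;

		if (!cgrp)
			return;

		WARN_ON_ONCE(!ctx->nr_cgroups);
		/* ... walk cgrp->css and its ancestors, updating timestamps ... */
	}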
1 parent: e590928


kernel/events/core.c

Lines changed: 26 additions & 32 deletions
@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			      enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task);
+			     enum event_type_t event_type);
 
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
@@ -801,10 +800,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
-	struct perf_cgroup *cgrp;
+	struct perf_event_context *ctx = &cpuctx->ctx;
+	struct perf_cgroup *cgrp = cpuctx->cgrp;
 	struct perf_cgroup_info *info;
 	struct cgroup_subsys_state *css;
 
@@ -813,10 +812,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	 * ensure we do not access cgroup data
 	 * unless we have the cgroup pinned (css_get)
 	 */
-	if (!task || !ctx->nr_cgroups)
+	if (!cgrp)
 		return;
 
-	cgrp = perf_cgroup_from_task(task, ctx);
+	WARN_ON_ONCE(!ctx->nr_cgroups);
 
 	for (css = &cgrp->css; css; css = css->parent) {
 		cgrp = container_of(css, struct perf_cgroup, css);
@@ -869,14 +868,14 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 			WARN_ON_ONCE(cpuctx->cgrp);
 			/*
 			 * set cgrp before ctxsw in to allow
-			 * event_filter_match() to not have to pass
-			 * task around
+			 * perf_cgroup_set_timestamp() in ctx_sched_in()
+			 * to not have to pass task around
 			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 			 * because cgorup events are only per-cpu
 			 */
 			cpuctx->cgrp = perf_cgroup_from_task(task,
 							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+			cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1118,8 +1117,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-			  struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
 }
 
@@ -2713,8 +2711,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task);
+	     enum event_type_t event_type);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 			       struct perf_event_context *ctx,
@@ -2730,15 +2727,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-				struct perf_event_context *ctx,
-				struct task_struct *task)
+				struct perf_event_context *ctx)
 {
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 }
 
 /*
@@ -2788,7 +2784,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	else if (ctx_event_type & EVENT_PINNED)
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
@@ -3011,7 +3007,7 @@ static void __perf_event_enable(struct perf_event *event,
 		return;
 
 	if (!event_filter_match(event)) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3020,7 +3016,7 @@ static void __perf_event_enable(struct perf_event *event,
 	 * then don't put it on unless the group is on.
 	 */
 	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 		return;
 	}
 
@@ -3865,8 +3861,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
-	     enum event_type_t event_type,
-	     struct task_struct *task)
+	     enum event_type_t event_type)
 {
 	int is_active = ctx->is_active;
 
@@ -3878,7 +3873,7 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (is_active ^ EVENT_TIME) {
 		/* start ctx time */
 		__update_context_time(ctx, false);
-		perf_cgroup_set_timestamp(task, ctx);
+		perf_cgroup_set_timestamp(cpuctx);
 		/*
 		 * CPU-release for the below ->is_active store,
 		 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3904,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type,
-			     struct task_struct *task)
+			     enum event_type_t event_type)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	ctx_sched_in(ctx, cpuctx, event_type, task);
+	ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3950,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-	perf_event_sched_in(cpuctx, ctx, task);
+	perf_event_sched_in(cpuctx, ctx);
 
 	if (cpuctx->sched_cb_usage && pmu->sched_task)
 		pmu->sched_task(cpuctx->task_ctx, true);
@@ -4267,7 +4261,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (cpu_event)
 		rotate_ctx(&cpuctx->ctx, cpu_event);
 
-	perf_event_sched_in(cpuctx, task_ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4333,7 @@ static void perf_event_enable_on_exec(int ctxn)
 		clone_ctx = unclone_ctx(ctx);
 		ctx_resched(cpuctx, ctx, event_type);
 	} else {
-		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME);
 	}
 	perf_ctx_unlock(cpuctx, ctx);
 