Skip to content

Commit 1fbc6c8

Browse files
Kan Liang authored and Peter Zijlstra committed
perf/x86: Remove swap_task_ctx()
The pmu specific data is saved in task_struct now. It doesn't need to swap between context. Remove swap_task_ctx() support. Signed-off-by: Kan Liang <kan.liang@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20250314172700.438923-6-kan.liang@linux.intel.com
1 parent 3cec9fd commit 1fbc6c8

File tree

4 files changed

+0
-50
lines changed

4 files changed

+0
-50
lines changed

arch/x86/events/core.c

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,6 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
8787
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
8888

8989
DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
90-
DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
9190

9291
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
9392
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
@@ -2039,7 +2038,6 @@ static void x86_pmu_static_call_update(void)
20392038
static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
20402039

20412040
static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
2042-
static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
20432041

20442042
static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
20452043
static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
@@ -2644,12 +2642,6 @@ static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
26442642
static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
26452643
}
26462644

2647-
static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
2648-
struct perf_event_pmu_context *next_epc)
2649-
{
2650-
static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
2651-
}
2652-
26532645
void perf_check_microcode(void)
26542646
{
26552647
if (x86_pmu.check_microcode)
@@ -2714,7 +2706,6 @@ static struct pmu pmu = {
27142706

27152707
.event_idx = x86_pmu_event_idx,
27162708
.sched_task = x86_pmu_sched_task,
2717-
.swap_task_ctx = x86_pmu_swap_task_ctx,
27182709
.check_period = x86_pmu_check_period,
27192710

27202711
.aux_output_match = x86_pmu_aux_output_match,

arch/x86/events/intel/core.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5300,12 +5300,6 @@ static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
53005300
intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
53015301
}
53025302

5303-
static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
5304-
struct perf_event_pmu_context *next_epc)
5305-
{
5306-
intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
5307-
}
5308-
53095303
static int intel_pmu_check_period(struct perf_event *event, u64 value)
53105304
{
53115305
return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -5474,7 +5468,6 @@ static __initconst const struct x86_pmu intel_pmu = {
54745468

54755469
.guest_get_msrs = intel_guest_get_msrs,
54765470
.sched_task = intel_pmu_sched_task,
5477-
.swap_task_ctx = intel_pmu_swap_task_ctx,
54785471

54795472
.check_period = intel_pmu_check_period,
54805473

arch/x86/events/intel/lbr.c

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -522,29 +522,6 @@ static void __intel_pmu_lbr_save(void *ctx)
522522
cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
523523
}
524524

525-
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
526-
struct perf_event_pmu_context *next_epc)
527-
{
528-
void *prev_ctx_data, *next_ctx_data;
529-
530-
swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
531-
532-
/*
533-
* Architecture specific synchronization makes sense in case
534-
* both prev_epc->task_ctx_data and next_epc->task_ctx_data
535-
* pointers are allocated.
536-
*/
537-
538-
prev_ctx_data = next_epc->task_ctx_data;
539-
next_ctx_data = prev_epc->task_ctx_data;
540-
541-
if (!prev_ctx_data || !next_ctx_data)
542-
return;
543-
544-
swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
545-
task_context_opt(next_ctx_data)->lbr_callstack_users);
546-
}
547-
548525
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
549526
struct task_struct *task, bool sched_in)
550527
{

arch/x86/events/perf_event.h

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -958,14 +958,6 @@ struct x86_pmu {
958958
*/
959959
int num_topdown_events;
960960

961-
/*
962-
* perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
963-
* switch helper to bridge calls from perf/core to perf/x86.
964-
* See struct pmu::swap_task_ctx() usage for examples;
965-
*/
966-
void (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
967-
struct perf_event_pmu_context *next_epc);
968-
969961
/*
970962
* AMD bits
971963
*/
@@ -1671,9 +1663,6 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
16711663
struct cpu_hw_events *cpuc,
16721664
struct perf_event *event);
16731665

1674-
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
1675-
struct perf_event_pmu_context *next_epc);
1676-
16771666
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
16781667
struct task_struct *task, bool sched_in);
16791668

0 commit comments

Comments (0)