Skip to content

Commit d57e94f

Browse files
Kan Liang authored and Peter Zijlstra committed
perf: Supply task information to sched_task()
To save/restore LBR call stack data in system-wide mode, the task_struct information is required. Extend the parameters of sched_task() to supply task_struct information. When schedule in, the LBR call stack data for new task will be restored. When schedule out, the LBR call stack data for old task will be saved. Only need to pass the required task_struct information. Signed-off-by: Kan Liang <kan.liang@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20250314172700.438923-4-kan.liang@linux.intel.com
1 parent 506e64e commit d57e94f

File tree

11 files changed

+42
-26
lines changed

11 files changed

+42
-26
lines changed

arch/powerpc/perf/core-book3s.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
132132

133133
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
134134
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
135-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
135+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
136+
struct task_struct *task, bool sched_in)
137+
{
138+
}
136139
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
137140
static void pmao_restore_workaround(bool ebb) { }
138141
#endif /* CONFIG_PPC32 */
@@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
444447
/* Called from ctxsw to prevent one process's branch entries to
445448
* mingle with the other process's entries during context switch.
446449
*/
447-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
450+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
451+
struct task_struct *task, bool sched_in)
448452
{
449453
if (!ppmu->bhrb_nr)
450454
return;

arch/s390/kernel/perf_pai_crypto.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,8 @@ static void paicrypt_have_samples(void)
518518
/* Called on schedule-in and schedule-out. No access to event structure,
519519
* but for sampling only event CRYPTO_ALL is allowed.
520520
*/
521-
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
521+
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
522+
struct task_struct *task, bool sched_in)
522523
{
523524
/* We started with a clean page on event installation. So read out
524525
* results on schedule_out and if page was dirty, save old values.

arch/s390/kernel/perf_pai_ext.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -542,7 +542,8 @@ static void paiext_have_samples(void)
542542
/* Called on schedule-in and schedule-out. No access to event structure,
543543
* but for sampling only event NNPA_ALL is allowed.
544544
*/
545-
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
545+
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
546+
struct task_struct *task, bool sched_in)
546547
{
547548
/* We started with a clean page on event installation. So read out
548549
* results on schedule_out and if page was dirty, save old values.

arch/x86/events/amd/brs.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void)
381381
* On ctxswin, sched_in = true, called after the PMU has started
382382
* On ctxswout, sched_in = false, called before the PMU is stopped
383383
*/
384-
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
384+
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
385+
struct task_struct *task, bool sched_in)
385386
{
386387
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
387388

arch/x86/events/amd/lbr.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -371,7 +371,8 @@ void amd_pmu_lbr_del(struct perf_event *event)
371371
perf_sched_cb_dec(event->pmu);
372372
}
373373

374-
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
374+
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
375+
struct task_struct *task, bool sched_in)
375376
{
376377
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
377378

arch/x86/events/core.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2638,9 +2638,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
26382638
NULL,
26392639
};
26402640

2641-
static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
2641+
static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
2642+
struct task_struct *task, bool sched_in)
26422643
{
2643-
static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in);
2644+
static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
26442645
}
26452646

26462647
static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,

arch/x86/events/intel/core.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5294,10 +5294,10 @@ static void intel_pmu_cpu_dead(int cpu)
52945294
}
52955295

52965296
static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
5297-
bool sched_in)
5297+
struct task_struct *task, bool sched_in)
52985298
{
52995299
intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
5300-
intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
5300+
intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
53015301
}
53025302

53035303
static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,

arch/x86/events/intel/lbr.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -539,7 +539,8 @@ void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
539539
task_context_opt(next_ctx_data)->lbr_callstack_users);
540540
}
541541

542-
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
542+
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
543+
struct task_struct *task, bool sched_in)
543544
{
544545
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
545546
void *task_ctx;

arch/x86/events/perf_event.h

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -875,7 +875,7 @@ struct x86_pmu {
875875

876876
void (*check_microcode)(void);
877877
void (*sched_task)(struct perf_event_pmu_context *pmu_ctx,
878-
bool sched_in);
878+
struct task_struct *task, bool sched_in);
879879

880880
/*
881881
* Intel Arch Perfmon v2+
@@ -1408,7 +1408,8 @@ void amd_pmu_lbr_reset(void);
14081408
void amd_pmu_lbr_read(void);
14091409
void amd_pmu_lbr_add(struct perf_event *event);
14101410
void amd_pmu_lbr_del(struct perf_event *event);
1411-
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1411+
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
1412+
struct task_struct *task, bool sched_in);
14121413
void amd_pmu_lbr_enable_all(void);
14131414
void amd_pmu_lbr_disable_all(void);
14141415
int amd_pmu_lbr_hw_config(struct perf_event *event);
@@ -1462,7 +1463,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
14621463
perf_sched_cb_dec(event->pmu);
14631464
}
14641465

1465-
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1466+
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
1467+
struct task_struct *task, bool sched_in);
14661468
#else
14671469
static inline int amd_brs_init(void)
14681470
{
@@ -1487,7 +1489,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
14871489
{
14881490
}
14891491

1490-
static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
1492+
static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
1493+
struct task_struct *task, bool sched_in)
14911494
{
14921495
}
14931496

@@ -1670,7 +1673,8 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
16701673
void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
16711674
struct perf_event_pmu_context *next_epc);
16721675

1673-
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
1676+
void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
1677+
struct task_struct *task, bool sched_in);
16741678

16751679
u64 lbr_from_signext_quirk_wr(u64 val);
16761680

include/linux/perf_event.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -494,7 +494,7 @@ struct pmu {
494494
* context-switches callback
495495
*/
496496
void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
497-
bool sched_in);
497+
struct task_struct *task, bool sched_in);
498498

499499
/*
500500
* Kmem cache of PMU specific data

0 commit comments

Comments (0)