
Commit bd2da08

Kan Liang authored and Peter Zijlstra committed
perf: Clean up pmu specific data
The pmu specific data is saved in task_struct now. Remove it from event
context structure. Remove swap_task_ctx() as well.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250314172700.438923-7-kan.liang@linux.intel.com
1 parent 1fbc6c8 commit bd2da08
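
For orientation, here is a minimal sketch of the layout this commit moves to. The shape (a refcounted, RCU-freed per-task wrapper hanging off task_struct) follows the earlier patches in this series; treat the field names below as illustrative rather than a verbatim copy of the headers.

/*
 * Illustrative sketch, not verbatim kernel code.
 *
 * Before: each perf_event_pmu_context owned a task_ctx_data blob that
 * had to be swapped between tasks on every optimized context switch.
 *
 * After: one refcounted, RCU-freed wrapper hangs off task_struct, so
 * the PMU-specific data travels with the task automatically.
 */
struct perf_ctx_data {
	struct rcu_head		rcu_head;	/* deferred free via RCU */
	refcount_t		refcount;	/* shared by all per-task users */
	int			global;		/* set for system-wide users */
	struct kmem_cache	*ctx_cache;	/* cache the payload came from */
	void			*data;		/* the PMU-specific payload */
};

/* task_struct then carries: struct perf_ctx_data __rcu *perf_ctx_data; */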

File tree

2 files changed: 3 additions & 85 deletions


include/linux/perf_event.h

Lines changed: 0 additions & 12 deletions
@@ -501,16 +501,6 @@ struct pmu {
 	 */
 	struct kmem_cache		*task_ctx_cache;
 
-	/*
-	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
-	 * can be synchronized using this function. See Intel LBR callstack support
-	 * implementation and Perf core context switch handling callbacks for usage
-	 * examples.
-	 */
-	void (*swap_task_ctx)		(struct perf_event_pmu_context *prev_epc,
-					 struct perf_event_pmu_context *next_epc);
-					/* optional */
-
 	/*
 	 * Set up pmu-private data structures for an AUX area
 	 */
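
The deleted callback existed because optimized context switches swap whole perf contexts between two tasks, so a PMU with per-task private state had to make that state follow the right task. A hedged sketch of what the hook effectively did (the Intel LBR call-stack code the removed comment cites did this plus extra bookkeeping); with the data now kept in task_struct, nothing needs swapping:

/*
 * Sketch only, not a real driver: what a swap_task_ctx() implementation
 * boiled down to when the core swapped cloned contexts between tasks.
 */
static void example_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
				      struct perf_event_pmu_context *next_epc)
{
	/* keep each task's PMU-private state with that task */
	swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
}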
@@ -933,7 +923,6 @@ struct perf_event_pmu_context {
 	atomic_t			refcount; /* event <-> epc */
 	struct rcu_head			rcu_head;
 
-	void				*task_ctx_data; /* pmu specific data */
 	/*
 	 * Set when one or more (plausibly active) event can't be scheduled
 	 * due to pmu overcommit or pmu constraints, except tolerant to
@@ -981,7 +970,6 @@ struct perf_event_context {
 	int				nr_user;
 	int				is_active;
 
-	int				nr_task_data;
 	int				nr_stat;
 	int				nr_freq;
 	int				rotate_disable;

kernel/events/core.c

Lines changed: 3 additions & 73 deletions
@@ -1254,20 +1254,6 @@ static void get_ctx(struct perf_event_context *ctx)
 	refcount_inc(&ctx->refcount);
 }
 
-static void *alloc_task_ctx_data(struct pmu *pmu)
-{
-	if (pmu->task_ctx_cache)
-		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
-
-	return NULL;
-}
-
-static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
-{
-	if (pmu->task_ctx_cache && task_ctx_data)
-		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-}
-
 static void free_ctx(struct rcu_head *head)
 {
 	struct perf_event_context *ctx;
@@ -3577,42 +3563,6 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
 	}
 }
 
-#define double_list_for_each_entry(pos1, pos2, head1, head2, member)	\
-	for (pos1 = list_first_entry(head1, typeof(*pos1), member),	\
-	     pos2 = list_first_entry(head2, typeof(*pos2), member);	\
-	     !list_entry_is_head(pos1, head1, member) &&		\
-	     !list_entry_is_head(pos2, head2, member);			\
-	     pos1 = list_next_entry(pos1, member),			\
-	     pos2 = list_next_entry(pos2, member))
-
-static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
-					  struct perf_event_context *next_ctx)
-{
-	struct perf_event_pmu_context *prev_epc, *next_epc;
-
-	if (!prev_ctx->nr_task_data)
-		return;
-
-	double_list_for_each_entry(prev_epc, next_epc,
-				   &prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list,
-				   pmu_ctx_entry) {
-
-		if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
-			continue;
-
-		/*
-		 * PMU specific parts of task perf context can require
-		 * additional synchronization. As an example of such
-		 * synchronization see implementation details of Intel
-		 * LBR call stack data profiling;
-		 */
-		if (prev_epc->pmu->swap_task_ctx)
-			prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
-		else
-			swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-	}
-}
-
 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx,
 				   struct task_struct *task, bool sched_in)
 {
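
An aside on the removed double_list_for_each_entry() helper: it advances two list cursors in lock step and stops as soon as either list is exhausted, which is what made the pairwise epc walk above safe. A hypothetical usage sketch (ctx_a and ctx_b are invented for illustration):

/*
 * Hypothetical usage of the removed helper: walk two pmu_ctx_lists in
 * lock step, pairing corresponding entries. Iteration ends when either
 * list runs out, so unequal lengths cannot overrun.
 */
struct perf_event_pmu_context *a, *b;

double_list_for_each_entry(a, b,
			   &ctx_a->pmu_ctx_list, &ctx_b->pmu_ctx_list,
			   pmu_ctx_entry) {
	/* a and b are the Nth entries of each list, respectively */
	if (a->pmu == b->pmu)
		pr_debug("paired epcs for pmu %s\n", a->pmu->name);
}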
@@ -3687,16 +3637,15 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 			WRITE_ONCE(next_ctx->task, task);
 
 			perf_ctx_sched_task_cb(ctx, task, false);
-			perf_event_swap_task_ctx_data(ctx, next_ctx);
 
 			perf_ctx_enable(ctx, false);
 
 			/*
 			 * RCU_INIT_POINTER here is safe because we've not
 			 * modified the ctx and the above modification of
-			 * ctx->task and ctx->task_ctx_data are immaterial
-			 * since those values are always verified under
-			 * ctx->lock which we're now holding.
+			 * ctx->task is immaterial since this value is
+			 * always verified under ctx->lock which we're now
+			 * holding.
 			 */
 			RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
 			RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
@@ -5005,7 +4954,6 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 		     struct perf_event *event)
 {
 	struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
-	void *task_ctx_data = NULL;
 
 	if (!ctx->task) {
 		/*
@@ -5038,14 +4986,6 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 	if (!new)
 		return ERR_PTR(-ENOMEM);
 
-	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
-		task_ctx_data = alloc_task_ctx_data(pmu);
-		if (!task_ctx_data) {
-			kfree(new);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
 	__perf_init_event_pmu_context(new, pmu);
 
 	/*
@@ -5080,14 +5020,7 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 	epc->ctx = ctx;
 
 found_epc:
-	if (task_ctx_data && !epc->task_ctx_data) {
-		epc->task_ctx_data = task_ctx_data;
-		task_ctx_data = NULL;
-		ctx->nr_task_data++;
-	}
 	raw_spin_unlock_irq(&ctx->lock);
-
-	free_task_ctx_data(pmu, task_ctx_data);
 	kfree(new);
 
 	return epc;
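
The removed lines in find_get_pmu_context() followed a common kernel idiom worth spelling out: allocate speculatively before taking the lock (the allocation may sleep), install under the lock only if nobody raced us, and free the unused copy after unlocking. A condensed sketch; alloc_blob() and free_blob() are stand-ins, not kernel APIs:

/*
 * Condensed sketch of the idiom the removed code used.
 */
void *data = alloc_blob();		/* may sleep, so done unlocked */

raw_spin_lock_irq(&ctx->lock);
if (data && !epc->task_ctx_data) {
	epc->task_ctx_data = data;	/* we won the race: install */
	data = NULL;			/* ownership moved to epc */
}
raw_spin_unlock_irq(&ctx->lock);

free_blob(data);			/* frees only the losing copy */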
@@ -5103,15 +5036,13 @@ static void free_cpc_rcu(struct rcu_head *head)
 	struct perf_cpu_pmu_context *cpc =
 		container_of(head, typeof(*cpc), epc.rcu_head);
 
-	kfree(cpc->epc.task_ctx_data);
 	kfree(cpc);
 }
 
 static void free_epc_rcu(struct rcu_head *head)
 {
 	struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
 
-	kfree(epc->task_ctx_data);
 	kfree(epc);
 }

@@ -14103,7 +14034,6 @@ inherit_event(struct perf_event *parent_event,
 	if (is_orphaned_event(parent_event) ||
 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
 		mutex_unlock(&parent_event->child_mutex);
-		/* task_ctx_data is freed with child_ctx */
 		free_event(child_event);
 		return NULL;
 	}
