@@ -1254,20 +1254,6 @@ static void get_ctx(struct perf_event_context *ctx)
 	refcount_inc(&ctx->refcount);
 }
 
-static void *alloc_task_ctx_data(struct pmu *pmu)
-{
-	if (pmu->task_ctx_cache)
-		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);
-
-	return NULL;
-}
-
-static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
-{
-	if (pmu->task_ctx_cache && task_ctx_data)
-		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-}
-
 static void free_ctx(struct rcu_head *head)
 {
 	struct perf_event_context *ctx;
@@ -3577,42 +3563,6 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
 	}
 }
 
-#define double_list_for_each_entry(pos1, pos2, head1, head2, member)	\
-	for (pos1 = list_first_entry(head1, typeof(*pos1), member),	\
-	     pos2 = list_first_entry(head2, typeof(*pos2), member);	\
-	     !list_entry_is_head(pos1, head1, member) &&		\
-	     !list_entry_is_head(pos2, head2, member);			\
-	     pos1 = list_next_entry(pos1, member),			\
-	     pos2 = list_next_entry(pos2, member))
-
-static void perf_event_swap_task_ctx_data(struct perf_event_context *prev_ctx,
-					  struct perf_event_context *next_ctx)
-{
-	struct perf_event_pmu_context *prev_epc, *next_epc;
-
-	if (!prev_ctx->nr_task_data)
-		return;
-
-	double_list_for_each_entry(prev_epc, next_epc,
-				   &prev_ctx->pmu_ctx_list, &next_ctx->pmu_ctx_list,
-				   pmu_ctx_entry) {
-
-		if (WARN_ON_ONCE(prev_epc->pmu != next_epc->pmu))
-			continue;
-
-		/*
-		 * PMU specific parts of task perf context can require
-		 * additional synchronization. As an example of such
-		 * synchronization see implementation details of Intel
-		 * LBR call stack data profiling;
-		 */
-		if (prev_epc->pmu->swap_task_ctx)
-			prev_epc->pmu->swap_task_ctx(prev_epc, next_epc);
-		else
-			swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-	}
-}
-
 static void perf_ctx_sched_task_cb(struct perf_event_context *ctx,
 				   struct task_struct *task, bool sched_in)
 {
@@ -3687,16 +3637,15 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
 			WRITE_ONCE(next_ctx->task, task);
 
 			perf_ctx_sched_task_cb(ctx, task, false);
-			perf_event_swap_task_ctx_data(ctx, next_ctx);
 
 			perf_ctx_enable(ctx, false);
 
 			/*
 			 * RCU_INIT_POINTER here is safe because we've not
 			 * modified the ctx and the above modification of
-			 * ctx->task and ctx->task_ctx_data are immaterial
-			 * since those values are always verified under
-			 * ctx->lock which we're now holding.
+			 * ctx->task is immaterial since this value is
+			 * always verified under ctx->lock which we're now
+			 * holding.
 			 */
 			RCU_INIT_POINTER(task->perf_event_ctxp, next_ctx);
 			RCU_INIT_POINTER(next->perf_event_ctxp, ctx);
@@ -5005,7 +4954,6 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 		     struct perf_event *event)
 {
 	struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc;
-	void *task_ctx_data = NULL;
 
 	if (!ctx->task) {
 		/*
@@ -5038,14 +4986,6 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 	if (!new)
 		return ERR_PTR(-ENOMEM);
 
-	if (event->attach_state & PERF_ATTACH_TASK_DATA) {
-		task_ctx_data = alloc_task_ctx_data(pmu);
-		if (!task_ctx_data) {
-			kfree(new);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
-
 	__perf_init_event_pmu_context(new, pmu);
 
 	/*
@@ -5080,14 +5020,7 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
 	epc->ctx = ctx;
 
 found_epc:
-	if (task_ctx_data && !epc->task_ctx_data) {
-		epc->task_ctx_data = task_ctx_data;
-		task_ctx_data = NULL;
-		ctx->nr_task_data++;
-	}
 	raw_spin_unlock_irq(&ctx->lock);
-
-	free_task_ctx_data(pmu, task_ctx_data);
 	kfree(new);
 
 	return epc;
@@ -5103,15 +5036,13 @@ static void free_cpc_rcu(struct rcu_head *head)
 	struct perf_cpu_pmu_context *cpc =
 			container_of(head, typeof(*cpc), epc.rcu_head);
 
-	kfree(cpc->epc.task_ctx_data);
 	kfree(cpc);
 }
 
 static void free_epc_rcu(struct rcu_head *head)
 {
 	struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
 
-	kfree(epc->task_ctx_data);
 	kfree(epc);
 }
 
@@ -14103,7 +14034,6 @@ inherit_event(struct perf_event *parent_event,
 	if (is_orphaned_event(parent_event) ||
 	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
 		mutex_unlock(&parent_event->child_mutex);
-		/* task_ctx_data is freed with child_ctx */
 		free_event(child_event);
 		return NULL;
 	}