
Commit 150aae3

Merge tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Borislav Petkov:

 - Pass only an initialized perf event attribute to the LSM hook

 - Fix a use-after-free on the perf syscall's error path

 - A potential integer overflow fix in amd_core_pmu_init()

 - Fix the cgroup events tracking after the context handling rewrite

 - Return the proper value from the inherit_event() function on error

* tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Call LSM hook after copying perf_event_attr
  perf: Fix use-after-free in error path
  perf/x86/amd: fix potential integer overflow on shift of a int
  perf/core: Fix cgroup events tracking
  perf core: Return error pointer if inherit_event() fails to find pmu_ctx
2 parents 5b12981 + 0a041eb commit 150aae3

2 files changed: +18, -38 lines

arch/x86/events/amd/core.c

Lines changed: 1 addition & 1 deletion
@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
          * numbered counter following it.
          */
         for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
-                even_ctr_mask |= 1 << i;
+                even_ctr_mask |= BIT_ULL(i);

         pair_constraint = (struct event_constraint)
                 __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
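
A note on the BIT_ULL() change above: shifting the int constant 1 left by 31 or more bits is undefined behaviour and can never populate the upper half of a 64-bit mask, while BIT_ULL(i) performs the shift on a 1ULL value. A minimal userspace sketch of the difference (plain C, not kernel code; the 48-bit loop bound is an arbitrary choice for the demo):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t narrow = 0, wide = 0;
        int i;

        for (i = 0; i < 48; i += 2) {
                if (i < 31)                     /* skip the undefined shifts in the demo itself */
                        narrow |= 1 << i;       /* int shift: bits 31..63 are unreachable */
                wide |= 1ULL << i;              /* 64-bit shift, which is what BIT_ULL(i) expands to */
        }

        printf("int shift mask:  0x%016llx\n", (unsigned long long)narrow);
        printf("1ULL shift mask: 0x%016llx\n", (unsigned long long)wide);
        return 0;
}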

kernel/events/core.c

Lines changed: 17 additions & 37 deletions
@@ -380,7 +380,6 @@ enum event_type_t {

 /*
  * perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */

 static void perf_sched_delayed(struct work_struct *work);
@@ -389,7 +388,6 @@ static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
 static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;

-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

 static atomic_t nr_mmap_events __read_mostly;
@@ -844,9 +842,16 @@ static void perf_cgroup_switch(struct task_struct *task)
         struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
         struct perf_cgroup *cgrp;

-        cgrp = perf_cgroup_from_task(task, NULL);
+        /*
+         * cpuctx->cgrp is set when the first cgroup event enabled,
+         * and is cleared when the last cgroup event disabled.
+         */
+        if (READ_ONCE(cpuctx->cgrp) == NULL)
+                return;

         WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+        cgrp = perf_cgroup_from_task(task, NULL);
         if (READ_ONCE(cpuctx->cgrp) == cgrp)
                 return;
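
The hunk above moves the "anything to do?" decision into perf_cgroup_switch() itself: rather than every caller consulting the now-removed per-CPU perf_cgroup_events counter, the function returns early when cpuctx->cgrp is NULL. A rough userspace sketch of that pattern, with hypothetical names (struct cpu_ctx, cgroup_switch) standing in for the real perf structures:

#include <stddef.h>

struct cgroup;                          /* opaque in this sketch */

struct cpu_ctx {
        struct cgroup *cgrp;            /* set by the first enabled cgroup event, cleared by the last */
};

static void cgroup_switch(struct cpu_ctx *cpuctx, struct cgroup *next)
{
        if (cpuctx->cgrp == NULL)       /* no cgroup events on this CPU: nothing to switch */
                return;
        if (cpuctx->cgrp == next)       /* already running in the right cgroup */
                return;
        /* ...switch the old cgroup's events out and the new cgroup's events in... */
}

int main(void)
{
        struct cpu_ctx ctx = { .cgrp = NULL };

        cgroup_switch(&ctx, NULL);      /* returns immediately: no events are active */
        return 0;
}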

@@ -3631,8 +3636,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
          * to check if we have to switch out PMU state.
          * cgroup event are system-wide mode only
          */
-        if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-                perf_cgroup_switch(next);
+        perf_cgroup_switch(next);
 }

 static bool perf_less_group_idx(const void *l, const void *r)
@@ -4974,15 +4978,6 @@ static void unaccount_pmu_sb_event(struct perf_event *event)
         detach_sb_event(event);
 }

-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
-        if (event->parent)
-                return;
-
-        if (is_cgroup_event(event))
-                atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 static DEFINE_SPINLOCK(nr_freq_lock);
 #endif
@@ -5048,8 +5043,6 @@ static void unaccount_event(struct perf_event *event)
                 schedule_delayed_work(&perf_sched_work, HZ);
         }

-        unaccount_event_cpu(event, event->cpu);
-
         unaccount_pmu_sb_event(event);
 }

@@ -11679,15 +11672,6 @@ static void account_pmu_sb_event(struct perf_event *event)
         attach_sb_event(event);
 }

-static void account_event_cpu(struct perf_event *event, int cpu)
-{
-        if (event->parent)
-                return;
-
-        if (is_cgroup_event(event))
-                atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
 static void account_freq_event_nohz(void)
 {
@@ -11775,8 +11759,6 @@ static void account_event(struct perf_event *event)
         }
 enabled:

-        account_event_cpu(event, event->cpu);
-
         account_pmu_sb_event(event);
 }

@@ -12339,12 +12321,12 @@ SYSCALL_DEFINE5(perf_event_open,
         if (flags & ~PERF_FLAG_ALL)
                 return -EINVAL;

-        /* Do we allow access to perf_event_open(2) ? */
-        err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+        err = perf_copy_attr(attr_uptr, &attr);
         if (err)
                 return err;

-        err = perf_copy_attr(attr_uptr, &attr);
+        /* Do we allow access to perf_event_open(2) ? */
+        err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
         if (err)
                 return err;
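
The reordering above makes perf_event_open() copy and validate the attribute from user space before handing it to the LSM hook, so the hook no longer reads an uninitialized stack copy of perf_event_attr. A hedged userspace sketch of the same rule, with hypothetical stand-ins (copy_attr, security_check, open_event) rather than the real kernel interfaces:

#include <stdio.h>
#include <string.h>

struct attr { int type; unsigned long config; };

/* hypothetical stand-in for perf_copy_attr(): copy (and validate) caller data */
static int copy_attr(const struct attr *uattr, struct attr *attr)
{
        memcpy(attr, uattr, sizeof(*attr));
        return 0;
}

/* hypothetical stand-in for the LSM hook: a decision based on attr contents */
static int security_check(const struct attr *attr)
{
        return attr->type < 0 ? -1 : 0;
}

static int open_event(const struct attr *uattr)
{
        struct attr attr;                       /* garbage until copied in */
        int err;

        err = copy_attr(uattr, &attr);          /* copy the caller's data first... */
        if (err)
                return err;

        return security_check(&attr);           /* ...so policy sees real values */
}

int main(void)
{
        struct attr user_attr = { .type = 0, .config = 0x3c };

        printf("open_event() -> %d\n", open_event(&user_attr));
        return 0;
}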

@@ -12689,7 +12671,8 @@ SYSCALL_DEFINE5(perf_event_open,
         return event_fd;

 err_context:
-        /* event->pmu_ctx freed by free_event() */
+        put_pmu_ctx(event->pmu_ctx);
+        event->pmu_ctx = NULL; /* _free_event() */
 err_locked:
         mutex_unlock(&ctx->mutex);
         perf_unpin_context(ctx);
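
The err_context hunk above (and the matching err_pmu_ctx hunk below) drops the pmu_ctx reference on the error path and clears event->pmu_ctx, so the later _free_event() cannot put the same reference again. A simplified userspace sketch of that rule, using hypothetical refcounting helpers rather than the perf internals:

#include <stdlib.h>

struct pmu_ctx { int refcount; };

static void put_ctx(struct pmu_ctx *ctx)
{
        if (--ctx->refcount == 0)
                free(ctx);
}

struct event { struct pmu_ctx *pmu_ctx; };

static void free_event(struct event *event)
{
        if (event->pmu_ctx)                     /* generic teardown: put only if still owned */
                put_ctx(event->pmu_ctx);
        free(event);
}

static void error_path(struct event *event)
{
        put_ctx(event->pmu_ctx);                /* drop the reference taken on this path... */
        event->pmu_ctx = NULL;                  /* ...so free_event() cannot put it again */
        free_event(event);
}

int main(void)
{
        struct event *event = calloc(1, sizeof(*event));
        struct pmu_ctx *ctx = calloc(1, sizeof(*ctx));

        ctx->refcount = 1;
        event->pmu_ctx = ctx;
        error_path(event);                      /* no double put, no use-after-free */
        return 0;
}
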
@@ -12802,6 +12785,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,

 err_pmu_ctx:
         put_pmu_ctx(pmu_ctx);
+        event->pmu_ctx = NULL; /* _free_event() */
 err_unlock:
         mutex_unlock(&ctx->mutex);
         perf_unpin_context(ctx);
@@ -12822,13 +12806,11 @@ static void __perf_pmu_remove(struct perf_event_context *ctx,

         perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
                 perf_remove_from_context(event, 0);
-                unaccount_event_cpu(event, cpu);
                 put_pmu_ctx(event->pmu_ctx);
                 list_add(&event->migrate_entry, events);

                 for_each_sibling_event(sibling, event) {
                         perf_remove_from_context(sibling, 0);
-                        unaccount_event_cpu(sibling, cpu);
                         put_pmu_ctx(sibling->pmu_ctx);
                         list_add(&sibling->migrate_entry, events);
                 }
@@ -12847,7 +12829,6 @@ static void __perf_pmu_install_event(struct pmu *pmu,

         if (event->state >= PERF_EVENT_STATE_OFF)
                 event->state = PERF_EVENT_STATE_INACTIVE;
-        account_event_cpu(event, cpu);
         perf_install_in_context(ctx, event, cpu);
 }

@@ -13231,7 +13212,7 @@ inherit_event(struct perf_event *parent_event,
         pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
         if (IS_ERR(pmu_ctx)) {
                 free_event(child_event);
-                return NULL;
+                return ERR_CAST(pmu_ctx);
         }
         child_event->pmu_ctx = pmu_ctx;
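
The inherit_event() change above matters because, as far as can be told from the surrounding code, its callers treat a NULL return as "this event should simply not be inherited" and only propagate encoded error pointers, so returning NULL on failure silently dropped the error. A simplified userspace re-creation of the ERR_PTR()/IS_ERR()/ERR_CAST() convention (the kernel's versions live in include/linux/err.h; the bodies here are reduced for the sketch):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)        { return (void *)error; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline void *ERR_CAST(const void *ptr)  { return (void *)ptr; }

struct pmu_ctx;                                 /* opaque in this sketch */
struct child_event;

static struct pmu_ctx *find_get_ctx(void)
{
        return ERR_PTR(-ENOMEM);                /* pretend the lookup failed */
}

static struct child_event *inherit(void)
{
        struct pmu_ctx *ctx = find_get_ctx();

        if (IS_ERR(ctx))
                return ERR_CAST(ctx);           /* propagate the errno, not NULL */
        /* ...set up and return the child event... */
        return NULL;                            /* NULL here means "nothing to inherit" */
}

int main(void)
{
        struct child_event *child = inherit();

        if (IS_ERR(child))
                printf("inherit failed: %ld\n", PTR_ERR(child));
        return 0;
}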

@@ -13742,8 +13723,7 @@ static int __perf_cgroup_move(void *info)
         struct task_struct *task = info;

         preempt_disable();
-        if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-                perf_cgroup_switch(task);
+        perf_cgroup_switch(task);
         preempt_enable();

         return 0;
