Skip to content

Commit 327ecdb

Browse files
committed
Merge tag 'perf-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull performance events updates from Ingo Molnar:

 "Core:
   - Move perf_event sysctls into kernel/events/ (Joel Granados)
   - Use POLLHUP for pinned events in error (Namhyung Kim)
   - Avoid the read if the count is already updated (Peter Zijlstra)
   - Allow the EPOLLRDNORM flag for poll (Tao Chen)
   - locking/percpu-rwsem: Add guard support [ NOTE: this got
     (mis-)merged into the perf tree due to related work ] (Peter Zijlstra)

  perf_pmu_unregister() related improvements: (Peter Zijlstra)
   - Simplify the perf_event_alloc() error path
   - Simplify the perf_pmu_register() error path
   - Simplify perf_pmu_register()
   - Simplify perf_init_event()
   - Simplify perf_event_alloc()
   - Merge struct pmu::pmu_disable_count into struct
     perf_cpu_pmu_context::pmu_disable_count
   - Add this_cpc() helper
   - Introduce perf_free_addr_filters()
   - Robustify perf_event_free_bpf_prog()
   - Simplify the perf_mmap() control flow
   - Further simplify perf_mmap()
   - Remove retry loop from perf_mmap()
   - Lift event->mmap_mutex in perf_mmap()
   - Detach 'struct perf_cpu_pmu_context' and 'struct pmu' lifetimes
   - Fix perf_mmap() failure path

  Uprobes:
   - Harden x86 uretprobe syscall trampoline check (Jiri Olsa)
   - Remove redundant spinlock in uprobe_deny_signal() (Liao Chang)
   - Remove the spinlock within handle_singlestep() (Liao Chang)

  x86 Intel PMU enhancements:
   - Support PEBS counters snapshotting (Kan Liang)
   - Fix intel_pmu_read_event() (Kan Liang)
   - Extend per event callchain limit to branch stack (Kan Liang)
   - Fix system-wide LBR profiling (Kan Liang)
   - Allocate bts_ctx only if necessary (Li RongQing)
   - Apply static call for drain_pebs (Peter Zijlstra)

  x86 AMD PMU enhancements: (Ravi Bangoria)
   - Remove pointless sample period check
   - Fix ->config to sample period calculation for OP PMU
   - Fix perf_ibs_op.cnt_mask for CurCnt
   - Don't allow freq mode event creation through ->config interface
   - Add PMU specific minimum period
   - Add ->check_period() callback
   - Ceil sample_period to min_period
   - Add support for OP Load Latency Filtering
   - Update DTLB/PageSize decode logic

  Hardware breakpoints:
   - Return EOPNOTSUPP for unsupported breakpoint type (Saket Kumar
     Bhaskar)

  Hardlockup detector improvements: (Li Huafei)
   - perf_event memory leak
   - Warn if watchdog_ev is leaked

  Fixes and cleanups:
   - Misc fixes and cleanups (Andy Shevchenko, Kan Liang, Peter
     Zijlstra, Ravi Bangoria, Thorsten Blum, XieLudan)"

* tag 'perf-core-2025-03-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (55 commits)
  perf: Fix __percpu annotation
  perf: Clean up pmu specific data
  perf/x86: Remove swap_task_ctx()
  perf/x86/lbr: Fix shorter LBRs call stacks for the system-wide mode
  perf: Supply task information to sched_task()
  perf: attach/detach PMU specific data
  locking/percpu-rwsem: Add guard support
  perf: Save PMU specific data in task_struct
  perf: Extend per event callchain limit to branch stack
  perf/ring_buffer: Allow the EPOLLRDNORM flag for poll
  perf/core: Use POLLHUP for pinned events in error
  perf/core: Use sysfs_emit() instead of scnprintf()
  perf/core: Remove optional 'size' arguments from strscpy() calls
  perf/x86/intel/bts: Check if bts_ctx is allocated when calling BTS functions
  uprobes/x86: Harden uretprobe syscall trampoline check
  watchdog/hardlockup/perf: Warn if watchdog_ev is leaked
  watchdog/hardlockup/perf: Fix perf_event memory leak
  perf/x86: Annotate struct bts_buffer::buf with __counted_by()
  perf/core: Clean up perf_try_init_event()
  perf/core: Fix perf_mmap() failure path
  ...
2 parents 32b2253 + 12e766d commit 327ecdb

File tree

34 files changed

+1416
-737
lines changed

34 files changed

+1416
-737
lines changed

arch/powerpc/perf/core-book3s.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
132132

133133
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
134134
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
135-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
135+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
136+
struct task_struct *task, bool sched_in)
137+
{
138+
}
136139
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
137140
static void pmao_restore_workaround(bool ebb) { }
138141
#endif /* CONFIG_PPC32 */
@@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
444447
/* Called from ctxsw to prevent one process's branch entries to
445448
* mingle with the other process's entries during context switch.
446449
*/
447-
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
450+
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
451+
struct task_struct *task, bool sched_in)
448452
{
449453
if (!ppmu->bhrb_nr)
450454
return;

arch/s390/kernel/perf_pai_crypto.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,8 @@ static void paicrypt_have_samples(void)
518518
/* Called on schedule-in and schedule-out. No access to event structure,
519519
* but for sampling only event CRYPTO_ALL is allowed.
520520
*/
521-
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
521+
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
522+
struct task_struct *task, bool sched_in)
522523
{
523524
/* We started with a clean page on event installation. So read out
524525
* results on schedule_out and if page was dirty, save old values.

arch/s390/kernel/perf_pai_ext.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -542,7 +542,8 @@ static void paiext_have_samples(void)
542542
/* Called on schedule-in and schedule-out. No access to event structure,
543543
* but for sampling only event NNPA_ALL is allowed.
544544
*/
545-
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
545+
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
546+
struct task_struct *task, bool sched_in)
546547
{
547548
/* We started with a clean page on event installation. So read out
548549
* results on schedule_out and if page was dirty, save old values.

arch/x86/events/amd/brs.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void)
381381
* On ctxswin, sched_in = true, called after the PMU has started
382382
* On ctxswout, sched_in = false, called before the PMU is stopped
383383
*/
384-
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
384+
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
385+
struct task_struct *task, bool sched_in)
385386
{
386387
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
387388

0 commit comments

Comments
 (0)