@@ -510,16 +510,18 @@ static void pmu_sbi_set_scounteren(void *arg)
 {
 	struct perf_event *event = (struct perf_event *)arg;
 
-	csr_write(CSR_SCOUNTEREN,
-		  csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+	if (event->hw.idx != -1)
+		csr_write(CSR_SCOUNTEREN,
+			  csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_reset_scounteren(void *arg)
 {
 	struct perf_event *event = (struct perf_event *)arg;
 
-	csr_write(CSR_SCOUNTEREN,
-		  csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+	if (event->hw.idx != -1)
+		csr_write(CSR_SCOUNTEREN,
+			  csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -541,7 +543,8 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
 
 	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
 	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-		pmu_sbi_set_scounteren((void *)event);
+		on_each_cpu_mask(mm_cpumask(event->owner->mm),
+				 pmu_sbi_set_scounteren, (void *)event, 1);
 }
 
 static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -551,7 +554,8 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
 
 	if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
 	    (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-		pmu_sbi_reset_scounteren((void *)event);
+		on_each_cpu_mask(mm_cpumask(event->owner->mm),
+				 pmu_sbi_reset_scounteren, (void *)event, 1);
 
 	ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
 	if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
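Note on the change: the scounteren callbacks used to run only on the local CPU; they are now broadcast via on_each_cpu_mask() to every CPU in the owner task's mm_cpumask(), so they must revalidate event->hw.idx themselves, since the IPI can land on a CPU where the event has no hardware counter assigned. Below is a minimal sketch of that pattern, not the driver's exact code: the sketch_* names are hypothetical, while on_each_cpu_mask(), mm_cpumask(), event->owner, and the RISC-V csr_read()/csr_write() helpers are the kernel APIs used in the diff above, and pmu_sbi_csr_index() is the driver helper it already calls.

#include <linux/perf_event.h>
#include <linux/mm_types.h>	/* mm_cpumask() */
#include <linux/smp.h>		/* on_each_cpu_mask() */
#include <asm/csr.h>		/* csr_read()/csr_write(), CSR_SCOUNTEREN */

/*
 * IPI callback (hypothetical name): runs on each targeted CPU, in
 * interrupt context on remote CPUs, and sets that hart's SCOUNTEREN
 * bit for the event's counter.  The hw.idx check mirrors the guard
 * added above: the event may have no counter assigned (idx == -1)
 * on the CPU where the IPI lands.
 */
static void sketch_set_scounteren(void *arg)
{
	struct perf_event *event = arg;

	if (event->hw.idx != -1)
		csr_write(CSR_SCOUNTEREN,
			  csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
}

static void sketch_enable_user_access(struct perf_event *event)
{
	/*
	 * Run the callback on every CPU where the owner task's mm is
	 * active; wait=1 blocks until all of them have finished, so the
	 * CSR update is in place everywhere before this returns.
	 */
	on_each_cpu_mask(mm_cpumask(event->owner->mm),
			 sketch_set_scounteren, (void *)event, 1);
}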