19
19
#include <asm/papr_pdsm.h>
20
20
#include <asm/mce.h>
21
21
#include <asm/unaligned.h>
22
+ #include <linux/perf_event.h>
22
23
23
24
#define BIND_ANY_ADDR (~0ul)
24
25
@@ -124,6 +125,8 @@ struct papr_scm_priv {
124
125
/* The bits which needs to be overridden */
125
126
u64 health_bitmap_inject_mask ;
126
127
128
+ /* array to have event_code and stat_id mappings */
129
+ char * * nvdimm_events_map ;
127
130
};
128
131
129
132
static int papr_scm_pmem_flush (struct nd_region * nd_region ,
@@ -344,6 +347,225 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
344
347
return 0 ;
345
348
}
346
349
350
+ #ifdef CONFIG_PERF_EVENTS
351
+ #define to_nvdimm_pmu (_pmu ) container_of(_pmu, struct nvdimm_pmu, pmu)
352
+
353
+ static int papr_scm_pmu_get_value (struct perf_event * event , struct device * dev , u64 * count )
354
+ {
355
+ struct papr_scm_perf_stat * stat ;
356
+ struct papr_scm_perf_stats * stats ;
357
+ struct papr_scm_priv * p = (struct papr_scm_priv * )dev -> driver_data ;
358
+ int rc , size ;
359
+
360
+ /* Allocate request buffer enough to hold single performance stat */
361
+ size = sizeof (struct papr_scm_perf_stats ) +
362
+ sizeof (struct papr_scm_perf_stat );
363
+
364
+ if (!p || !p -> nvdimm_events_map )
365
+ return - EINVAL ;
366
+
367
+ stats = kzalloc (size , GFP_KERNEL );
368
+ if (!stats )
369
+ return - ENOMEM ;
370
+
371
+ stat = & stats -> scm_statistic [0 ];
372
+ memcpy (& stat -> stat_id ,
373
+ p -> nvdimm_events_map [event -> attr .config ],
374
+ sizeof (stat -> stat_id ));
375
+ stat -> stat_val = 0 ;
376
+
377
+ rc = drc_pmem_query_stats (p , stats , 1 );
378
+ if (rc < 0 ) {
379
+ kfree (stats );
380
+ return rc ;
381
+ }
382
+
383
+ * count = be64_to_cpu (stat -> stat_val );
384
+ kfree (stats );
385
+ return 0 ;
386
+ }
387
+
388
+ static int papr_scm_pmu_event_init (struct perf_event * event )
389
+ {
390
+ struct nvdimm_pmu * nd_pmu = to_nvdimm_pmu (event -> pmu );
391
+ struct papr_scm_priv * p ;
392
+
393
+ if (!nd_pmu )
394
+ return - EINVAL ;
395
+
396
+ /* test the event attr type for PMU enumeration */
397
+ if (event -> attr .type != event -> pmu -> type )
398
+ return - ENOENT ;
399
+
400
+ /* it does not support event sampling mode */
401
+ if (is_sampling_event (event ))
402
+ return - EOPNOTSUPP ;
403
+
404
+ /* no branch sampling */
405
+ if (has_branch_stack (event ))
406
+ return - EOPNOTSUPP ;
407
+
408
+ p = (struct papr_scm_priv * )nd_pmu -> dev -> driver_data ;
409
+ if (!p )
410
+ return - EINVAL ;
411
+
412
+ /* Invalid eventcode */
413
+ if (event -> attr .config == 0 || event -> attr .config > 16 )
414
+ return - EINVAL ;
415
+
416
+ return 0 ;
417
+ }
418
+
419
+ static int papr_scm_pmu_add (struct perf_event * event , int flags )
420
+ {
421
+ u64 count ;
422
+ int rc ;
423
+ struct nvdimm_pmu * nd_pmu = to_nvdimm_pmu (event -> pmu );
424
+
425
+ if (!nd_pmu )
426
+ return - EINVAL ;
427
+
428
+ if (flags & PERF_EF_START ) {
429
+ rc = papr_scm_pmu_get_value (event , nd_pmu -> dev , & count );
430
+ if (rc )
431
+ return rc ;
432
+
433
+ local64_set (& event -> hw .prev_count , count );
434
+ }
435
+
436
+ return 0 ;
437
+ }
438
+
439
+ static void papr_scm_pmu_read (struct perf_event * event )
440
+ {
441
+ u64 prev , now ;
442
+ int rc ;
443
+ struct nvdimm_pmu * nd_pmu = to_nvdimm_pmu (event -> pmu );
444
+
445
+ if (!nd_pmu )
446
+ return ;
447
+
448
+ rc = papr_scm_pmu_get_value (event , nd_pmu -> dev , & now );
449
+ if (rc )
450
+ return ;
451
+
452
+ prev = local64_xchg (& event -> hw .prev_count , now );
453
+ local64_add (now - prev , & event -> count );
454
+ }
455
+
456
/* Fold in the final counter delta when the event is removed from the PMU. */
static void papr_scm_pmu_del(struct perf_event *event, int flags)
{
	papr_scm_pmu_read(event);
}
460
+
461
+ static int papr_scm_pmu_check_events (struct papr_scm_priv * p , struct nvdimm_pmu * nd_pmu )
462
+ {
463
+ struct papr_scm_perf_stat * stat ;
464
+ struct papr_scm_perf_stats * stats ;
465
+ char * statid ;
466
+ int index , rc , count ;
467
+ u32 available_events ;
468
+
469
+ if (!p -> stat_buffer_len )
470
+ return - ENOENT ;
471
+
472
+ available_events = (p -> stat_buffer_len - sizeof (struct papr_scm_perf_stats ))
473
+ / sizeof (struct papr_scm_perf_stat );
474
+
475
+ /* Allocate the buffer for phyp where stats are written */
476
+ stats = kzalloc (p -> stat_buffer_len , GFP_KERNEL );
477
+ if (!stats ) {
478
+ rc = - ENOMEM ;
479
+ return rc ;
480
+ }
481
+
482
+ /* Allocate memory to nvdimm_event_map */
483
+ p -> nvdimm_events_map = kcalloc (available_events , sizeof (char * ), GFP_KERNEL );
484
+ if (!p -> nvdimm_events_map ) {
485
+ rc = - ENOMEM ;
486
+ goto out_stats ;
487
+ }
488
+
489
+ /* Called to get list of events supported */
490
+ rc = drc_pmem_query_stats (p , stats , 0 );
491
+ if (rc )
492
+ goto out_nvdimm_events_map ;
493
+
494
+ for (index = 0 , stat = stats -> scm_statistic , count = 0 ;
495
+ index < available_events ; index ++ , ++ stat ) {
496
+ statid = kzalloc (strlen (stat -> stat_id ) + 1 , GFP_KERNEL );
497
+ if (!statid ) {
498
+ rc = - ENOMEM ;
499
+ goto out_nvdimm_events_map ;
500
+ }
501
+
502
+ strcpy (statid , stat -> stat_id );
503
+ p -> nvdimm_events_map [count ] = statid ;
504
+ count ++ ;
505
+ }
506
+ p -> nvdimm_events_map [count ] = NULL ;
507
+ kfree (stats );
508
+ return 0 ;
509
+
510
+ out_nvdimm_events_map :
511
+ kfree (p -> nvdimm_events_map );
512
+ out_stats :
513
+ kfree (stats );
514
+ return rc ;
515
+ }
516
+
517
+ static void papr_scm_pmu_register (struct papr_scm_priv * p )
518
+ {
519
+ struct nvdimm_pmu * nd_pmu ;
520
+ int rc , nodeid ;
521
+
522
+ nd_pmu = kzalloc (sizeof (* nd_pmu ), GFP_KERNEL );
523
+ if (!nd_pmu ) {
524
+ rc = - ENOMEM ;
525
+ goto pmu_err_print ;
526
+ }
527
+
528
+ rc = papr_scm_pmu_check_events (p , nd_pmu );
529
+ if (rc )
530
+ goto pmu_check_events_err ;
531
+
532
+ nd_pmu -> pmu .task_ctx_nr = perf_invalid_context ;
533
+ nd_pmu -> pmu .name = nvdimm_name (p -> nvdimm );
534
+ nd_pmu -> pmu .event_init = papr_scm_pmu_event_init ;
535
+ nd_pmu -> pmu .read = papr_scm_pmu_read ;
536
+ nd_pmu -> pmu .add = papr_scm_pmu_add ;
537
+ nd_pmu -> pmu .del = papr_scm_pmu_del ;
538
+
539
+ nd_pmu -> pmu .capabilities = PERF_PMU_CAP_NO_INTERRUPT |
540
+ PERF_PMU_CAP_NO_EXCLUDE ;
541
+
542
+ /*updating the cpumask variable */
543
+ nodeid = numa_map_to_online_node (dev_to_node (& p -> pdev -> dev ));
544
+ nd_pmu -> arch_cpumask = * cpumask_of_node (nodeid );
545
+
546
+ rc = register_nvdimm_pmu (nd_pmu , p -> pdev );
547
+ if (rc )
548
+ goto pmu_register_err ;
549
+
550
+ /*
551
+ * Set archdata.priv value to nvdimm_pmu structure, to handle the
552
+ * unregistering of pmu device.
553
+ */
554
+ p -> pdev -> archdata .priv = nd_pmu ;
555
+ return ;
556
+
557
+ pmu_register_err :
558
+ kfree (p -> nvdimm_events_map );
559
+ pmu_check_events_err :
560
+ kfree (nd_pmu );
561
+ pmu_err_print :
562
+ dev_info (& p -> pdev -> dev , "nvdimm pmu didn't register rc=%d\n" , rc );
563
+ }
564
+
565
+ #else
566
+ static void papr_scm_pmu_register (struct papr_scm_priv * p ) { }
567
+ #endif
568
+
347
569
/*
348
570
* Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
349
571
* health information.
@@ -1320,6 +1542,7 @@ static int papr_scm_probe(struct platform_device *pdev)
1320
1542
goto err2 ;
1321
1543
1322
1544
platform_set_drvdata (pdev , p );
1545
+ papr_scm_pmu_register (p );
1323
1546
1324
1547
return 0 ;
1325
1548
@@ -1338,6 +1561,12 @@ static int papr_scm_remove(struct platform_device *pdev)
1338
1561
1339
1562
nvdimm_bus_unregister (p -> bus );
1340
1563
drc_pmem_unbind (p );
1564
+
1565
+ if (pdev -> archdata .priv )
1566
+ unregister_nvdimm_pmu (pdev -> archdata .priv );
1567
+
1568
+ pdev -> archdata .priv = NULL ;
1569
+ kfree (p -> nvdimm_events_map );
1341
1570
kfree (p -> bus_desc .provider_name );
1342
1571
kfree (p );
1343
1572
0 commit comments