// SPDX-License-Identifier: GPL-2.0
2
2
/* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */
3
3
#include "uncore.h"
4
+ #include "uncore_discovery.h"
4
5
5
6
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
/*
 * Additional Alder Lake IMC device IDs; all are wired to the same
 * SNB_PCI_UNCORE_IMC driver data in the PCI ID table below.
 */
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
67
82
68
83
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff

/* sysfs "format" attributes: event-config field names and bit ranges */
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
/* chmask shares the low umask bits; used by the ADL IMC format group */
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
@@ -1334,6 +1350,62 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
1334
1350
PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_2_IMC ),
1335
1351
.driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1336
1352
},
1353
+ { /* IMC */
1354
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_3_IMC ),
1355
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1356
+ },
1357
+ { /* IMC */
1358
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_4_IMC ),
1359
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1360
+ },
1361
+ { /* IMC */
1362
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_5_IMC ),
1363
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1364
+ },
1365
+ { /* IMC */
1366
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_6_IMC ),
1367
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1368
+ },
1369
+ { /* IMC */
1370
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_7_IMC ),
1371
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1372
+ },
1373
+ { /* IMC */
1374
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_8_IMC ),
1375
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1376
+ },
1377
+ { /* IMC */
1378
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_9_IMC ),
1379
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1380
+ },
1381
+ { /* IMC */
1382
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_10_IMC ),
1383
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1384
+ },
1385
+ { /* IMC */
1386
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_11_IMC ),
1387
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1388
+ },
1389
+ { /* IMC */
1390
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_12_IMC ),
1391
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1392
+ },
1393
+ { /* IMC */
1394
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_13_IMC ),
1395
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1396
+ },
1397
+ { /* IMC */
1398
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_14_IMC ),
1399
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1400
+ },
1401
+ { /* IMC */
1402
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_15_IMC ),
1403
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1404
+ },
1405
+ { /* IMC */
1406
+ PCI_DEVICE (PCI_VENDOR_ID_INTEL , PCI_DEVICE_ID_INTEL_ADL_16_IMC ),
1407
+ .driver_data = UNCORE_PCI_DEV_DATA (SNB_PCI_UNCORE_IMC , 0 ),
1408
+ },
1337
1409
{ /* end: all zeroes */ }
1338
1410
};
1339
1411
@@ -1390,7 +1462,8 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void)
1390
1462
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
1391
1463
#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000
1392
1464
1393
- static void tgl_uncore_imc_freerunning_init_box (struct intel_uncore_box * box )
1465
+ static void __uncore_imc_init_box (struct intel_uncore_box * box ,
1466
+ unsigned int base_offset )
1394
1467
{
1395
1468
struct pci_dev * pdev = tgl_uncore_get_mc_dev ();
1396
1469
struct intel_uncore_pmu * pmu = box -> pmu ;
@@ -1417,11 +1490,17 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
1417
1490
addr |= ((resource_size_t )mch_bar << 32 );
1418
1491
#endif
1419
1492
1493
+ addr += base_offset ;
1420
1494
box -> io_addr = ioremap (addr , type -> mmio_map_size );
1421
1495
if (!box -> io_addr )
1422
1496
pr_warn ("perf uncore: Failed to ioremap for %s.\n" , type -> name );
1423
1497
}
1424
/*
 * TGL free-running IMC counters sit at the start of the mapped region,
 * so map with no additional offset from the MCHBAR-derived base.
 */
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

1425
1504
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
1426
1505
.init_box = tgl_uncore_imc_freerunning_init_box ,
1427
1506
.exit_box = uncore_mmio_exit_box ,
@@ -1469,3 +1548,136 @@ void tgl_uncore_mmio_init(void)
1469
1548
}
1470
1549
1471
1550
/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */

/*
 * IMC register block: offsets added to the MCHBAR-derived address in
 * __uncore_imc_init_box(), plus the per-box register layout.
 */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

/* Control bits: freeze counting, reset control regs, reset counters */
#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
/* Init value: reset both control registers and counters */
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)
1568
+ static void adl_uncore_imc_init_box (struct intel_uncore_box * box )
1569
+ {
1570
+ __uncore_imc_init_box (box , ADL_UNCORE_IMC_BASE );
1571
+
1572
+ /* The global control in MC1 can control both MCs. */
1573
+ if (box -> io_addr && (box -> pmu -> pmu_idx == 1 ))
1574
+ writel (ADL_UNCORE_IMC_CTL_INT , box -> io_addr + ADL_UNCORE_IMC_GLOBAL_CTL );
1575
+ }
1576
+
1577
+ static void adl_uncore_mmio_disable_box (struct intel_uncore_box * box )
1578
+ {
1579
+ if (!box -> io_addr )
1580
+ return ;
1581
+
1582
+ writel (ADL_UNCORE_IMC_CTL_FRZ , box -> io_addr + uncore_mmio_box_ctl (box ));
1583
+ }
1584
+
1585
+ static void adl_uncore_mmio_enable_box (struct intel_uncore_box * box )
1586
+ {
1587
+ if (!box -> io_addr )
1588
+ return ;
1589
+
1590
+ writel (0 , box -> io_addr + uncore_mmio_box_ctl (box ));
1591
+ }
1592
+
/*
 * MMIO accessors for the ADL IMC general-purpose counters. Per-event
 * enable/disable reuse the generic discovery helpers (uncore_discovery.h).
 */
static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= adl_uncore_mmio_disable_box,
	.enable_box	= adl_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

/* ADL IMC events use event select, channel mask and edge detect only. */
#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

/* sysfs format attributes matching ADL_UNC_IMC_EVENT_MASK above */
static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

/* "format" directory for the ADL IMC PMU in sysfs */
static const struct attribute_group adl_uncore_imc_format_group = {
	.name	= "format",
	.attrs	= adl_uncore_imc_formats_attr,
};

/*
 * ADL IMC PMU type: 5 general-purpose 64-bit counters per box,
 * two boxes (one per memory controller).
 */
static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters	= 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

/* Indices into adl_uncore_imc_freerunning[] below */
enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

/*
 * Free-running counter layout. Initializer order presumably follows
 * struct freerunning_counters: { counter base, counter offset, box
 * offset, num counters, counter width } — TODO confirm against uncore.h.
 */
static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

/* Map the ADL free-running counter block at its dedicated base offset. */
static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

/* Free-running counters have no enable/disable: map, read, unmap only. */
static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

/*
 * Free-running ADL IMC PMU type; reuses the TGL event descriptions and
 * format group since the event encoding is shared.
 */
static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

/* NULL-terminated list of ADL MMIO uncore PMU types */
static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

/* Register the ADL MMIO uncore types with the core uncore driver. */
void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */