84
84
/* Coffee Lake-S host-bridge PCI device IDs. */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9	0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10	0x3eca
97
87
/* Memory-controller topology limits for this family of host bridges. */
#define IE31200_RANKS_PER_CHANNEL	4
#define IE31200_DIMMS_PER_CHANNEL	2
#define IE31200_CHANNELS		2
118
108
/* CAPID0 capability-register bits. */
#define IE31200_CAPID0_DDPCD	BIT(6)
#define IE31200_CAPID0_ECC	BIT(1)
111
/* Non-constant mask variant of FIELD_GET() */
#define field_get(_mask, _reg)	(((_reg) & (_mask)) >> (ffs(_mask) - 1))
127
113
@@ -141,6 +127,7 @@ struct res_config {
141
127
u64 reg_eccerrlog_rank_mask ;
142
128
u64 reg_eccerrlog_syndrome_mask ;
143
129
/* DIMM characteristics register */
130
+ u64 reg_mad_dimm_size_granularity ;
144
131
u64 reg_mad_dimm_offset [IE31200_CHANNELS ];
145
132
u32 reg_mad_dimm_size_mask [IE31200_DIMMS_PER_CHANNEL ];
146
133
u32 reg_mad_dimm_rank_mask [IE31200_DIMMS_PER_CHANNEL ];
@@ -175,9 +162,9 @@ static const struct ie31200_dev_info ie31200_devs[] = {
175
162
};
176
163
177
164
struct dimm_data {
178
- u8 size ; /* in multiples of 256MB, except Skylake is 1GB */
179
- u8 dual_rank : 1 ,
180
- x16_width : 2 ; /* 0 means x8 width */
165
+ u64 size ; /* in bytes */
166
+ u8 ranks ;
167
+ enum dev_type dtype ;
181
168
};
182
169
183
170
static int how_many_channels (struct pci_dev * pdev )
@@ -340,26 +327,20 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config
340
327
/*
 * Decode one DIMM's geometry from a channel's MAD_DIMM address-decode
 * register value, using the platform-specific field masks in @cfg.
 */
static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
			       struct res_config *cfg)
{
	/* The size field counts units of reg_mad_dimm_size_granularity bytes. */
	dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) *
		   cfg->reg_mad_dimm_size_granularity;
	/* The rank field is zero-based: 0 -> single rank, 1 -> dual rank. */
	dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
	/* The width field is zero-based from x8, so offset from DEV_X8. */
	dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
347
334
348
335
static int ie31200_probe1 (struct pci_dev * pdev , struct res_config * cfg )
349
336
{
350
- int i , j , ret ;
337
+ int i , j , k , ret ;
351
338
struct mem_ctl_info * mci = NULL ;
352
339
struct edac_mc_layer layers [2 ];
353
340
void __iomem * window ;
354
341
struct ie31200_priv * priv ;
355
342
u32 addr_decode [IE31200_CHANNELS ];
356
343
357
- /*
358
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
359
- * this logic when adding new CPU support.
360
- */
361
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER (pdev -> device );
362
-
363
344
edac_dbg (0 , "MC:\n" );
364
345
365
346
if (!ecc_capable (pdev )) {
@@ -419,32 +400,25 @@ static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
419
400
unsigned long nr_pages ;
420
401
421
402
populate_dimm_info (& dimm_info , addr_decode [j ], i , cfg );
422
- edac_dbg (0 , "size: 0x%x, rank : %d, width : %d\n" ,
423
- dimm_info .size ,
424
- dimm_info .dual_rank ,
425
- dimm_info .x16_width );
403
+ edac_dbg (0 , "channel: %d, dimm: %d, size: %lld MiB, ranks : %d, DRAM chip type : %d\n" ,
404
+ j , i , dimm_info .size >> 20 ,
405
+ dimm_info .ranks ,
406
+ dimm_info .dtype );
426
407
427
- nr_pages = IE31200_PAGES (dimm_info .size , skl );
408
+ nr_pages = MiB_TO_PAGES (dimm_info .size >> 20 );
428
409
if (nr_pages == 0 )
429
410
continue ;
430
411
431
- if ( dimm_info .dual_rank ) {
432
- nr_pages = nr_pages / 2 ;
433
- dimm = edac_get_dimm (mci , (i * 2 ) + 1 , j , 0 );
412
+ nr_pages = nr_pages / dimm_info .ranks ;
413
+ for ( k = 0 ; k < dimm_info . ranks ; k ++ ) {
414
+ dimm = edac_get_dimm (mci , (i * dimm_info . ranks ) + k , j , 0 );
434
415
dimm -> nr_pages = nr_pages ;
435
416
edac_dbg (0 , "set nr pages: 0x%lx\n" , nr_pages );
436
417
dimm -> grain = 8 ; /* just a guess */
437
418
dimm -> mtype = cfg -> mtype ;
438
- dimm -> dtype = DEV_UNKNOWN ;
419
+ dimm -> dtype = dimm_info . dtype ;
439
420
dimm -> edac_mode = EDAC_UNKNOWN ;
440
421
}
441
- dimm = edac_get_dimm (mci , i * 2 , j , 0 );
442
- dimm -> nr_pages = nr_pages ;
443
- edac_dbg (0 , "set nr pages: 0x%lx\n" , nr_pages );
444
- dimm -> grain = 8 ; /* same guess */
445
- dimm -> mtype = cfg -> mtype ;
446
- dimm -> dtype = DEV_UNKNOWN ;
447
- dimm -> edac_mode = EDAC_UNKNOWN ;
448
422
}
449
423
}
450
424
@@ -510,6 +484,7 @@ static struct res_config snb_cfg = {
510
484
.reg_eccerrlog_ue_mask = BIT_ULL (1 ),
511
485
.reg_eccerrlog_rank_mask = GENMASK_ULL (28 , 27 ),
512
486
.reg_eccerrlog_syndrome_mask = GENMASK_ULL (23 , 16 ),
487
+ .reg_mad_dimm_size_granularity = BIT_ULL (28 ),
513
488
.reg_mad_dimm_offset [0 ] = 0x5004 ,
514
489
.reg_mad_dimm_offset [1 ] = 0x5008 ,
515
490
.reg_mad_dimm_size_mask [0 ] = GENMASK (7 , 0 ),
@@ -530,6 +505,7 @@ static struct res_config skl_cfg = {
530
505
.reg_eccerrlog_ue_mask = BIT_ULL (1 ),
531
506
.reg_eccerrlog_rank_mask = GENMASK_ULL (28 , 27 ),
532
507
.reg_eccerrlog_syndrome_mask = GENMASK_ULL (23 , 16 ),
508
+ .reg_mad_dimm_size_granularity = BIT_ULL (30 ),
533
509
.reg_mad_dimm_offset [0 ] = 0x500c ,
534
510
.reg_mad_dimm_offset [1 ] = 0x5010 ,
535
511
.reg_mad_dimm_size_mask [0 ] = GENMASK (5 , 0 ),
0 commit comments