#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

-#define RETRY_RD_ERR_LOG_UC		BIT(1)
-#define RETRY_RD_ERR_LOG_EN_PATSPR	BIT(13)
-#define RETRY_RD_ERR_LOG_NOOVER	BIT(14)
-#define RETRY_RD_ERR_LOG_EN		BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))

static struct list_head *i10nm_edac_list;
@@ -88,153 +83,193 @@ static bool mem_cfg_2lm;

static struct reg_rrl icx_reg_rrl_ddr = {
	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
		{0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
	},
+	.widths = {4, 4, 4, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
};

static struct reg_rrl spr_reg_rrl_ddr = {
	.set_num = 3,
+	.modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
	.offsets = {
		{0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
		{0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
		{0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
};

static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
		{0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
};

static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
	.offsets = {
		{0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
		{0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
};

-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, u32 *rrl_ctl,
-				      u32 *offsets_scrub, u32 *offsets_demand,
-				      u32 *offsets_demand2)
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
{
-	u32 s, d, d2;
+	switch (width) {
+	case 4:
+		return I10NM_GET_REG32(imc, chan, offset);
+	case 8:
+		return I10NM_GET_REG64(imc, chan, offset);
+	default:
+		i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
+		return 0;
+	}
+}

-	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
-	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
-	if (offsets_demand2)
-		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+	switch (width) {
+	case 4:
+		return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+	default:
+		i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+	}
+}
+
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+		       int rrl_set, bool enable, u32 *rrl_ctl)
+{
+	enum rrl_mode mode = rrl->modes[rrl_set];
+	u32 offset = rrl->offsets[rrl_set][0], v;
+	u8 width = rrl->widths[0];
+	bool first, scrub;
+
+	/* First or last read error. */
+	first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+	/* Patrol scrub or on-demand read error. */
+	scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+	v = read_imc_reg(imc, chan, offset, width);

	if (enable) {
-		/* Save default configurations */
-		rrl_ctl[0] = s;
-		rrl_ctl[1] = d;
-		if (offsets_demand2)
-			rrl_ctl[2] = d2;
-
-		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
-		s |= RETRY_RD_ERR_LOG_EN_PATSPR;
-		s |= RETRY_RD_ERR_LOG_EN;
-		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
-		d &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-		d |= RETRY_RD_ERR_LOG_EN;
-
-		if (offsets_demand2) {
-			d2 &= ~RETRY_RD_ERR_LOG_UC;
-			d2 &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-			d2 |= RETRY_RD_ERR_LOG_NOOVER;
-			d2 |= RETRY_RD_ERR_LOG_EN;
-		}
+		/* Save default configurations. */
+		*rrl_ctl = v;
+		v &= ~rrl->uc_mask;
+
+		if (first)
+			v |= rrl->noover_mask;
+		else
+			v &= ~rrl->noover_mask;
+
+		if (scrub)
+			v |= rrl->en_patspr_mask;
+		else
+			v &= ~rrl->en_patspr_mask;
+
+		v |= rrl->en_mask;
	} else {
-		/* Restore default configurations */
-		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_UC)
-			s |= RETRY_RD_ERR_LOG_UC;
-		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_NOOVER)
-			s |= RETRY_RD_ERR_LOG_NOOVER;
-		if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN_PATSPR))
-			s &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-		if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN))
-			s &= ~RETRY_RD_ERR_LOG_EN;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_UC)
-			d |= RETRY_RD_ERR_LOG_UC;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_NOOVER)
-			d |= RETRY_RD_ERR_LOG_NOOVER;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_EN_PATSPR)
-			d |= RETRY_RD_ERR_LOG_EN_PATSPR;
-		if (!(rrl_ctl[1] & RETRY_RD_ERR_LOG_EN))
-			d &= ~RETRY_RD_ERR_LOG_EN;
-
-		if (offsets_demand2) {
-			if (rrl_ctl[2] & RETRY_RD_ERR_LOG_UC)
-				d2 |= RETRY_RD_ERR_LOG_UC;
-			if (rrl_ctl[2] & RETRY_RD_ERR_LOG_EN_PATSPR)
-				d2 |= RETRY_RD_ERR_LOG_EN_PATSPR;
-			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_NOOVER))
-				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
-			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_EN))
-				d2 &= ~RETRY_RD_ERR_LOG_EN;
+		/* Restore default configurations. */
+		if (*rrl_ctl & rrl->uc_mask)
+			v |= rrl->uc_mask;
+
+		if (first) {
+			if (!(*rrl_ctl & rrl->noover_mask))
+				v &= ~rrl->noover_mask;
+		} else {
+			if (*rrl_ctl & rrl->noover_mask)
+				v |= rrl->noover_mask;
		}
+
+		if (scrub) {
+			if (!(*rrl_ctl & rrl->en_patspr_mask))
+				v &= ~rrl->en_patspr_mask;
+		} else {
+			if (*rrl_ctl & rrl->en_patspr_mask)
+				v |= rrl->en_patspr_mask;
+		}
+
+		if (!(*rrl_ctl & rrl->en_mask))
+			v &= ~rrl->en_mask;
	}

-	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
-	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
-	if (offsets_demand2)
-		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+	write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+			bool enable, u32 *rrl_ctl)
+{
+	for (int i = 0; i < rrl->set_num; i++)
+		enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+	struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+	int i, chan_num = res_cfg->ddr_chan_num;
+	struct skx_channel *chan = imc->chan;
+
+	if (!imc->mbase)
+		return;
+
+	for (i = 0; i < chan_num; i++)
+		enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+	struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+	int i, chan_num = res_cfg->hbm_chan_num;
+	struct skx_channel *chan = imc->chan;
+
+	if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+		return;
+
+	for (i = 0; i < chan_num; i++) {
+		enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+		enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+	}
}

static void enable_retry_rd_err_log(bool enable)
{
-	int i, j, imc_num, chan_num;
-	struct skx_channel *chan;
-	struct skx_imc *imc;
	struct skx_dev *d;
+	int i, imc_num;

	edac_dbg(2, "\n");

	list_for_each_entry(d, i10nm_edac_list, list) {
		imc_num = res_cfg->ddr_imc_num;
-		chan_num = res_cfg->ddr_chan_num;
-
-		for (i = 0; i < imc_num; i++) {
-			imc = &d->imc[i];
-			if (!imc->mbase)
-				continue;
-
-			chan = d->imc[i].chan;
-			for (j = 0; j < chan_num; j++)
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
-							  res_cfg->reg_rrl_ddr->offsets[0],
-							  res_cfg->reg_rrl_ddr->offsets[1],
-							  res_cfg->reg_rrl_ddr->set_num > 2 ?
-							  res_cfg->reg_rrl_ddr->offsets[2] : NULL);
-
-		}
+		for (i = 0; i < imc_num; i++)
+			enable_rrls_ddr(&d->imc[i], enable);

		imc_num += res_cfg->hbm_imc_num;
-		chan_num = res_cfg->hbm_chan_num;
-
-		for (; i < imc_num; i++) {
-			imc = &d->imc[i];
-			if (!imc->mbase || !imc->hbm_mc)
-				continue;
-
-			chan = d->imc[i].chan;
-			for (j = 0; j < chan_num; j++) {
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
-							  res_cfg->reg_rrl_hbm[0]->offsets[0],
-							  res_cfg->reg_rrl_hbm[0]->offsets[1],
-							  NULL);
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[1],
-							  res_cfg->reg_rrl_hbm[1]->offsets[0],
-							  res_cfg->reg_rrl_hbm[1]->offsets[1],
-							  NULL);
-			}
-		}
+		for (; i < imc_num; i++)
+			enable_rrls_hbm(&d->imc[i], enable);
	}
}
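
For reference, the tables and helpers above depend on a struct reg_rrl descriptor and an rrl_mode enum declared elsewhere in the driver (the skx_common header), which this hunk does not show. Below is a minimal sketch of what those declarations would look like, inferred only from how the fields are used in the diff; the bound names NUM_RRL_SET and NUM_RRL_REG, the array sizes, the field order, and the comments are assumptions, not the actual header.

/* Sketch only: inferred from the usage above, not the real skx_common.h. */
#include <linux/types.h>

#define NUM_RRL_SET	3	/* Assumed bound: largest .set_num above (spr_reg_rrl_ddr). */
#define NUM_RRL_REG	6	/* Assumed bound: six offsets listed per register set above. */

enum rrl_mode {
	FRE_SCRUB,	/* First read error logged, patrol-scrub path. */
	FRE_DEMAND,	/* First read error logged, demand-read path. */
	LRE_SCRUB,	/* Last read error logged, patrol-scrub path. */
	LRE_DEMAND,	/* Last read error logged, demand-read path. */
};

struct reg_rrl {
	int set_num;				/* Number of RRL register sets per channel. */
	enum rrl_mode modes[NUM_RRL_SET];	/* Logging mode of each register set. */
	u32 offsets[NUM_RRL_SET][NUM_RRL_REG];	/* MMIO offsets; [set][0] is the control register. */
	u8 widths[NUM_RRL_REG];			/* Register widths in bytes (4 or 8). */
	u32 uc_mask;				/* Control bit: log uncorrectable errors. */
	u32 en_patspr_mask;			/* Control bit: log patrol-scrub errors. */
	u32 noover_mask;			/* Control bit: don't overwrite a logged error. */
	u32 en_mask;				/* Control bit: enable retry-read-error logging. */
};

The per-platform res_config entries (not shown in this hunk) are then assumed to point reg_rrl_ddr at icx_reg_rrl_ddr or spr_reg_rrl_ddr and reg_rrl_hbm[0]/[1] at the two HBM pseudo-channel tables, which is what enable_rrls_ddr() and enable_rrls_hbm() dereference through res_cfg.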