
Commit ba3985c

qzhuo2aegl authored and committed
EDAC/{skx_common,i10nm}: Refactor enable_retry_rd_err_log()
Refactor enable_retry_rd_err_log() using helper functions for both DDR and HBM, making the RRL control bits configurable instead of hard-coded. Additionally, explicitly define the four RRL modes for better readability.

No functional changes intended.

Signed-off-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Tested-by: Feng Xu <feng.f.xu@intel.com>
Link: https://lore.kernel.org/r/20250417150724.1170168-6-qiuxu.zhuo@intel.com
1 parent 1a8a6af commit ba3985c

File tree

  drivers/edac/i10nm_base.c
  drivers/edac/skx_common.h

2 files changed, 156 insertions(+), 101 deletions(-)
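The core of the refactor, condensed from the i10nm_base.c diff below: instead of open-coding the RETRY_RD_ERR_LOG_* macros against the scrub and demand registers, the new enable_rrl() helper derives two booleans from the set's rrl_mode (first vs. last error, patrol scrub vs. demand) and then sets or clears whichever control-bit masks the platform's reg_rrl table supplies. The sketch below is a simplified, standalone model of that enable-path decision logic, not the driver code itself; reg_rrl_bits and program_rrl_ctl are hypothetical stand-ins chosen so the snippet compiles outside the kernel.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The four RRL modes added to skx_common.h in this commit. */
enum rrl_mode { LRE_SCRUB, LRE_DEMAND, FRE_SCRUB, FRE_DEMAND };

/* Hypothetical trimmed-down stand-in for struct reg_rrl: control-bit masks only. */
struct reg_rrl_bits {
	uint32_t uc_mask;
	uint32_t en_patspr_mask;
	uint32_t noover_mask;
	uint32_t en_mask;
};

/* Mirror of the enable branch of enable_rrl(): derive first/scrub from the mode,
 * then program the configurable control bits accordingly. */
static uint32_t program_rrl_ctl(uint32_t v, enum rrl_mode mode,
				const struct reg_rrl_bits *b)
{
	bool first = (mode == FRE_SCRUB || mode == FRE_DEMAND);	/* first vs. last read error */
	bool scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);		/* patrol scrub vs. demand read */

	v &= ~b->uc_mask;			/* clear the UC control bit, as enable_rrl() does */
	v = first ? (v | b->noover_mask) : (v & ~b->noover_mask);
	v = scrub ? (v | b->en_patspr_mask) : (v & ~b->en_patspr_mask);
	return v | b->en_mask;			/* finally, turn logging on */
}

int main(void)
{
	/* Bit positions taken from the ICX/SPR DDR tables in the diff. */
	struct reg_rrl_bits b = {
		.uc_mask = 1u << 1, .en_patspr_mask = 1u << 13,
		.noover_mask = 1u << 14, .en_mask = 1u << 15,
	};

	printf("LRE_SCRUB  -> 0x%08x\n", (unsigned int)program_rrl_ctl(0, LRE_SCRUB, &b));
	printf("FRE_DEMAND -> 0x%08x\n", (unsigned int)program_rrl_ctl(0, FRE_DEMAND, &b));
	return 0;
}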


drivers/edac/i10nm_base.c

Lines changed: 136 additions & 101 deletions
@@ -72,11 +72,6 @@
 #define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
 #define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)
 
-#define RETRY_RD_ERR_LOG_UC		BIT(1)
-#define RETRY_RD_ERR_LOG_EN_PATSPR	BIT(13)
-#define RETRY_RD_ERR_LOG_NOOVER	BIT(14)
-#define RETRY_RD_ERR_LOG_EN		BIT(15)
-#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
 #define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))
 
 static struct list_head *i10nm_edac_list;
@@ -88,153 +83,193 @@ static bool mem_cfg_2lm;
 
 static struct reg_rrl icx_reg_rrl_ddr = {
 	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
 	.offsets = {
 		{0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8},
 		{0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0},
 	},
+	.widths = {4, 4, 4, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
 };
 
 static struct reg_rrl spr_reg_rrl_ddr = {
 	.set_num = 3,
+	.modes = {LRE_SCRUB, LRE_DEMAND, FRE_DEMAND},
 	.offsets = {
 		{0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8},
 		{0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0},
 		{0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10},
 	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
 };
 
 static struct reg_rrl spr_reg_rrl_hbm_pch0 = {
 	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
 	.offsets = {
 		{0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8},
 		{0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0},
 	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
 };
 
 static struct reg_rrl spr_reg_rrl_hbm_pch1 = {
 	.set_num = 2,
+	.modes = {LRE_SCRUB, LRE_DEMAND},
 	.offsets = {
 		{0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8},
 		{0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0},
 	},
+	.widths = {4, 4, 8, 4, 4, 8},
+	.uc_mask = BIT(1),
+	.en_patspr_mask = BIT(13),
+	.noover_mask = BIT(14),
+	.en_mask = BIT(15),
 };
 
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable, u32 *rrl_ctl,
-				      u32 *offsets_scrub, u32 *offsets_demand,
-				      u32 *offsets_demand2)
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
 {
-	u32 s, d, d2;
+	switch (width) {
+	case 4:
+		return I10NM_GET_REG32(imc, chan, offset);
+	case 8:
+		return I10NM_GET_REG64(imc, chan, offset);
+	default:
+		i10nm_printk(KERN_ERR, "Invalid read RRL 0x%x width %d\n", offset, width);
+		return 0;
+	}
+}
 
-	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
-	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
-	if (offsets_demand2)
-		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
+static void write_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width, u64 val)
+{
+	switch (width) {
+	case 4:
+		return I10NM_SET_REG32(imc, chan, offset, (u32)val);
+	default:
+		i10nm_printk(KERN_ERR, "Invalid write RRL 0x%x width %d\n", offset, width);
+	}
+}
+
+static void enable_rrl(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+		       int rrl_set, bool enable, u32 *rrl_ctl)
+{
+	enum rrl_mode mode = rrl->modes[rrl_set];
+	u32 offset = rrl->offsets[rrl_set][0], v;
+	u8 width = rrl->widths[0];
+	bool first, scrub;
+
+	/* First or last read error. */
+	first = (mode == FRE_SCRUB || mode == FRE_DEMAND);
+	/* Patrol scrub or on-demand read error. */
+	scrub = (mode == FRE_SCRUB || mode == LRE_SCRUB);
+
+	v = read_imc_reg(imc, chan, offset, width);
 
 	if (enable) {
-		/* Save default configurations */
-		rrl_ctl[0] = s;
-		rrl_ctl[1] = d;
-		if (offsets_demand2)
-			rrl_ctl[2] = d2;
-
-		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
-		s |=  RETRY_RD_ERR_LOG_EN_PATSPR;
-		s |=  RETRY_RD_ERR_LOG_EN;
-		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
-		d &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-		d |=  RETRY_RD_ERR_LOG_EN;
-
-		if (offsets_demand2) {
-			d2 &= ~RETRY_RD_ERR_LOG_UC;
-			d2 &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-			d2 |=  RETRY_RD_ERR_LOG_NOOVER;
-			d2 |=  RETRY_RD_ERR_LOG_EN;
-		}
+		/* Save default configurations. */
+		*rrl_ctl = v;
+		v &= ~rrl->uc_mask;
+
+		if (first)
+			v |= rrl->noover_mask;
+		else
+			v &= ~rrl->noover_mask;
+
+		if (scrub)
+			v |= rrl->en_patspr_mask;
+		else
+			v &= ~rrl->en_patspr_mask;
+
+		v |= rrl->en_mask;
 	} else {
-		/* Restore default configurations */
-		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_UC)
-			s |= RETRY_RD_ERR_LOG_UC;
-		if (rrl_ctl[0] & RETRY_RD_ERR_LOG_NOOVER)
-			s |= RETRY_RD_ERR_LOG_NOOVER;
-		if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN_PATSPR))
-			s &= ~RETRY_RD_ERR_LOG_EN_PATSPR;
-		if (!(rrl_ctl[0] & RETRY_RD_ERR_LOG_EN))
-			s &= ~RETRY_RD_ERR_LOG_EN;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_UC)
-			d |= RETRY_RD_ERR_LOG_UC;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_NOOVER)
-			d |= RETRY_RD_ERR_LOG_NOOVER;
-		if (rrl_ctl[1] & RETRY_RD_ERR_LOG_EN_PATSPR)
-			d |= RETRY_RD_ERR_LOG_EN_PATSPR;
-		if (!(rrl_ctl[1] & RETRY_RD_ERR_LOG_EN))
-			d &= ~RETRY_RD_ERR_LOG_EN;
-
-		if (offsets_demand2) {
-			if (rrl_ctl[2] & RETRY_RD_ERR_LOG_UC)
-				d2 |= RETRY_RD_ERR_LOG_UC;
-			if (rrl_ctl[2] & RETRY_RD_ERR_LOG_EN_PATSPR)
-				d2 |= RETRY_RD_ERR_LOG_EN_PATSPR;
-			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_NOOVER))
-				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
-			if (!(rrl_ctl[2] & RETRY_RD_ERR_LOG_EN))
-				d2 &= ~RETRY_RD_ERR_LOG_EN;
+		/* Restore default configurations. */
+		if (*rrl_ctl & rrl->uc_mask)
+			v |= rrl->uc_mask;
+
+		if (first) {
+			if (!(*rrl_ctl & rrl->noover_mask))
+				v &= ~rrl->noover_mask;
+		} else {
+			if (*rrl_ctl & rrl->noover_mask)
+				v |= rrl->noover_mask;
 		}
+
+		if (scrub) {
+			if (!(*rrl_ctl & rrl->en_patspr_mask))
+				v &= ~rrl->en_patspr_mask;
+		} else {
+			if (*rrl_ctl & rrl->en_patspr_mask)
+				v |= rrl->en_patspr_mask;
+		}
+
+		if (!(*rrl_ctl & rrl->en_mask))
+			v &= ~rrl->en_mask;
 	}
 
-	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
-	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
-	if (offsets_demand2)
-		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
+	write_imc_reg(imc, chan, offset, width, v);
+}
+
+static void enable_rrls(struct skx_imc *imc, int chan, struct reg_rrl *rrl,
+			bool enable, u32 *rrl_ctl)
+{
+	for (int i = 0; i < rrl->set_num; i++)
+		enable_rrl(imc, chan, rrl, i, enable, rrl_ctl + i);
+}
+
+static void enable_rrls_ddr(struct skx_imc *imc, bool enable)
+{
+	struct reg_rrl *rrl_ddr = res_cfg->reg_rrl_ddr;
+	int i, chan_num = res_cfg->ddr_chan_num;
+	struct skx_channel *chan = imc->chan;
+
+	if (!imc->mbase)
+		return;
+
+	for (i = 0; i < chan_num; i++)
+		enable_rrls(imc, i, rrl_ddr, enable, chan[i].rrl_ctl[0]);
+}
+
+static void enable_rrls_hbm(struct skx_imc *imc, bool enable)
+{
+	struct reg_rrl **rrl_hbm = res_cfg->reg_rrl_hbm;
+	int i, chan_num = res_cfg->hbm_chan_num;
+	struct skx_channel *chan = imc->chan;
+
+	if (!imc->mbase || !imc->hbm_mc || !rrl_hbm[0] || !rrl_hbm[1])
+		return;
+
+	for (i = 0; i < chan_num; i++) {
+		enable_rrls(imc, i, rrl_hbm[0], enable, chan[i].rrl_ctl[0]);
+		enable_rrls(imc, i, rrl_hbm[1], enable, chan[i].rrl_ctl[1]);
+	}
 }
 
 static void enable_retry_rd_err_log(bool enable)
 {
-	int i, j, imc_num, chan_num;
-	struct skx_channel *chan;
-	struct skx_imc *imc;
 	struct skx_dev *d;
+	int i, imc_num;
 
 	edac_dbg(2, "\n");
 
 	list_for_each_entry(d, i10nm_edac_list, list) {
 		imc_num  = res_cfg->ddr_imc_num;
-		chan_num = res_cfg->ddr_chan_num;
-
-		for (i = 0; i < imc_num; i++) {
-			imc = &d->imc[i];
-			if (!imc->mbase)
-				continue;
-
-			chan = d->imc[i].chan;
-			for (j = 0; j < chan_num; j++)
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
-							  res_cfg->reg_rrl_ddr->offsets[0],
-							  res_cfg->reg_rrl_ddr->offsets[1],
-							  res_cfg->reg_rrl_ddr->set_num > 2 ?
-							  res_cfg->reg_rrl_ddr->offsets[2] : NULL);
-
-		}
+		for (i = 0; i < imc_num; i++)
+			enable_rrls_ddr(&d->imc[i], enable);
 
 		imc_num += res_cfg->hbm_imc_num;
-		chan_num = res_cfg->hbm_chan_num;
-
-		for (; i < imc_num; i++) {
-			imc = &d->imc[i];
-			if (!imc->mbase || !imc->hbm_mc)
-				continue;
-
-			chan = d->imc[i].chan;
-			for (j = 0; j < chan_num; j++) {
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[0],
-							  res_cfg->reg_rrl_hbm[0]->offsets[0],
-							  res_cfg->reg_rrl_hbm[0]->offsets[1],
-							  NULL);
-				__enable_retry_rd_err_log(imc, j, enable, chan[j].rrl_ctl[1],
-							  res_cfg->reg_rrl_hbm[1]->offsets[0],
-							  res_cfg->reg_rrl_hbm[1]->offsets[1],
-							  NULL);
-			}
-		}
+		for (; i < imc_num; i++)
+			enable_rrls_hbm(&d->imc[i], enable);
 	}
 }

drivers/edac/skx_common.h

Lines changed: 20 additions & 0 deletions
@@ -84,11 +84,31 @@
 /* Max RRL registers per set. */
 #define NUM_RRL_REG			6
 
+/* Modes of RRL register set. */
+enum rrl_mode {
+	/* Last read error from patrol scrub. */
+	LRE_SCRUB,
+	/* Last read error from demand. */
+	LRE_DEMAND,
+	/* First read error from patrol scrub. */
+	FRE_SCRUB,
+	/* First read error from demand. */
+	FRE_DEMAND,
+};
+
 /* RRL registers per {,sub-,pseudo-}channel. */
 struct reg_rrl {
 	/* RRL register parts. */
 	int set_num;
+	enum rrl_mode modes[NUM_RRL_SET];
 	u32 offsets[NUM_RRL_SET][NUM_RRL_REG];
+	/* RRL register widths in bytes per set. */
+	u8 widths[NUM_RRL_REG];
+	/* RRL control bits of the first register per set. */
+	u32 uc_mask;
+	u32 en_patspr_mask;
+	u32 noover_mask;
+	u32 en_mask;
 };
 
 /*
0 commit comments

Comments
 (0)