@@ -91,13 +91,18 @@ static const struct rhashtable_params bch_promote_params = {
 	.automatic_shrinking	= true,
 };
 
+static inline bool have_io_error(struct bch_io_failures *failed)
+{
+	return failed && failed->nr;
+}
+
 static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
 				 struct bpos pos,
 				 struct bch_io_opts opts,
 				 unsigned flags,
 				 struct bch_io_failures *failed)
 {
-	if (!failed) {
+	if (!have_io_error(failed)) {
 		BUG_ON(!opts.promote_target);
 
 		if (!(flags & BCH_READ_MAY_PROMOTE))
@@ -224,7 +229,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 
 	struct data_update_opts update_opts = {};
 
-	if (!failed) {
+	if (!have_io_error(failed)) {
 		update_opts.target	= opts.promote_target;
 		update_opts.extra_replicas = 1;
 		update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED;
@@ -286,7 +291,7 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
 	 * if failed != NULL we're not actually doing a promote, we're
 	 * recovering from an io/checksum error
 	 */
-	bool promote_full = (failed ||
+	bool promote_full = (have_io_error(failed) ||
 			     *read_full ||
 			     READ_ONCE(c->opts.promote_whole_extents));
 	/* data might have to be decompressed in the write path: */
@@ -989,7 +994,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		bounce = true;
 	}
 
-	if (orig->opts.promote_target)// || failed)
+	if (orig->opts.promote_target || have_io_error(failed))
 		promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
 					&rbio, &bounce, &read_full, failed);
 
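Note: the new have_io_error() helper treats a NULL pointer and a zero-length failure list the same way, which is why the callers above switch from checking `!failed` to `!have_io_error(failed)`. Below is a minimal standalone sketch of that semantic, not the actual bcachefs code; the struct layout is a simplified assumption (only the `nr` count from the diff is used, the real struct bch_io_failures carries more state).

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-in for the bcachefs struct (assumed layout):
	 * only the count of recorded failures matters for this helper. */
	struct bch_io_failures {
		unsigned char	nr;	/* number of recorded IO failures */
	};

	static inline bool have_io_error(struct bch_io_failures *failed)
	{
		/* True only if a failure list was passed in AND it
		 * records at least one failure. */
		return failed && failed->nr;
	}

	int main(void)
	{
		struct bch_io_failures empty = { .nr = 0 };
		struct bch_io_failures bad   = { .nr = 2 };

		printf("%d %d %d\n",
		       have_io_error(NULL),	/* 0: no failure list at all */
		       have_io_error(&empty),	/* 0: list passed, nothing recorded */
		       have_io_error(&bad));	/* 1: error-recovery path should run */
		return 0;
	}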