@@ -1046,15 +1046,14 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
 
-static int iomap_write_delalloc_ifs_punch(struct inode *inode,
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 		struct folio *folio, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
 	unsigned int first_blk, last_blk, i;
 	loff_t last_byte;
 	u8 blkbits = inode->i_blkbits;
 	struct iomap_folio_state *ifs;
-	int ret = 0;
 
 	/*
 	 * When we have per-block dirty tracking, there can be
@@ -1064,56 +1063,42 @@ static int iomap_write_delalloc_ifs_punch(struct inode *inode,
 	 */
 	ifs = folio->private;
 	if (!ifs)
-		return ret;
+		return;
 
 	last_byte = min_t(loff_t, end_byte - 1,
 			folio_pos(folio) + folio_size(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
 	for (i = first_blk; i <= last_blk; i++) {
-		if (!ifs_block_is_dirty(folio, ifs, i)) {
-			ret = punch(inode, folio_pos(folio) + (i << blkbits),
+		if (!ifs_block_is_dirty(folio, ifs, i))
+			punch(inode, folio_pos(folio) + (i << blkbits),
 					1 << blkbits, iomap);
-			if (ret)
-				return ret;
-		}
 	}
-
-	return ret;
 }
 
-
-static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
-	int ret = 0;
-
 	if (!folio_test_dirty(folio))
-		return ret;
+		return;
 
 	/* if dirty, punch up to offset */
 	if (start_byte > *punch_start_byte) {
-		ret = punch(inode, *punch_start_byte,
-				start_byte - *punch_start_byte, iomap);
-		if (ret)
-			return ret;
+		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+				iomap);
 	}
 
 	/* Punch non-dirty blocks within folio */
-	ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
 			iomap, punch);
-	if (ret)
-		return ret;
 
 	/*
 	 * Make sure the next punch start is correctly bound to
 	 * the end of this data range, not the end of the folio.
 	 */
 	*punch_start_byte = min_t(loff_t, end_byte,
 				folio_pos(folio) + folio_size(folio));
-
-	return ret;
 }
 
 /*
@@ -1133,13 +1118,12 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
  * simplify range iterations.
  */
-static int iomap_write_delalloc_scan(struct inode *inode,
+static void iomap_write_delalloc_scan(struct inode *inode,
 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
 	while (start_byte < end_byte) {
 		struct folio *folio;
-		int ret;
 
 		/* grab locked page */
 		folio = filemap_lock_folio(inode->i_mapping,
@@ -1150,20 +1134,14 @@ static int iomap_write_delalloc_scan(struct inode *inode,
 			continue;
 		}
 
-		ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
 				start_byte, end_byte, iomap, punch);
-		if (ret) {
-			folio_unlock(folio);
-			folio_put(folio);
-			return ret;
-		}
 
 		/* move offset to start of next folio in range */
 		start_byte = folio_next_index(folio) << PAGE_SHIFT;
 		folio_unlock(folio);
 		folio_put(folio);
 	}
-	return 0;
 }
 
 /*
@@ -1199,13 +1177,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
  * the code to subtle off-by-one bugs....
  */
-static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		loff_t end_byte, unsigned flags, struct iomap *iomap,
 		iomap_punch_t punch)
 {
 	loff_t punch_start_byte = start_byte;
 	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
-	int error = 0;
 
 	/*
 	 * Lock the mapping to avoid races with page faults re-instantiating
@@ -1222,13 +1199,15 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		/*
 		 * If there is no more data to scan, all that is left is to
 		 * punch out the remaining range.
+		 *
+		 * Note that mapping_seek_hole_data is only supposed to return
+		 * either an offset or -ENXIO, so WARN on any other error as
+		 * that would be an API change without updating the callers.
 		 */
 		if (start_byte == -ENXIO || start_byte == scan_end_byte)
 			break;
-		if (start_byte < 0) {
-			error = start_byte;
+		if (WARN_ON_ONCE(start_byte < 0))
 			goto out_unlock;
-		}
 		WARN_ON_ONCE(start_byte < punch_start_byte);
 		WARN_ON_ONCE(start_byte > scan_end_byte);
 
@@ -1238,10 +1217,8 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		 */
 		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
 				scan_end_byte, SEEK_HOLE);
-		if (data_end < 0) {
-			error = data_end;
+		if (WARN_ON_ONCE(data_end < 0))
 			goto out_unlock;
-		}
 
 		/*
 		 * If we race with post-direct I/O invalidation of the page cache,
@@ -1253,21 +1230,18 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		WARN_ON_ONCE(data_end < start_byte);
 		WARN_ON_ONCE(data_end > scan_end_byte);
 
-		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
-				start_byte, data_end, iomap, punch);
-		if (error)
-			goto out_unlock;
+		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+				data_end, iomap, punch);
 
 		/* The next data search starts at the end of this one. */
 		start_byte = data_end;
 	}
 
 	if (punch_start_byte < end_byte)
-		error = punch(inode, punch_start_byte,
-				end_byte - punch_start_byte, iomap);
+		punch(inode, punch_start_byte, end_byte - punch_start_byte,
+				iomap);
 out_unlock:
 	filemap_invalidate_unlock(inode->i_mapping);
-	return error;
 }
 
 /*
@@ -1300,7 +1274,7 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
  * ->punch
  *	internal filesystem allocation lock
  */
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 		loff_t pos, loff_t length, ssize_t written, unsigned flags,
 		struct iomap *iomap, iomap_punch_t punch)
 {
@@ -1309,11 +1283,11 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 	unsigned int blocksize = i_blocksize(inode);
 
 	if (iomap->type != IOMAP_DELALLOC)
-		return 0;
+		return;
 
 	/* If we didn't reserve the blocks, we're not allowed to punch them. */
 	if (!(iomap->flags & IOMAP_F_NEW))
-		return 0;
+		return;
 
 	/*
 	 * start_byte refers to the first unused block after a short write. If
@@ -1328,10 +1302,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 
 	/* Nothing to do if we've written the entire delalloc extent */
 	if (start_byte >= end_byte)
-		return 0;
+		return;
 
-	return iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
-			iomap, punch);
+	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
+			punch);
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
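With every function in this path now returning `void`, the `punch` callback itself has to be infallible as well. Below is a minimal sketch of what a filesystem-side callback and its `->iomap_end` hook might look like under the reworked contract. It assumes the `iomap_punch_t` typedef was switched to return `void` in the same series (that header change is not part of this diff), and every `example_*` identifier is hypothetical:

```c
#include <linux/fs.h>
#include <linux/iomap.h>

/* Hypothetical fs-internal helper that frees a delalloc reservation. */
static void example_remove_delalloc_blocks(struct inode *inode, loff_t offset,
		loff_t length);

/*
 * Hypothetical punch callback under the void-returning contract. Errors
 * can no longer be propagated back through the iomap core, so a failure
 * to free the reservation has to be handled inside the filesystem (e.g.
 * by shutting it down) rather than returned.
 */
static void example_buffered_write_punch(struct inode *inode, loff_t offset,
		loff_t length, struct iomap *iomap)
{
	/* Drop the delalloc reservation backing [offset, offset + length). */
	example_remove_delalloc_blocks(inode, offset, length);
}

static int example_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	/*
	 * After a short write, let the iomap core punch out delalloc blocks
	 * that were reserved but never covered by dirty folios. Note that
	 * this call no longer produces an error for the caller to check.
	 */
	iomap_file_buffered_write_punch_delalloc(inode, pos, length, written,
			flags, iomap, example_buffered_write_punch);
	return 0;
}
```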