@@ -1216,6 +1216,12 @@ static CLOSURE_CALLBACK(bch2_nocow_write_done)
 	bch2_write_done(cl);
 }
 
+struct bucket_to_lock {
+	struct bpos		b;
+	unsigned		gen;
+	struct nocow_lock_bucket *l;
+};
+
 static void bch2_nocow_write(struct bch_write_op *op)
 {
 	struct bch_fs *c = op->c;
@@ -1224,18 +1230,16 @@ static void bch2_nocow_write(struct bch_write_op *op)
 	struct bkey_s_c k;
 	struct bkey_ptrs_c ptrs;
 	const struct bch_extent_ptr *ptr;
-	struct {
-		struct bpos	b;
-		unsigned	gen;
-		struct nocow_lock_bucket *l;
-	} buckets[BCH_REPLICAS_MAX];
-	unsigned nr_buckets = 0;
+	DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
+	struct bucket_to_lock *i;
 	u32 snapshot;
-	int ret, i;
+	struct bucket_to_lock *stale_at;
+	int ret;
 
 	if (op->flags & BCH_WRITE_MOVE)
 		return;
 
+	darray_init(&buckets);
 	trans = bch2_trans_get(c);
 retry:
 	bch2_trans_begin(trans);
@@ -1250,7 +1254,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
 	while (1) {
 		struct bio *bio = &op->wbio.bio;
 
-		nr_buckets = 0;
+		buckets.nr = 0;
 
 		k = bch2_btree_iter_peek_slot(&iter);
 		ret = bkey_err(k);
@@ -1263,26 +1267,26 @@ static void bch2_nocow_write(struct bch_write_op *op)
 			break;
 
 		if (bch2_keylist_realloc(&op->insert_keys,
-					op->inline_keys,
-					ARRAY_SIZE(op->inline_keys),
-					k.k->u64s))
+					 op->inline_keys,
+					 ARRAY_SIZE(op->inline_keys),
+					 k.k->u64s))
 			break;
 
 		/* Get iorefs before dropping btree locks: */
 		ptrs = bch2_bkey_ptrs_c(k);
 		bkey_for_each_ptr(ptrs, ptr) {
-			buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
-			buckets[nr_buckets].gen = ptr->gen;
-			buckets[nr_buckets].l =
-				bucket_nocow_lock(&c->nocow_locks,
-						  bucket_to_u64(buckets[nr_buckets].b));
-
-			prefetch(buckets[nr_buckets].l);
+			struct bpos b = PTR_BUCKET_POS(c, ptr);
+			struct nocow_lock_bucket *l =
+				bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
+			prefetch(l);
 
 			if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
 				goto err_get_ioref;
 
-			nr_buckets++;
+			/* XXX allocating memory with btree locks held - rare */
+			darray_push_gfp(&buckets, ((struct bucket_to_lock) {
+						   .b = b, .gen = ptr->gen, .l = l,
+						   }), GFP_KERNEL|__GFP_NOFAIL);
 
 			if (ptr->unwritten)
 				op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
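Note on the hunk above: the fixed buckets[BCH_REPLICAS_MAX] array becomes a darray with three preallocated slots, so pushing an entry should only need to allocate when an extent carries more than three pointers; that rare allocation happens with btree locks held, which is what the XXX comment and the GFP_KERNEL|__GFP_NOFAIL flags are about. The following is a standalone userspace C sketch of that preallocated-darray pattern, not the bcachefs darray.h implementation; every name ending in _sketch, and the helpers buckets_init/buckets_push/buckets_exit, are hypothetical stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for struct bucket_to_lock; field types simplified */
struct bucket_to_lock_sketch {
	unsigned long	bucket;
	unsigned	gen;
};

/* stand-in for DARRAY_PREALLOCATED(struct bucket_to_lock, 3) */
struct buckets_sketch {
	struct bucket_to_lock_sketch	*data;	/* points at preallocated[] until it overflows */
	size_t				nr, size;
	struct bucket_to_lock_sketch	preallocated[3];
};

static void buckets_init(struct buckets_sketch *d)
{
	d->data	= d->preallocated;
	d->nr	= 0;
	d->size	= 3;
}

/* roughly the shape of darray_push_gfp(): allocate only once the inline buffer is full */
static int buckets_push(struct buckets_sketch *d, struct bucket_to_lock_sketch v)
{
	if (d->nr == d->size) {
		size_t new_size = d->size * 2;
		struct bucket_to_lock_sketch *n = malloc(new_size * sizeof(*n));

		if (!n)
			return -1;
		memcpy(n, d->data, d->nr * sizeof(*n));
		if (d->data != d->preallocated)
			free(d->data);
		d->data	= n;
		d->size	= new_size;
	}
	d->data[d->nr++] = v;
	return 0;
}

/* roughly the shape of darray_exit(): free only if we ever left the inline buffer */
static void buckets_exit(struct buckets_sketch *d)
{
	if (d->data != d->preallocated)
		free(d->data);
	buckets_init(d);
}

int main(void)
{
	struct buckets_sketch buckets;
	struct bucket_to_lock_sketch *i;

	buckets_init(&buckets);

	/* five entries force one heap grow; three or fewer never allocate */
	for (unsigned long b = 0; b < 5; b++)
		buckets_push(&buckets, (struct bucket_to_lock_sketch) { .bucket = b, .gen = 1 });

	/* equivalent of darray_for_each(buckets, i) */
	for (i = buckets.data; i < buckets.data + buckets.nr; i++)
		printf("bucket %lu gen %u\n", i->bucket, i->gen);

	buckets.nr = 0;		/* the patch resets the count the same way at the top of each loop iteration */
	buckets_exit(&buckets);
	return 0;
}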
@@ -1296,21 +1300,21 @@ static void bch2_nocow_write(struct bch_write_op *op)
 		if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
 			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
 
-		for (i = 0; i < nr_buckets; i++) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
-			struct nocow_lock_bucket *l = buckets[i].l;
-			bool stale;
+		darray_for_each(buckets, i) {
+			struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
 
-			__bch2_bucket_nocow_lock(&c->nocow_locks, l,
-						 bucket_to_u64(buckets[i].b),
+			__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
+						 bucket_to_u64(i->b),
 						 BUCKET_NOCOW_LOCK_UPDATE);
 
 			rcu_read_lock();
-			stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
+			bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen);
 			rcu_read_unlock();
 
-			if (unlikely(stale))
+			if (unlikely(stale)) {
+				stale_at = i;
 				goto err_bucket_stale;
+			}
 		}
 
 		bio = &op->wbio.bio;
@@ -1346,15 +1350,14 @@ static void bch2_nocow_write(struct bch_write_op *op)
 
 	if (ret) {
 		bch_err_inum_offset_ratelimited(c,
-				op->pos.inode,
-				op->pos.offset << 9,
-				"%s: btree lookup error %s",
-				__func__, bch2_err_str(ret));
+			op->pos.inode, op->pos.offset << 9,
+			"%s: btree lookup error %s", __func__, bch2_err_str(ret));
 		op->error = ret;
 		op->flags |= BCH_WRITE_DONE;
 	}
 
 	bch2_trans_put(trans);
+	darray_exit(&buckets);
 
 	/* fallback to cow write path? */
 	if (!(op->flags & BCH_WRITE_DONE)) {
@@ -1374,24 +1377,21 @@ static void bch2_nocow_write(struct bch_write_op *op)
 	}
 	return;
 err_get_ioref:
-	for (i = 0; i < nr_buckets; i++)
-		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
+	darray_for_each(buckets, i)
+		percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
 
 	/* Fall back to COW path: */
 	goto out;
 err_bucket_stale:
-	while (i >= 0) {
-		bch2_bucket_nocow_unlock(&c->nocow_locks,
-					 buckets[i].b,
-					 BUCKET_NOCOW_LOCK_UPDATE);
-		--i;
+	darray_for_each(buckets, i) {
+		bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
+		if (i == stale_at)
+			break;
 	}
-	for (i = 0; i < nr_buckets; i++)
-		percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
 
 	/* We can retry this: */
 	ret = -BCH_ERR_transaction_restart;
-	goto out;
+	goto err_get_ioref;
 }
 
 static void __bch2_write(struct bch_write_op *op)
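Note on the error paths in the last hunk: the stale-bucket unwind no longer counts backwards from a signed index. The detection loop records the failing element in stale_at, err_bucket_stale walks the darray forward and unlocks every bucket up to and including it, and then jumps to err_get_ioref so the ioref-release loop is no longer duplicated. Below is a minimal standalone C sketch of that control flow only; lock_bucket, unlock_bucket, put_ioref and bucket_is_stale are hypothetical stubs standing in for the real nocow locking, ioref and generation-check machinery.

#include <stdbool.h>
#include <stdio.h>

#define NR_BUCKETS 4

static void lock_bucket(int b)		{ printf("lock   %d\n", b); }
static void unlock_bucket(int b)	{ printf("unlock %d\n", b); }
static void put_ioref(int b)		{ printf("put    %d\n", b); }
static bool bucket_is_stale(int b)	{ return b == 2; }	/* pretend bucket 2 went stale */

static int nocow_write_sketch(void)
{
	int buckets[NR_BUCKETS] = { 0, 1, 2, 3 };
	int *i, *stale_at = NULL;
	int ret = 0;

	for (i = buckets; i < buckets + NR_BUCKETS; i++) {
		lock_bucket(*i);
		if (bucket_is_stale(*i)) {
			stale_at = i;	/* remember where we stopped, as the patch does */
			goto err_bucket_stale;
		}
	}
	/* ... the write itself would go here; locks are dropped on completion ... */
out:
	return ret;

err_get_ioref:
	/* shared ref-release loop, also reached from err_bucket_stale below */
	for (i = buckets; i < buckets + NR_BUCKETS; i++)
		put_ioref(*i);
	goto out;

err_bucket_stale:
	/* forward walk: everything up to and including stale_at is locked, nothing after it */
	for (i = buckets; i < buckets + NR_BUCKETS; i++) {
		unlock_bucket(*i);
		if (i == stale_at)
			break;
	}
	ret = -1;		/* "we can retry this" in the real code */
	goto err_get_ioref;	/* reuse the loop above instead of duplicating it */
}

int main(void)
{
	return nocow_write_sketch() ? 1 : 0;
}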