@@ -1536,7 +1536,8 @@ xfs_alloc_ag_vextent_lastblock(
  */
 STATIC int
 xfs_alloc_ag_vextent_near(
-	struct xfs_alloc_arg	*args)
+	struct xfs_alloc_arg	*args,
+	uint32_t		alloc_flags)
 {
 	struct xfs_alloc_cur	acur = {};
 	int			error;		/* error code */
@@ -1612,7 +1613,7 @@ xfs_alloc_ag_vextent_near(
 		if (acur.busy) {
 			trace_xfs_alloc_near_busy(args);
 			xfs_extent_busy_flush(args->mp, args->pag,
-					acur.busy_gen);
+					acur.busy_gen, alloc_flags);
 			goto restart;
 		}
 		trace_xfs_alloc_size_neither(args);
@@ -1635,21 +1636,22 @@ xfs_alloc_ag_vextent_near(
  * and of the form k * prod + mod unless there's nothing that large.
  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
  */
-STATIC int			/* error */
+static int
 xfs_alloc_ag_vextent_size(
-	xfs_alloc_arg_t	*args)		/* allocation argument structure */
+	struct xfs_alloc_arg	*args,
+	uint32_t		alloc_flags)
 {
-	struct xfs_agf	*agf = args->agbp->b_addr;
-	struct xfs_btree_cur	*bno_cur;	/* cursor for bno btree */
-	struct xfs_btree_cur	*cnt_cur;	/* cursor for cnt btree */
-	int		error;		/* error result */
-	xfs_agblock_t	fbno;		/* start of found freespace */
-	xfs_extlen_t	flen;		/* length of found freespace */
-	int		i;		/* temp status variable */
-	xfs_agblock_t	rbno;		/* returned block number */
-	xfs_extlen_t	rlen;		/* length of returned extent */
-	bool		busy;
-	unsigned	busy_gen;
+	struct xfs_agf		*agf = args->agbp->b_addr;
+	struct xfs_btree_cur	*bno_cur;
+	struct xfs_btree_cur	*cnt_cur;
+	xfs_agblock_t		fbno;		/* start of found freespace */
+	xfs_extlen_t		flen;		/* length of found freespace */
+	xfs_agblock_t		rbno;		/* returned block number */
+	xfs_extlen_t		rlen;		/* length of returned extent */
+	bool			busy;
+	unsigned		busy_gen;
+	int			error;
+	int			i;
 
 restart:
 	/*
@@ -1717,8 +1719,8 @@ xfs_alloc_ag_vextent_size(
 			xfs_btree_del_cursor(cnt_cur,
 					     XFS_BTREE_NOERROR);
 			trace_xfs_alloc_size_busy(args);
-			xfs_extent_busy_flush(args->mp,
-					args->pag, busy_gen);
+			xfs_extent_busy_flush(args->mp, args->pag,
+					busy_gen, alloc_flags);
 			goto restart;
 		}
 	}
@@ -1802,7 +1804,8 @@ xfs_alloc_ag_vextent_size(
 		if (busy) {
 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 			trace_xfs_alloc_size_busy(args);
-			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
+			xfs_extent_busy_flush(args->mp, args->pag, busy_gen,
+					alloc_flags);
 			goto restart;
 		}
 		goto out_nominleft;
@@ -2572,7 +2575,7 @@ xfs_exact_minlen_extent_available(
 int			/* error */
 xfs_alloc_fix_freelist(
 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
-	int			flags)	/* XFS_ALLOC_FLAG_... */
+	uint32_t		alloc_flags)
 {
 	struct xfs_mount	*mp = args->mp;
 	struct xfs_perag	*pag = args->pag;
@@ -2588,7 +2591,7 @@ xfs_alloc_fix_freelist(
 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
 
 	if (!xfs_perag_initialised_agf(pag)) {
-		error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
+		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
 		if (error) {
 			/* Couldn't lock the AGF so skip this AG. */
 			if (error == -EAGAIN)
@@ -2604,13 +2607,13 @@ xfs_alloc_fix_freelist(
 	 */
 	if (xfs_perag_prefers_metadata(pag) &&
 	    (args->datatype & XFS_ALLOC_USERDATA) &&
-	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
-		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
+	    (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) {
+		ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING));
 		goto out_agbp_relse;
 	}
 
 	need = xfs_alloc_min_freelist(mp, pag);
-	if (!xfs_alloc_space_available(args, need, flags |
+	if (!xfs_alloc_space_available(args, need, alloc_flags |
 			XFS_ALLOC_FLAG_CHECK))
 		goto out_agbp_relse;
 
@@ -2619,7 +2622,7 @@ xfs_alloc_fix_freelist(
 	 * Can fail if we're not blocking on locks, and it's held.
 	 */
 	if (!agbp) {
-		error = xfs_alloc_read_agf(pag, tp, flags, &agbp);
+		error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp);
 		if (error) {
 			/* Couldn't lock the AGF so skip this AG. */
 			if (error == -EAGAIN)
@@ -2634,7 +2637,7 @@ xfs_alloc_fix_freelist(
 
 	/* If there isn't enough total space or single-extent, reject it. */
 	need = xfs_alloc_min_freelist(mp, pag);
-	if (!xfs_alloc_space_available(args, need, flags))
+	if (!xfs_alloc_space_available(args, need, alloc_flags))
 		goto out_agbp_relse;
 
 #ifdef DEBUG
@@ -2672,11 +2675,12 @@ xfs_alloc_fix_freelist(
 	 */
 	memset(&targs, 0, sizeof(targs));
 	/* struct copy below */
-	if (flags & XFS_ALLOC_FLAG_NORMAP)
+	if (alloc_flags & XFS_ALLOC_FLAG_NORMAP)
 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
 	else
 		targs.oinfo = XFS_RMAP_OINFO_AG;
-	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
+	while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) &&
+			pag->pagf_flcount > need) {
 		error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0);
 		if (error)
 			goto out_agbp_relse;
@@ -2704,7 +2708,7 @@ xfs_alloc_fix_freelist(
 	targs.resv = XFS_AG_RESV_AGFL;
 
 	/* Allocate as many blocks as possible at once. */
-	error = xfs_alloc_ag_vextent_size(&targs);
+	error = xfs_alloc_ag_vextent_size(&targs, alloc_flags);
 	if (error)
 		goto out_agflbp_relse;
 
@@ -2714,7 +2718,7 @@ xfs_alloc_fix_freelist(
 		 * on a completely full ag.
 		 */
 		if (targs.agbno == NULLAGBLOCK) {
-			if (flags & XFS_ALLOC_FLAG_FREEING)
+			if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
 				break;
 			goto out_agflbp_relse;
 		}
@@ -3230,7 +3234,7 @@ xfs_exact_minlen_extent_available(
 static int
 xfs_alloc_vextent_prepare_ag(
 	struct xfs_alloc_arg	*args,
-	uint32_t		flags)
+	uint32_t		alloc_flags)
 {
 	bool			need_pag = !args->pag;
 	int			error;
@@ -3239,7 +3243,7 @@ xfs_alloc_vextent_prepare_ag(
 		args->pag = xfs_perag_get(args->mp, args->agno);
 
 	args->agbp = NULL;
-	error = xfs_alloc_fix_freelist(args, flags);
+	error = xfs_alloc_fix_freelist(args, alloc_flags);
 	if (error) {
 		trace_xfs_alloc_vextent_nofix(args);
 		if (need_pag)
@@ -3361,6 +3365,7 @@ xfs_alloc_vextent_this_ag(
 {
 	struct xfs_mount	*mp = args->mp;
 	xfs_agnumber_t		minimum_agno;
+	uint32_t		alloc_flags = 0;
 	int			error;
 
 	ASSERT(args->pag != NULL);
@@ -3379,9 +3384,9 @@ xfs_alloc_vextent_this_ag(
 		return error;
 	}
 
-	error = xfs_alloc_vextent_prepare_ag(args, 0);
+	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
 	if (!error && args->agbp)
-		error = xfs_alloc_ag_vextent_size(args);
+		error = xfs_alloc_ag_vextent_size(args, alloc_flags);
 
 	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
 }
@@ -3410,20 +3415,20 @@ xfs_alloc_vextent_iterate_ags(
 	xfs_agnumber_t		minimum_agno,
 	xfs_agnumber_t		start_agno,
 	xfs_agblock_t		target_agbno,
-	uint32_t		flags)
+	uint32_t		alloc_flags)
 {
 	struct xfs_mount	*mp = args->mp;
 	xfs_agnumber_t		restart_agno = minimum_agno;
 	xfs_agnumber_t		agno;
 	int			error = 0;
 
-	if (flags & XFS_ALLOC_FLAG_TRYLOCK)
+	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
 		restart_agno = 0;
 restart:
 	for_each_perag_wrap_range(mp, start_agno, restart_agno,
 			mp->m_sb.sb_agcount, agno, args->pag) {
 		args->agno = agno;
-		error = xfs_alloc_vextent_prepare_ag(args, flags);
+		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
 		if (error)
 			break;
 		if (!args->agbp) {
@@ -3437,10 +3442,10 @@ xfs_alloc_vextent_iterate_ags(
 		 */
 		if (args->agno == start_agno && target_agbno) {
 			args->agbno = target_agbno;
-			error = xfs_alloc_ag_vextent_near(args);
+			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
 		} else {
 			args->agbno = 0;
-			error = xfs_alloc_ag_vextent_size(args);
+			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
 		}
 		break;
 	}
@@ -3457,8 +3462,8 @@ xfs_alloc_vextent_iterate_ags(
 	 * constraining flags by the caller, drop them and retry the allocation
 	 * without any constraints being set.
 	 */
-	if (flags) {
-		flags = 0;
+	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
+		alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
 		restart_agno = minimum_agno;
 		goto restart;
 	}
@@ -3486,6 +3491,7 @@ xfs_alloc_vextent_start_ag(
 	xfs_agnumber_t		start_agno;
 	xfs_agnumber_t		rotorstep = xfs_rotorstep;
 	bool			bump_rotor = false;
+	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
 	int			error;
 
 	ASSERT(args->pag == NULL);
@@ -3512,7 +3518,7 @@ xfs_alloc_vextent_start_ag(
 
 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
-			XFS_FSB_TO_AGBNO(mp, target), XFS_ALLOC_FLAG_TRYLOCK);
+			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
 
 	if (bump_rotor) {
 		if (args->agno == start_agno)
@@ -3539,6 +3545,7 @@ xfs_alloc_vextent_first_ag(
 	struct xfs_mount	*mp = args->mp;
 	xfs_agnumber_t		minimum_agno;
 	xfs_agnumber_t		start_agno;
+	uint32_t		alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;
 	int			error;
 
 	ASSERT(args->pag == NULL);
@@ -3557,7 +3564,7 @@ xfs_alloc_vextent_first_ag(
 
 	start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target));
 	error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno,
-			XFS_FSB_TO_AGBNO(mp, target), 0);
+			XFS_FSB_TO_AGBNO(mp, target), alloc_flags);
 	return xfs_alloc_vextent_finish(args, minimum_agno, error, true);
 }
 
@@ -3610,6 +3617,7 @@ xfs_alloc_vextent_near_bno(
 	struct xfs_mount	*mp = args->mp;
 	xfs_agnumber_t		minimum_agno;
 	bool			needs_perag = args->pag == NULL;
+	uint32_t		alloc_flags = 0;
 	int			error;
 
 	if (!needs_perag)
@@ -3630,9 +3638,9 @@ xfs_alloc_vextent_near_bno(
 	if (needs_perag)
 		args->pag = xfs_perag_grab(mp, args->agno);
 
-	error = xfs_alloc_vextent_prepare_ag(args, 0);
+	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
 	if (!error && args->agbp)
-		error = xfs_alloc_ag_vextent_near(args);
+		error = xfs_alloc_ag_vextent_near(args, alloc_flags);
 
 	return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag);
 }