@@ -1403,7 +1403,8 @@ static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
 static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 				      struct btrfs_chunk_map *map,
 				      struct zone_info *zone_info,
-				      unsigned long *active)
+				      unsigned long *active,
+				      u64 last_alloc)
 {
 	struct btrfs_fs_info *fs_info = bg->fs_info;
 
@@ -1426,6 +1427,13 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 				  zone_info[1].physical);
 		return -EIO;
 	}
+
+	if (zone_info[0].alloc_offset == WP_CONVENTIONAL)
+		zone_info[0].alloc_offset = last_alloc;
+
+	if (zone_info[1].alloc_offset == WP_CONVENTIONAL)
+		zone_info[1].alloc_offset = last_alloc;
+
 	if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
 		btrfs_err(bg->fs_info,
 			  "zoned: write pointer offset mismatch of zones in DUP profile");
@@ -1446,7 +1454,8 @@ static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
 static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
 					struct btrfs_chunk_map *map,
 					struct zone_info *zone_info,
-					unsigned long *active)
+					unsigned long *active,
+					u64 last_alloc)
 {
 	struct btrfs_fs_info *fs_info = bg->fs_info;
 	int i;
@@ -1461,10 +1470,12 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
 	bg->zone_capacity = min_not_zero(zone_info[0].capacity, zone_info[1].capacity);
 
 	for (i = 0; i < map->num_stripes; i++) {
-		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
-		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
+		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
 			continue;
 
+		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+			zone_info[i].alloc_offset = last_alloc;
+
 		if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
 		    !btrfs_test_opt(fs_info, DEGRADED)) {
 			btrfs_err(fs_info,
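In the DUP and RAID1 hunks above, the conventional-zone case needs no striping math: every stripe of a mirrored profile holds a complete copy of the block group, so the device-local position of a conventional zone is simply last_alloc itself. A minimal userspace sketch of that mapping (illustrative helper name, plain stdint types instead of the kernel's u64, not kernel code):

#include <stdint.h>

/* Mirrored profiles (DUP, RAID1*): each device carries a full copy, so the
 * per-device allocation offset for a conventional zone equals the logical
 * allocation offset in the block group. Illustrative only. */
uint64_t mirrored_alloc_offset(uint64_t last_alloc)
{
	return last_alloc;
}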
@@ -1494,7 +1505,8 @@ static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
 static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
 					struct btrfs_chunk_map *map,
 					struct zone_info *zone_info,
-					unsigned long *active)
+					unsigned long *active,
+					u64 last_alloc)
 {
 	struct btrfs_fs_info *fs_info = bg->fs_info;
 
@@ -1505,10 +1517,29 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
 	}
 
 	for (int i = 0; i < map->num_stripes; i++) {
-		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
-		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
+		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
 			continue;
 
+		if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+			u64 stripe_nr, full_stripe_nr;
+			u64 stripe_offset;
+			int stripe_index;
+
+			stripe_nr = div64_u64(last_alloc, map->stripe_size);
+			stripe_offset = stripe_nr * map->stripe_size;
+			full_stripe_nr = div_u64(stripe_nr, map->num_stripes);
+			div_u64_rem(stripe_nr, map->num_stripes, &stripe_index);
+
+			zone_info[i].alloc_offset =
+				full_stripe_nr * map->stripe_size;
+
+			if (stripe_index > i)
+				zone_info[i].alloc_offset += map->stripe_size;
+			else if (stripe_index == i)
+				zone_info[i].alloc_offset +=
+					(last_alloc - stripe_offset);
+		}
+
 		if (test_bit(0, active) != test_bit(i, active)) {
 			if (!btrfs_zone_activate(bg))
 				return -EIO;
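The RAID0 hunk above recovers a per-device write position from last_alloc: every device gets full_stripe_nr complete stripes, devices whose index is below that of the partially filled stripe get one extra full stripe, and the device holding the partial stripe gets the remainder. A self-contained userspace sketch of the same arithmetic (plain 64-bit division standing in for div64_u64()/div_u64_rem(), with made-up example values; not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t stripe_size = 65536;	/* 64 KiB stripes (example value) */
	const int num_stripes = 2;		/* two devices (example value) */
	const uint64_t last_alloc = 200704;	/* logical offset 196 KiB (example value) */

	uint64_t stripe_nr = last_alloc / stripe_size;		/* 3: stripe currently being filled */
	uint64_t stripe_offset = stripe_nr * stripe_size;	/* 192 KiB: start of that stripe */
	uint64_t full_stripe_nr = stripe_nr / num_stripes;	/* 1 complete stripe row */
	int stripe_index = stripe_nr % num_stripes;		/* partial stripe lives on device 1 */

	for (int i = 0; i < num_stripes; i++) {
		/* Every device holds full_stripe_nr complete stripes. */
		uint64_t alloc_offset = full_stripe_nr * stripe_size;

		if (stripe_index > i)
			alloc_offset += stripe_size;		/* this device already finished its stripe */
		else if (stripe_index == i)
			alloc_offset += last_alloc - stripe_offset;	/* this device holds the partial stripe */

		printf("stripe %d: alloc_offset=%llu\n", i,
		       (unsigned long long)alloc_offset);	/* 131072 and 69632, summing to last_alloc */
	}
	return 0;
}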
@@ -1526,7 +1557,8 @@ static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
 static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
 					 struct btrfs_chunk_map *map,
 					 struct zone_info *zone_info,
-					 unsigned long *active)
+					 unsigned long *active,
+					 u64 last_alloc)
 {
 	struct btrfs_fs_info *fs_info = bg->fs_info;
 
@@ -1537,8 +1569,7 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
 	}
 
 	for (int i = 0; i < map->num_stripes; i++) {
-		if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
-		    zone_info[i].alloc_offset == WP_CONVENTIONAL)
+		if (zone_info[i].alloc_offset == WP_MISSING_DEV)
 			continue;
 
 		if (test_bit(0, active) != test_bit(i, active)) {
@@ -1549,6 +1580,29 @@ static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
 		}
 
+		if (zone_info[i].alloc_offset == WP_CONVENTIONAL) {
+			u64 stripe_nr, full_stripe_nr;
+			u64 stripe_offset;
+			int stripe_index;
+
+			stripe_nr = div64_u64(last_alloc, map->stripe_size);
+			stripe_offset = stripe_nr * map->stripe_size;
+			full_stripe_nr = div_u64(stripe_nr,
+						 map->num_stripes / map->sub_stripes);
+			div_u64_rem(stripe_nr,
+				    (map->num_stripes / map->sub_stripes),
+				    &stripe_index);
+
+			zone_info[i].alloc_offset =
+				full_stripe_nr * map->stripe_size;
+
+			if (stripe_index > (i / map->sub_stripes))
+				zone_info[i].alloc_offset += map->stripe_size;
+			else if (stripe_index == (i / map->sub_stripes))
+				zone_info[i].alloc_offset +=
+					(last_alloc - stripe_offset);
+		}
+
 		if ((i % map->sub_stripes) == 0) {
 			bg->zone_capacity += zone_info[i].capacity;
 			bg->alloc_offset += zone_info[i].alloc_offset;
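The RAID10 hunk is the same calculation with stripes grouped into mirror sets: the effective number of data stripes is num_stripes / sub_stripes, and every device in mirror group i / sub_stripes ends up with the same offset. A hedged userspace sketch of that variant (illustrative helper name and parameters, plain integer division instead of the kernel div helpers; not kernel code):

#include <stdint.h>

/* Per-device allocation offset for a RAID10 conventional zone, mirroring the
 * arithmetic in the hunk above. Devices are indexed 0..num_stripes-1 and
 * grouped into num_stripes / sub_stripes mirror groups. Illustrative only. */
uint64_t raid10_dev_alloc_offset(uint64_t last_alloc, uint64_t stripe_size,
				 int num_stripes, int sub_stripes, int i)
{
	int data_stripes = num_stripes / sub_stripes;	/* groups striped across */
	uint64_t stripe_nr = last_alloc / stripe_size;
	uint64_t stripe_offset = stripe_nr * stripe_size;
	uint64_t full_stripe_nr = stripe_nr / data_stripes;
	int stripe_index = stripe_nr % data_stripes;
	uint64_t off = full_stripe_nr * stripe_size;

	if (stripe_index > i / sub_stripes)
		off += stripe_size;			/* group already completed its stripe */
	else if (stripe_index == i / sub_stripes)
		off += last_alloc - stripe_offset;	/* group holds the partial stripe */
	return off;
}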
@@ -1637,18 +1691,22 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
 		break;
 	case BTRFS_BLOCK_GROUP_DUP:
-		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
+		ret = btrfs_load_block_group_dup(cache, map, zone_info, active,
+						 last_alloc);
 		break;
 	case BTRFS_BLOCK_GROUP_RAID1:
 	case BTRFS_BLOCK_GROUP_RAID1C3:
 	case BTRFS_BLOCK_GROUP_RAID1C4:
-		ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
+		ret = btrfs_load_block_group_raid1(cache, map, zone_info,
+						   active, last_alloc);
 		break;
 	case BTRFS_BLOCK_GROUP_RAID0:
-		ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
+		ret = btrfs_load_block_group_raid0(cache, map, zone_info,
+						   active, last_alloc);
 		break;
 	case BTRFS_BLOCK_GROUP_RAID10:
-		ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
+		ret = btrfs_load_block_group_raid10(cache, map, zone_info,
+						    active, last_alloc);
 		break;
 	case BTRFS_BLOCK_GROUP_RAID5:
 	case BTRFS_BLOCK_GROUP_RAID6: