Commit e30729d

naota authored and kdave committed
btrfs: zoned: properly take lock to read/update block group's zoned variables
__btrfs_add_free_space_zoned() references and modifies bg's alloc_offset, ro, and zone_unusable, but without taking the lock. It is mostly safe because they monotonically increase (at least for now) and this function is mostly called by a transaction commit, which is serialized by itself. Still, taking the lock is a safer and correct option and I'm going to add a change to reset zone_unusable while a block group is still alive. So, add locking around the operations.

Fixes: 169e0da ("btrfs: zoned: track unusable bytes for zones")
CC: stable@vger.kernel.org # 5.15+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
1 parent: 008e251

fs/btrfs/free-space-cache.c

Lines changed: 8 additions & 6 deletions
@@ -2697,15 +2697,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	int bg_reclaim_threshold = 0;
-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	bool initial;
 	u64 reclaimable_unusable;
 
-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+	spin_lock(&block_group->lock);
 
+	initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 	if (!initial)
 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
 	else if (initial)
@@ -2718,7 +2719,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		to_free = offset + size - block_group->alloc_offset;
 	to_unusable = size - to_free;
 
+	spin_lock(&ctl->tree_lock);
 	ctl->free_space += to_free;
+	spin_unlock(&ctl->tree_lock);
 	/*
 	 * If the block group is read-only, we should account freed space into
 	 * bytes_readonly.
@@ -2727,11 +2730,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		block_group->zone_unusable += to_unusable;
 		WARN_ON(block_group->zone_unusable > block_group->length);
 	}
-	spin_unlock(&ctl->tree_lock);
 	if (!used) {
-		spin_lock(&block_group->lock);
 		block_group->alloc_offset -= size;
-		spin_unlock(&block_group->lock);
 	}
 
 	reclaimable_unusable = block_group->zone_unusable -
@@ -2745,6 +2745,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
 
+	spin_unlock(&block_group->lock);
+
 	return 0;
 }
 