
Commit b06d800

fdmanana authored and kdave committed
btrfs: use refcount_t type for the extent buffer reference counter
Instead of using a bare atomic, use the refcount_t type, which despite being a structure that contains only an atomic, has an API that checks for underflows and other hazards. This doesn't change the size of the extent_buffer structure.

This removes the need to do things like this:

	WARN_ON(atomic_read(&eb->refs) == 0);
	if (atomic_dec_and_test(&eb->refs)) {
		(...)
	}

And do just:

	if (refcount_dec_and_test(&eb->refs)) {
		(...)
	}

Since refcount_dec_and_test() already triggers a warning when we decrement a ref count that has a value of 0 (or below zero).

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
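For readers unfamiliar with the kernel's refcount_t, below is a minimal userspace sketch (not code from this commit, and not the real implementation from <linux/refcount.h>) that emulates the behaviour the message relies on: the decrement-and-test helper itself flags an underflow, so callers no longer need a separate WARN_ON before the decrement. The struct ref type and ref_dec_and_test() helper are invented here purely for illustration.

/* Illustrative userspace emulation only; the real kernel API lives in
 * <linux/refcount.h> and additionally saturates the counter on misuse. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
	atomic_int refs;		/* stand-in for refcount_t */
};

/* Rough analogue of refcount_dec_and_test(): returns true when this call
 * dropped the last reference, and warns if the counter was already 0 or
 * below, i.e. the caller is putting a reference it does not hold. */
static bool ref_dec_and_test(struct ref *r)
{
	int old = atomic_fetch_sub(&r->refs, 1);

	if (old <= 0) {
		fprintf(stderr, "refcount underflow (was %d)\n", old);
		return false;
	}
	return old == 1;
}

int main(void)
{
	struct ref eb = { .refs = 1 };

	if (ref_dec_and_test(&eb))
		puts("last reference dropped, object can be freed");

	/* A buggy extra put no longer needs its own WARN_ON(): the helper
	 * itself reports the underflow. */
	ref_dec_and_test(&eb);
	return 0;
}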
1 parent 03a4713 commit b06d800

11 files changed (+42, −43 lines)


fs/btrfs/ctree.c

Lines changed: 7 additions & 7 deletions
@@ -198,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
 		 * the inc_not_zero dance and if it doesn't work then
 		 * synchronize_rcu and try again.
 		 */
-		if (atomic_inc_not_zero(&eb->refs)) {
+		if (refcount_inc_not_zero(&eb->refs)) {
 			rcu_read_unlock();
 			break;
 		}
@@ -560,7 +560,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
 		btrfs_abort_transaction(trans, ret);
 		goto error_unlock_cow;
 	}
-	atomic_inc(&cow->refs);
+	refcount_inc(&cow->refs);
 	rcu_assign_pointer(root->node, cow);
 
 	ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
@@ -1092,7 +1092,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	/* update the path */
 	if (left) {
 		if (btrfs_header_nritems(left) > orig_slot) {
-			atomic_inc(&left->refs);
+			refcount_inc(&left->refs);
 			/* left was locked after cow */
 			path->nodes[level] = left;
 			path->slots[level + 1] -= 1;
@@ -1696,7 +1696,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
 
 	if (p->search_commit_root) {
 		b = root->commit_root;
-		atomic_inc(&b->refs);
+		refcount_inc(&b->refs);
 		level = btrfs_header_level(b);
 		/*
 		 * Ensure that all callers have set skip_locking when
@@ -2894,7 +2894,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	free_extent_buffer(old);
 
 	add_root_to_dirty_list(root);
-	atomic_inc(&c->refs);
+	refcount_inc(&c->refs);
 	path->nodes[level] = c;
 	path->locks[level] = BTRFS_WRITE_LOCK;
 	path->slots[level] = 0;
@@ -4451,7 +4451,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 
 	root_sub_used_bytes(root);
 
-	atomic_inc(&leaf->refs);
+	refcount_inc(&leaf->refs);
 	ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
 	free_extent_buffer_stale(leaf);
 	if (ret < 0)
@@ -4536,7 +4536,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		 * for possible call to btrfs_del_ptr below
 		 */
 		slot = path->slots[1];
-		atomic_inc(&leaf->refs);
+		refcount_inc(&leaf->refs);
 		/*
 		 * We want to be able to at least push one item to the
 		 * left neighbour leaf, and that's the first item.

fs/btrfs/extent-tree.c

Lines changed: 1 addition & 1 deletion
@@ -6348,7 +6348,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 
 	btrfs_assert_tree_write_locked(parent);
 	parent_level = btrfs_header_level(parent);
-	atomic_inc(&parent->refs);
+	refcount_inc(&parent->refs);
 	path->nodes[parent_level] = parent;
 	path->slots[parent_level] = btrfs_header_nritems(parent);

fs/btrfs/extent_io.c

Lines changed: 22 additions & 23 deletions
@@ -77,7 +77,7 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
 				       struct extent_buffer, leak_list);
 		pr_err(
 	"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
-		       eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
+		       eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
 		       btrfs_header_owner(eb));
 		list_del(&eb->leak_list);
 		WARN_ON_ONCE(1);
@@ -1961,7 +1961,7 @@ static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned l
 	if (!eb)
 		return NULL;
 
-	if (!atomic_inc_not_zero(&eb->refs)) {
+	if (!refcount_inc_not_zero(&eb->refs)) {
 		xas_reset(xas);
 		goto retry;
 	}
@@ -2012,7 +2012,7 @@ static struct extent_buffer *find_extent_buffer_nolock(
 
 	rcu_read_lock();
 	eb = xa_load(&fs_info->buffer_tree, index);
-	if (eb && !atomic_inc_not_zero(&eb->refs))
+	if (eb && !refcount_inc_not_zero(&eb->refs))
 		eb = NULL;
 	rcu_read_unlock();
 	return eb;
@@ -2842,7 +2842,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info
 	btrfs_leak_debug_add_eb(eb);
 
 	spin_lock_init(&eb->refs_lock);
-	atomic_set(&eb->refs, 1);
+	refcount_set(&eb->refs, 1);
 
 	ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
@@ -2975,13 +2975,13 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * once io is initiated, TREE_REF can no longer be cleared, so that is
 	 * the moment at which any such race is best fixed.
 	 */
-	refs = atomic_read(&eb->refs);
+	refs = refcount_read(&eb->refs);
 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		return;
 
 	spin_lock(&eb->refs_lock);
 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-		atomic_inc(&eb->refs);
+		refcount_inc(&eb->refs);
 	spin_unlock(&eb->refs_lock);
 }
 
@@ -3047,7 +3047,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 		return ERR_PTR(ret);
 	}
 	if (exists) {
-		if (!atomic_inc_not_zero(&exists->refs)) {
+		if (!refcount_inc_not_zero(&exists->refs)) {
 			/* The extent buffer is being freed, retry. */
 			xa_unlock_irq(&fs_info->buffer_tree);
 			goto again;
@@ -3092,7 +3092,7 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
 	 * just overwrite folio private.
 	 */
 	exists = folio_get_private(folio);
-	if (atomic_inc_not_zero(&exists->refs))
+	if (refcount_inc_not_zero(&exists->refs))
 		return exists;
 
 	WARN_ON(folio_test_dirty(folio));
@@ -3363,7 +3363,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 		goto out;
 	}
 	if (existing_eb) {
-		if (!atomic_inc_not_zero(&existing_eb->refs)) {
+		if (!refcount_inc_not_zero(&existing_eb->refs)) {
 			xa_unlock_irq(&fs_info->buffer_tree);
 			goto again;
 		}
@@ -3392,7 +3392,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	return eb;
 
 out:
-	WARN_ON(!atomic_dec_and_test(&eb->refs));
+	WARN_ON(!refcount_dec_and_test(&eb->refs));
 
 	/*
 	 * Any attached folios need to be detached before we unlock them. This
@@ -3438,8 +3438,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 {
 	lockdep_assert_held(&eb->refs_lock);
 
-	WARN_ON(atomic_read(&eb->refs) == 0);
-	if (atomic_dec_and_test(&eb->refs)) {
+	if (refcount_dec_and_test(&eb->refs)) {
 		struct btrfs_fs_info *fs_info = eb->fs_info;
 
 		spin_unlock(&eb->refs_lock);
@@ -3485,7 +3484,7 @@ void free_extent_buffer(struct extent_buffer *eb)
 	if (!eb)
 		return;
 
-	refs = atomic_read(&eb->refs);
+	refs = refcount_read(&eb->refs);
 	while (1) {
 		if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
 			if (refs == 1)
@@ -3495,16 +3494,16 @@ void free_extent_buffer(struct extent_buffer *eb)
 		}
 
 		/* Optimization to avoid locking eb->refs_lock. */
-		if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
+		if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
 			return;
 	}
 
 	spin_lock(&eb->refs_lock);
-	if (atomic_read(&eb->refs) == 2 &&
+	if (refcount_read(&eb->refs) == 2 &&
 	    test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
 	    !extent_buffer_under_io(eb) &&
 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-		atomic_dec(&eb->refs);
+		refcount_dec(&eb->refs);
 
 	/*
 	 * I know this is terrible, but it's temporary until we stop tracking
@@ -3521,9 +3520,9 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
 	spin_lock(&eb->refs_lock);
 	set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
 
-	if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+	if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
 	    test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-		atomic_dec(&eb->refs);
+		refcount_dec(&eb->refs);
 	release_extent_buffer(eb);
 }
 
@@ -3581,7 +3580,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
 		btree_clear_folio_dirty_tag(folio);
 		folio_unlock(folio);
 	}
-	WARN_ON(atomic_read(&eb->refs) == 0);
+	WARN_ON(refcount_read(&eb->refs) == 0);
 }
 
 void set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -3592,7 +3591,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
 
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 
-	WARN_ON(atomic_read(&eb->refs) == 0);
+	WARN_ON(refcount_read(&eb->refs) == 0);
 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 	WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
 
@@ -3718,7 +3717,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 
 	eb->read_mirror = 0;
 	check_buffer_tree_ref(eb);
-	atomic_inc(&eb->refs);
+	refcount_inc(&eb->refs);
 
 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
 			       REQ_OP_READ | REQ_META, eb->fs_info,
@@ -4313,7 +4312,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
 		 * won't disappear out from under us.
 		 */
 		spin_lock(&eb->refs_lock);
-		if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+		if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
 			spin_unlock(&eb->refs_lock);
 			continue;
 		}
@@ -4379,7 +4378,7 @@ int try_release_extent_buffer(struct folio *folio)
 	 * this page.
 	 */
 	spin_lock(&eb->refs_lock);
-	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+	if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
 		spin_unlock(&eb->refs_lock);
 		spin_unlock(&folio->mapping->i_private_lock);
 		return 0;

fs/btrfs/extent_io.h

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ struct extent_buffer {
 	void *addr;
 
 	spinlock_t refs_lock;
-	atomic_t refs;
+	refcount_t refs;
 	int read_mirror;
 	/* >= 0 if eb belongs to a log tree, -1 otherwise */
 	s8 log_index;

fs/btrfs/fiemap.c

Lines changed: 1 addition & 1 deletion
@@ -320,7 +320,7 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
 	 * the cost of allocating a new one.
 	 */
 	ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
-	atomic_inc(&clone->refs);
+	refcount_inc(&clone->refs);
 
 	ret = btrfs_next_leaf(inode->root, path);
 	if (ret != 0)

fs/btrfs/print-tree.c

Lines changed: 1 addition & 1 deletion
@@ -223,7 +223,7 @@ static void print_eb_refs_lock(const struct extent_buffer *eb)
 {
 #ifdef CONFIG_BTRFS_DEBUG
 	btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
-		   atomic_read(&eb->refs), eb->lock_owner, current->pid);
+		   refcount_read(&eb->refs), eb->lock_owner, current->pid);
 #endif
 }

fs/btrfs/qgroup.c

Lines changed: 3 additions & 3 deletions
@@ -2338,7 +2338,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 	btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
 
 	/* For src_path */
-	atomic_inc(&src_eb->refs);
+	refcount_inc(&src_eb->refs);
 	src_path->nodes[root_level] = src_eb;
 	src_path->slots[root_level] = dst_path->slots[root_level];
 	src_path->locks[root_level] = 0;
@@ -2571,7 +2571,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 	/* For dst_path */
-	atomic_inc(&dst_eb->refs);
+	refcount_inc(&dst_eb->refs);
 	dst_path->nodes[level] = dst_eb;
 	dst_path->slots[level] = 0;
 	dst_path->locks[level] = 0;
@@ -2663,7 +2663,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 	 * walk back up the tree (adjusting slot pointers as we go)
 	 * and restart the search process.
 	 */
-	atomic_inc(&root_eb->refs); /* For path */
+	refcount_inc(&root_eb->refs); /* For path */
 	path->nodes[root_level] = root_eb;
 	path->slots[root_level] = 0;
 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */

fs/btrfs/relocation.c

Lines changed: 2 additions & 2 deletions
@@ -1524,7 +1524,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
 		level = btrfs_root_level(root_item);
-		atomic_inc(&reloc_root->node->refs);
+		refcount_inc(&reloc_root->node->refs);
 		path->nodes[level] = reloc_root->node;
 		path->slots[level] = 0;
 	} else {
@@ -4347,7 +4347,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_backref_drop_node_buffer(node);
-	atomic_inc(&cow->refs);
+	refcount_inc(&cow->refs);
 	node->eb = cow;
 	node->new_bytenr = cow->start;
fs/btrfs/tree-log.c

Lines changed: 2 additions & 2 deletions
@@ -2719,7 +2719,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 	level = btrfs_header_level(log->node);
 	orig_level = level;
 	path->nodes[level] = log->node;
-	atomic_inc(&log->node->refs);
+	refcount_inc(&log->node->refs);
 	path->slots[level] = 0;
 
 	while (1) {
@@ -3683,7 +3683,7 @@ static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx)
 	 * Add extra ref to scratch eb so that it is not freed when callers
 	 * release the path, so we can reuse it later if needed.
 	 */
-	atomic_inc(&ctx->scratch_eb->refs);
+	refcount_inc(&ctx->scratch_eb->refs);
 
 	return 0;
 }

fs/btrfs/zoned.c

Lines changed: 1 addition & 1 deletion
@@ -2427,7 +2427,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 
 	/* For the work */
 	btrfs_get_block_group(bg);
-	atomic_inc(&eb->refs);
+	refcount_inc(&eb->refs);
 	bg->last_eb = eb;
 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
 	queue_work(system_unbound_wq, &bg->zone_finish_work);
