Skip to content

Commit 131c040

Browse files
committed
Merge tag 'bcachefs-2025-03-13' of git://evilpiepirate.org/bcachefs
Pull bcachefs fixes from Kent Overstreet: "Roxana caught an uninitialized value that might explain some of the rebalance weirdness we're still tracking down - cool. Otherwise pretty minor"

* tag 'bcachefs-2025-03-13' of git://evilpiepirate.org/bcachefs:
  bcachefs: bch2_get_random_u64_below()
  bcachefs: target_congested -> get_random_u32_below()
  bcachefs: fix tiny leak in bch2_dev_add()
  bcachefs: Make sure trans is unlocked when submitting read IO
  bcachefs: Initialize from_inode members for bch_io_opts
  bcachefs: Fix b->written overflow
2 parents 4003c9e + 9c18ea7 commit 131c040

File tree

7 files changed

+36
-24
lines changed

7 files changed

+36
-24
lines changed

fs/bcachefs/btree_io.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1186,7 +1186,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
11861186
le64_to_cpu(i->journal_seq),
11871187
b->written, b->written + sectors, ptr_written);
11881188

1189-
b->written += sectors;
1189+
b->written = min(b->written + sectors, btree_sectors(c));
11901190

11911191
if (blacklisted && !first)
11921192
continue;

fs/bcachefs/extents.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -99,7 +99,7 @@ static inline bool ptr_better(struct bch_fs *c,
9999

100100
/* Pick at random, biased in favor of the faster device: */
101101

102-
return bch2_rand_range(l1 + l2) > l1;
102+
return bch2_get_random_u64_below(l1 + l2) > l1;
103103
}
104104

105105
if (bch2_force_reconstruct_read)

fs/bcachefs/inode.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1198,6 +1198,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
11981198
opts->_name##_from_inode = true; \
11991199
} else { \
12001200
opts->_name = c->opts._name; \
1201+
opts->_name##_from_inode = false; \
12011202
}
12021203
BCH_INODE_OPTS()
12031204
#undef x

fs/bcachefs/io_read.c

Lines changed: 12 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -59,7 +59,7 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
5959
}
6060
rcu_read_unlock();
6161

62-
return bch2_rand_range(nr * CONGESTED_MAX) < total;
62+
return get_random_u32_below(nr * CONGESTED_MAX) < total;
6363
}
6464

6565
#else
@@ -951,12 +951,6 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
951951
goto retry_pick;
952952
}
953953

954-
/*
955-
* Unlock the iterator while the btree node's lock is still in
956-
* cache, before doing the IO:
957-
*/
958-
bch2_trans_unlock(trans);
959-
960954
if (flags & BCH_READ_NODECODE) {
961955
/*
962956
* can happen if we retry, and the extent we were going to read
@@ -1113,6 +1107,15 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
11131107
trace_and_count(c, read_split, &orig->bio);
11141108
}
11151109

1110+
/*
1111+
* Unlock the iterator while the btree node's lock is still in
1112+
* cache, before doing the IO:
1113+
*/
1114+
if (!(flags & BCH_READ_IN_RETRY))
1115+
bch2_trans_unlock(trans);
1116+
else
1117+
bch2_trans_unlock_long(trans);
1118+
11161119
if (!rbio->pick.idx) {
11171120
if (unlikely(!rbio->have_ioref)) {
11181121
struct printbuf buf = PRINTBUF;
@@ -1160,6 +1163,8 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
11601163
if (likely(!(flags & BCH_READ_IN_RETRY))) {
11611164
return 0;
11621165
} else {
1166+
bch2_trans_unlock(trans);
1167+
11631168
int ret;
11641169

11651170
rbio->context = RBIO_CONTEXT_UNBOUND;

fs/bcachefs/super.c

Lines changed: 6 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1811,7 +1811,11 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
18111811
goto err_late;
18121812

18131813
up_write(&c->state_lock);
1814-
return 0;
1814+
out:
1815+
printbuf_exit(&label);
1816+
printbuf_exit(&errbuf);
1817+
bch_err_fn(c, ret);
1818+
return ret;
18151819

18161820
err_unlock:
18171821
mutex_unlock(&c->sb_lock);
@@ -1820,10 +1824,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
18201824
if (ca)
18211825
bch2_dev_free(ca);
18221826
bch2_free_super(&sb);
1823-
printbuf_exit(&label);
1824-
printbuf_exit(&errbuf);
1825-
bch_err_fn(c, ret);
1826-
return ret;
1827+
goto out;
18271828
err_late:
18281829
up_write(&c->state_lock);
18291830
ca = NULL;

fs/bcachefs/util.c

Lines changed: 14 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -653,19 +653,24 @@ int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
653653
return 0;
654654
}
655655

656-
size_t bch2_rand_range(size_t max)
656+
u64 bch2_get_random_u64_below(u64 ceil)
657657
{
658-
size_t rand;
658+
if (ceil <= U32_MAX)
659+
return __get_random_u32_below(ceil);
659660

660-
if (!max)
661-
return 0;
661+
/* this is the same (clever) algorithm as in __get_random_u32_below() */
662+
u64 rand = get_random_u64();
663+
u64 mult = ceil * rand;
662664

663-
do {
664-
rand = get_random_long();
665-
rand &= roundup_pow_of_two(max) - 1;
666-
} while (rand >= max);
665+
if (unlikely(mult < ceil)) {
666+
u64 bound = -ceil % ceil;
667+
while (unlikely(mult < bound)) {
668+
rand = get_random_u64();
669+
mult = ceil * rand;
670+
}
671+
}
667672

668-
return rand;
673+
return mul_u64_u64_shr(ceil, rand, 64);
669674
}
670675

671676
void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)

fs/bcachefs/util.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -401,7 +401,7 @@ do { \
401401
_ret; \
402402
})
403403

404-
size_t bch2_rand_range(size_t);
404+
u64 bch2_get_random_u64_below(u64);
405405

406406
void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
407407
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

0 commit comments

Comments (0)