@@ -1773,7 +1773,7 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e
  */
 	spin_lock(&eb->refs_lock);
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
-		XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+		XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->node_bits);
 		unsigned long flags;
 
 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
@@ -1873,7 +1873,7 @@ static void set_btree_ioerr(struct extent_buffer *eb)
 static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+	XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->node_bits);
 	unsigned long flags;
 
 	xas_lock_irqsave(&xas, flags);
@@ -1885,7 +1885,7 @@ static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark)
 static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->sectorsize_bits);
+	XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->node_bits);
 	unsigned long flags;
 
 	xas_lock_irqsave(&xas, flags);
@@ -1985,7 +1985,7 @@ static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info,
 	rcu_read_lock();
 	while ((eb = find_get_eb(&xas, end, tag)) != NULL) {
 		if (!eb_batch_add(batch, eb)) {
-			*start = ((eb->start + eb->len) >> fs_info->sectorsize_bits);
+			*start = ((eb->start + eb->len) >> fs_info->node_bits);
 			goto out;
 		}
 	}
@@ -2007,7 +2007,7 @@ static struct extent_buffer *find_extent_buffer_nolock(
 		struct btrfs_fs_info *fs_info, u64 start)
 {
 	struct extent_buffer *eb;
-	unsigned long index = (start >> fs_info->sectorsize_bits);
+	unsigned long index = (start >> fs_info->node_bits);
 
 	rcu_read_lock();
 	eb = xa_load(&fs_info->buffer_tree, index);
@@ -2113,8 +2113,8 @@ void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start,
 				      u64 end)
 {
 	struct eb_batch batch;
-	unsigned long start_index = (start >> fs_info->sectorsize_bits);
-	unsigned long end_index = (end >> fs_info->sectorsize_bits);
+	unsigned long start_index = (start >> fs_info->node_bits);
+	unsigned long end_index = (end >> fs_info->node_bits);
 
 	eb_batch_init(&batch);
 	while (start_index <= end_index) {
@@ -2150,7 +2150,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 
 	eb_batch_init(&batch);
 	if (wbc->range_cyclic) {
-		index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->sectorsize_bits);
+		index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->node_bits);
 		end = -1;
 
 		/*
@@ -2159,8 +2159,8 @@ int btree_write_cache_pages(struct address_space *mapping,
 		 */
 		scanned = (index == 0);
 	} else {
-		index = (wbc->range_start >> fs_info->sectorsize_bits);
-		end = (wbc->range_end >> fs_info->sectorsize_bits);
+		index = (wbc->range_start >> fs_info->node_bits);
+		end = (wbc->range_end >> fs_info->node_bits);
 
 		scanned = 1;
 	}
@@ -3037,7 +3037,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 	eb->fs_info = fs_info;
 again:
 	xa_lock_irq(&fs_info->buffer_tree);
-	exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->sectorsize_bits,
+	exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->node_bits,
 			      NULL, eb, GFP_NOFS);
 	if (xa_is_err(exists)) {
 		ret = xa_err(exists);
@@ -3354,7 +3354,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 again:
 	xa_lock_irq(&fs_info->buffer_tree);
 	existing_eb = __xa_cmpxchg(&fs_info->buffer_tree,
-			start >> fs_info->sectorsize_bits, NULL, eb,
+			start >> fs_info->node_bits, NULL, eb,
 			GFP_NOFS);
 	if (xa_is_err(existing_eb)) {
 		ret = xa_err(existing_eb);
@@ -3457,7 +3457,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 		 * in this case.
 		 */
 		xa_cmpxchg_irq(&fs_info->buffer_tree,
-			       eb->start >> fs_info->sectorsize_bits, eb, NULL,
+			       eb->start >> fs_info->node_bits, eb, NULL,
 			       GFP_ATOMIC);
 
 		btrfs_leak_debug_del_eb(eb);
@@ -4299,9 +4299,9 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
 {
 	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
 	struct extent_buffer *eb;
-	unsigned long start = (folio_pos(folio) >> fs_info->sectorsize_bits);
+	unsigned long start = (folio_pos(folio) >> fs_info->node_bits);
 	unsigned long index = start;
-	unsigned long end = index + (PAGE_SIZE >> fs_info->sectorsize_bits) - 1;
+	unsigned long end = index + (PAGE_SIZE >> fs_info->node_bits) - 1;
 	int ret;
 
 	xa_lock_irq(&fs_info->buffer_tree);
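Every hunk above makes the same substitution: the buffer_tree XArray index is derived by shifting a byte offset with node_bits (metadata node granularity) instead of sectorsize_bits (sector granularity), so each extent buffer occupies one consecutive slot. Below is a minimal standalone sketch of that index arithmetic only, not part of the patch; the 4 KiB sector size, 16 KiB node size, and shift values are assumed example figures, not values taken from this diff.

/*
 * Illustrative sketch: how the buffer_tree index for an extent buffer
 * changes when the shift comes from the node size rather than the
 * sector size.  Assumes 4 KiB sectors and 16 KiB nodes.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sectorsize_bits = 12;		/* log2(4096)  */
	unsigned int node_bits = 14;			/* log2(16384) */
	unsigned long long eb_start = 16 * 16384ULL;	/* byte offset of some node */

	/* Old indexing: one slot per sector, adjacent nodes sit 4 slots apart. */
	printf("old index: %llu\n", eb_start >> sectorsize_bits);	/* 64 */

	/* New indexing: one slot per node, adjacent nodes sit 1 slot apart. */
	printf("new index: %llu\n", eb_start >> node_bits);		/* 16 */

	return 0;
}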