@@ -3689,6 +3689,8 @@ static struct extent_buffer *grab_extent_buffer(
 	struct folio *folio = page_folio(page);
 	struct extent_buffer *exists;
 
+	lockdep_assert_held(&page->mapping->i_private_lock);
+
 	/*
 	 * For subpage case, we completely rely on radix tree to ensure we
 	 * don't try to insert two ebs for the same bytenr. So here we always
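
For context, the new lockdep_assert_held() makes the locking contract explicit: grab_extent_buffer() must be called with the btree mapping's i_private_lock held, which is how the later hunks in this patch use it. A minimal caller sketch, using only names that appear elsewhere in this diff (illustrative only, not part of the change):

	/* Look up an eb already attached to this page, under i_private_lock. */
	spin_lock(&mapping->i_private_lock);
	existing_eb = grab_extent_buffer(fs_info, folio_page(existing_folio, 0));
	spin_unlock(&mapping->i_private_lock);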
@@ -3756,13 +3758,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
  * The caller needs to free the existing folios and retry using the same order.
  */
 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+				      struct btrfs_subpage *prealloc,
 				      struct extent_buffer **found_eb_ret)
 {
 
 	struct btrfs_fs_info *fs_info = eb->fs_info;
 	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	const unsigned long index = eb->start >> PAGE_SHIFT;
-	struct folio *existing_folio;
+	struct folio *existing_folio = NULL;
 	int ret;
 
 	ASSERT(found_eb_ret);
@@ -3774,12 +3777,14 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 	ret = filemap_add_folio(mapping, eb->folios[i], index + i,
 				GFP_NOFS | __GFP_NOFAIL);
 	if (!ret)
-		return 0;
+		goto finish;
 
 	existing_folio = filemap_lock_folio(mapping, index + i);
 	/* The page cache only exists for a very short time, just retry. */
-	if (IS_ERR(existing_folio))
+	if (IS_ERR(existing_folio)) {
+		existing_folio = NULL;
 		goto retry;
+	}
 
 	/* For now, we should only have single-page folios for btree inode. */
 	ASSERT(folio_nr_pages(existing_folio) == 1);
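
The NULL initialization and the reset under IS_ERR() matter because, after this change, a successful filemap_add_folio() also jumps to the shared finish: label (next hunk); there, the value of existing_folio is what distinguishes "our own folio went into the page cache" from "we are adopting a folio that was already cached". Roughly, the three cases handled at the finish: label are (sketch only):

	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
		/* Subpage: adopt the cached folio and free our own page. */
	} else if (existing_folio) {
		/* Full page: another eb may already own that folio. */
	} else {
		/* Our freshly allocated folio was inserted; just attach to it. */
	}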
@@ -3790,21 +3795,21 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 		return -EAGAIN;
 	}
 
-	if (fs_info->nodesize < PAGE_SIZE) {
-		/*
-		 * We're going to reuse the existing page, can drop our page
-		 * and subpage structure now.
-		 */
+finish:
+	spin_lock(&mapping->i_private_lock);
+	if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+		/* We're going to reuse the existing page, can drop our folio now. */
 		__free_page(folio_page(eb->folios[i], 0));
 		eb->folios[i] = existing_folio;
-	} else {
+	} else if (existing_folio) {
 		struct extent_buffer *existing_eb;
 
 		existing_eb = grab_extent_buffer(fs_info,
 						 folio_page(existing_folio, 0));
 		if (existing_eb) {
 			/* The extent buffer still exists, we can use it directly. */
 			*found_eb_ret = existing_eb;
+			spin_unlock(&mapping->i_private_lock);
 			folio_unlock(existing_folio);
 			folio_put(existing_folio);
 			return 1;
@@ -3813,6 +3818,22 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
 		__free_page(folio_page(eb->folios[i], 0));
 		eb->folios[i] = existing_folio;
 	}
+	eb->folio_size = folio_size(eb->folios[i]);
+	eb->folio_shift = folio_shift(eb->folios[i]);
+	/* Should not fail, as we have preallocated the memory. */
+	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+	ASSERT(!ret);
+	/*
+	 * To inform we have an extra eb under allocation, so that
+	 * detach_extent_buffer_page() won't release the folio private when the
+	 * eb hasn't been inserted into radix tree yet.
+	 *
+	 * The ref will be decreased when the eb releases the page, in
+	 * detach_extent_buffer_page(). Thus needs no special handling in the
+	 * error path.
+	 */
+	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+	spin_unlock(&mapping->i_private_lock);
 	return 0;
 }
 
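With this change, attaching the folio private data moves from the per-folio loop in alloc_extent_buffer() (see the hunks below) into attach_eb_folio_to_filemap() itself, so that checking for an already attached extent buffer via grab_extent_buffer() and attaching our own private data happen inside a single i_private_lock critical section. Condensed, the success-path tail of the function is roughly (outline only, mirroring the two hunks above):

	spin_lock(&mapping->i_private_lock);
	/* ... keep eb->folios[i] or adopt existing_folio ... */
	eb->folio_size = folio_size(eb->folios[i]);
	eb->folio_shift = folio_shift(eb->folios[i]);
	ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
	ASSERT(!ret);
	btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
	spin_unlock(&mapping->i_private_lock);
	return 0;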
@@ -3824,7 +3845,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	int attached = 0;
 	struct extent_buffer *eb;
 	struct extent_buffer *existing_eb = NULL;
-	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	struct btrfs_subpage *prealloc = NULL;
 	u64 lockdep_owner = owner_root;
 	bool page_contig = true;
@@ -3890,7 +3910,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	for (int i = 0; i < num_folios; i++) {
 		struct folio *folio;
 
-		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+		ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
 		if (ret > 0) {
 			ASSERT(existing_eb);
 			goto out;
@@ -3927,24 +3947,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 		 * and free the allocated page.
 		 */
 		folio = eb->folios[i];
-		eb->folio_size = folio_size(folio);
-		eb->folio_shift = folio_shift(folio);
-		spin_lock(&mapping->i_private_lock);
-		/* Should not fail, as we have preallocated the memory */
-		ret = attach_extent_buffer_folio(eb, folio, prealloc);
-		ASSERT(!ret);
-		/*
-		 * To inform we have extra eb under allocation, so that
-		 * detach_extent_buffer_page() won't release the folio private
-		 * when the eb hasn't yet been inserted into radix tree.
-		 *
-		 * The ref will be decreased when the eb released the page, in
-		 * detach_extent_buffer_page().
-		 * Thus needs no special handling in error path.
-		 */
-		btrfs_folio_inc_eb_refs(fs_info, folio);
-		spin_unlock(&mapping->i_private_lock);
-
 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
 
 		/*