@@ -78,7 +78,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 	}
 
 	spin_lock(&root->inode_lock);
-	node = xa_load(&root->delayed_nodes, ino);
+	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
 
 	if (node) {
 		if (btrfs_inode->delayed_node) {
@@ -90,17 +90,17 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
 		/*
 		 * It's possible that we're racing into the middle of removing
-		 * this node from the xarray.  In this case, the refcount
+		 * this node from the radix tree.  In this case, the refcount
 		 * was zero and it should never go back to one.  Just return
-		 * NULL like it was never in the xarray at all; our release
+		 * NULL like it was never in the radix at all; our release
 		 * function is in the process of removing it.
 		 *
 		 * Some implementations of refcount_inc refuse to bump the
 		 * refcount once it has hit zero.  If we don't do this dance
 		 * here, refcount_inc() may decide to just WARN_ONCE() instead
 		 * of actually bumping the refcount.
 		 *
-		 * If this node is properly in the xarray, we want to bump the
+		 * If this node is properly in the radix, we want to bump the
 		 * refcount twice, once for the inode and once for this get
 		 * operation.
 		 */
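
As an aside, the dance the comment above describes is a stock kernel lookup idiom, not something btrfs-specific. A minimal sketch, where struct item, table_lock and item_tryget() are illustrative names rather than anything from this patch:

#include <linux/refcount.h>
#include <linux/spinlock.h>

struct item {
	refcount_t refs;
};

static DEFINE_SPINLOCK(table_lock);

/*
 * @found was just looked up in a structure protected by table_lock.  A
 * releaser may already have dropped the last reference and be spinning
 * on table_lock to unlink the item; refcount_inc() would WARN on the
 * 0 -> 1 transition, so refcount_inc_not_zero() is used instead and a
 * dead item is treated as if it had never been found.
 */
static struct item *item_tryget(struct item *found)
{
	lockdep_assert_held(&table_lock);

	if (found && !refcount_inc_not_zero(&found->refs))
		found = NULL;	/* refcount already hit zero; being freed */
	return found;
}

This works because the release side drops its last reference first and only then takes the lock to unlink, so a zero refcount observed under the lock reliably means the object is on its way out.
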
@@ -128,30 +128,36 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	u64 ino = btrfs_ino(btrfs_inode);
 	int ret;
 
-	do {
-		node = btrfs_get_delayed_node(btrfs_inode);
-		if (node)
-			return node;
+again:
+	node = btrfs_get_delayed_node(btrfs_inode);
+	if (node)
+		return node;
 
-		node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
-		if (!node)
-			return ERR_PTR(-ENOMEM);
-		btrfs_init_delayed_node(node, root, ino);
+	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+	btrfs_init_delayed_node(node, root, ino);
 
-		/* Cached in the inode and can be accessed */
-		refcount_set(&node->refs, 2);
+	/* cached in the btrfs inode and can be accessed */
+	refcount_set(&node->refs, 2);
 
-		spin_lock(&root->inode_lock);
-		ret = xa_insert(&root->delayed_nodes, ino, node, GFP_NOFS);
-		if (ret) {
-			spin_unlock(&root->inode_lock);
-			kmem_cache_free(delayed_node_cache, node);
-			if (ret != -EBUSY)
-				return ERR_PTR(ret);
-		}
-	} while (ret);
+	ret = radix_tree_preload(GFP_NOFS);
+	if (ret) {
+		kmem_cache_free(delayed_node_cache, node);
+		return ERR_PTR(ret);
+	}
+
+	spin_lock(&root->inode_lock);
+	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+	if (ret == -EEXIST) {
+		spin_unlock(&root->inode_lock);
+		kmem_cache_free(delayed_node_cache, node);
+		radix_tree_preload_end();
+		goto again;
+	}
 	btrfs_inode->delayed_node = node;
 	spin_unlock(&root->inode_lock);
+	radix_tree_preload_end();
 
 	return node;
 }
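
The hunk above restores the classic preload idiom: radix_tree_insert() may need to allocate internal tree nodes, which cannot be done with a sleeping allocation under a spinlock, so radix_tree_preload() fills a per-CPU reserve beforehand (disabling preemption on success) and radix_tree_preload_end() releases it after the insert. A condensed sketch of the sequence, with my_tree, my_lock and my_insert() as assumed names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_lock);

/* Insert @item at @index; returns 0, -EEXIST or -ENOMEM. */
static int my_insert(unsigned long index, void *item)
{
	int ret;

	/* May sleep; on success returns with preemption disabled. */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;	/* no preload to release on failure */

	spin_lock(&my_lock);
	/* Cannot fail with -ENOMEM here; the preload covers allocation. */
	ret = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();	/* drop reserve, re-enable preemption */
	return ret;
}

On -EEXIST the btrfs code above frees its freshly allocated node and retries the whole lookup via goto again, because another task won the race to insert a node for the same inode.
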
@@ -270,7 +276,8 @@ static void __btrfs_release_delayed_node(
 	 * back up.  We can delete it now.
 	 */
 	ASSERT(refcount_read(&delayed_node->refs) == 0);
-	xa_erase(&root->delayed_nodes, delayed_node->inode_id);
+	radix_tree_delete(&root->delayed_nodes_tree,
+			  delayed_node->inode_id);
 	spin_unlock(&root->inode_lock);
 	kmem_cache_free(delayed_node_cache, delayed_node);
 }
@@ -1863,35 +1870,34 @@ void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
 
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 {
-	unsigned long index = 0;
-	struct btrfs_delayed_node *delayed_node;
+	u64 inode_id = 0;
 	struct btrfs_delayed_node *delayed_nodes[8];
+	int i, n;
 
 	while (1) {
-		int n = 0;
-
 		spin_lock(&root->inode_lock);
-		if (xa_empty(&root->delayed_nodes)) {
+		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+					   (void **)delayed_nodes, inode_id,
+					   ARRAY_SIZE(delayed_nodes));
+		if (!n) {
 			spin_unlock(&root->inode_lock);
-			return;
+			break;
 		}
 
-		xa_for_each_start(&root->delayed_nodes, index, delayed_node, index) {
+		inode_id = delayed_nodes[n - 1]->inode_id + 1;
+		for (i = 0; i < n; i++) {
 			/*
 			 * Don't increase refs in case the node is dead and
 			 * about to be removed from the tree in the loop below
 			 */
-			if (refcount_inc_not_zero(&delayed_node->refs)) {
-				delayed_nodes[n] = delayed_node;
-				n++;
-			}
-			if (n >= ARRAY_SIZE(delayed_nodes))
-				break;
+			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+				delayed_nodes[i] = NULL;
 		}
-		index++;
 		spin_unlock(&root->inode_lock);
 
-		for (int i = 0; i < n; i++) {
+		for (i = 0; i < n; i++) {
+			if (!delayed_nodes[i])
+				continue;
 			__btrfs_kill_delayed_node(delayed_nodes[i]);
 			btrfs_release_delayed_node(delayed_nodes[i]);
 		}
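
radix_tree_gang_lookup() keeps no cursor of its own, so the restored loop derives the next start index from the last object of each batch (delayed_nodes[n - 1]->inode_id + 1). A generic sketch of that batched-walk pattern, with struct obj, obj_process() and walk_all() as illustrative names:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

struct obj {
	unsigned long index;	/* each object records its own tree index */
};

static void obj_process(struct obj *o)
{
	/* stand-in for the per-object work */
}

static void walk_all(struct radix_tree_root *root, spinlock_t *lock)
{
	struct obj *batch[8];
	unsigned long start = 0;
	unsigned int i, n;

	while (1) {
		spin_lock(lock);
		n = radix_tree_gang_lookup(root, (void **)batch, start,
					   ARRAY_SIZE(batch));
		if (!n) {
			spin_unlock(lock);
			break;
		}
		/* resume after the last entry seen in this batch */
		start = batch[n - 1]->index + 1;
		spin_unlock(lock);

		for (i = 0; i < n; i++)
			obj_process(batch[i]);
	}
}

Processing outside the lock is exactly why the patch takes a reference with refcount_inc_not_zero() while still under inode_lock: an entry returned in one batch could otherwise be freed before the kill/release loop gets to it.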