@@ -16,6 +16,12 @@
 #include <linux/prefetch.h>
 #include <linux/sched/mm.h>
 
+#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
+do { \
+	if (shrinker_counter) \
+		bc->not_freed_##counter++; \
+} while (0)
+
 const char * const bch2_btree_node_flags[] = {
 #define x(f)	#f,
 	BTREE_FLAGS()
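
Note: the macro added above expands at each call site and assumes that both `shrinker_counter` (the new function argument introduced below) and `bc` (the local `struct btree_cache *`) are in scope there; for example, `BTREE_CACHE_NOT_FREED_INCREMENT(dirty)` expands to roughly:

	do {
		if (shrinker_counter)
			bc->not_freed_dirty++;
	} while (0);
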
@@ -238,7 +244,7 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc,
  * this version is for btree nodes that have already been freed (we're not
  * reaping a real btree node)
  */
-static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter)
 {
 	struct btree_cache *bc = &c->btree_cache;
 	int ret = 0;
@@ -260,38 +266,64 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	if (b->flags & ((1U << BTREE_NODE_dirty)|
 			(1U << BTREE_NODE_read_in_flight)|
 			(1U << BTREE_NODE_write_in_flight))) {
-		if (!flush)
+		if (!flush) {
+			if (btree_node_dirty(b))
+				BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
+			else if (btree_node_read_in_flight(b))
+				BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
+			else if (btree_node_write_in_flight(b))
+				BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
 			return -BCH_ERR_ENOMEM_btree_node_reclaim;
+		}
 
 		/* XXX: waiting on IO with btree cache lock held */
 		bch2_btree_node_wait_on_read(b);
 		bch2_btree_node_wait_on_write(b);
 	}
 
-	if (!six_trylock_intent(&b->c.lock))
+	if (!six_trylock_intent(&b->c.lock)) {
+		BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
 		return -BCH_ERR_ENOMEM_btree_node_reclaim;
+	}
 
-	if (!six_trylock_write(&b->c.lock))
+	if (!six_trylock_write(&b->c.lock)) {
+		BTREE_CACHE_NOT_FREED_INCREMENT(lock_write);
 		goto out_unlock_intent;
+	}
 
 	/* recheck under lock */
 	if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
 			(1U << BTREE_NODE_write_in_flight))) {
-		if (!flush)
+		if (!flush) {
+			if (btree_node_read_in_flight(b))
+				BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
+			else if (btree_node_write_in_flight(b))
+				BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
 			goto out_unlock;
+		}
 		six_unlock_write(&b->c.lock);
 		six_unlock_intent(&b->c.lock);
 		goto wait_on_io;
 	}
 
-	if (btree_node_noevict(b) ||
-	    btree_node_write_blocked(b) ||
-	    btree_node_will_make_reachable(b))
+	if (btree_node_noevict(b)) {
+		BTREE_CACHE_NOT_FREED_INCREMENT(noevict);
+		goto out_unlock;
+	}
+	if (btree_node_write_blocked(b)) {
+		BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked);
 		goto out_unlock;
+	}
+	if (btree_node_will_make_reachable(b)) {
+		BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable);
+		goto out_unlock;
+	}
 
 	if (btree_node_dirty(b)) {
-		if (!flush)
+		if (!flush) {
+			BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
 			goto out_unlock;
+		}
 		/*
 		 * Using the underscore version because we don't want to compact
 		 * bsets after the write, since this node is about to be evicted
@@ -321,14 +353,14 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	goto out;
 }
 
-static int btree_node_reclaim(struct bch_fs *c, struct btree *b)
+static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter)
 {
-	return __btree_node_reclaim(c, b, false);
+	return __btree_node_reclaim(c, b, false, shrinker_counter);
 }
 
 static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
 {
-	return __btree_node_reclaim(c, b, true);
+	return __btree_node_reclaim(c, b, true, false);
 }
 
 static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
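
The counters fed by BTREE_CACHE_NOT_FREED_INCREMENT(), together with `bc->freed` and `bc->not_freed_access_bit` updated in the shrinker below, are fields of `struct btree_cache` that this file does not declare. A minimal sketch of what `btree_types.h` is assumed to gain (field names taken from the increments and the print-out at the end of this diff, types inferred from the `%u` format specifiers; the actual declaration is not part of this diff):

	/* sketch only: assumed additions to struct btree_cache (btree_types.h) */
	unsigned		freed;			/* nodes reclaimed by the shrinker */
	unsigned		not_freed_lock_intent;	/* six_trylock_intent() failed */
	unsigned		not_freed_lock_write;	/* six_trylock_write() failed */
	unsigned		not_freed_access_bit;	/* accessed bit was set, node skipped */
	unsigned		not_freed_dirty;
	unsigned		not_freed_read_in_flight;
	unsigned		not_freed_write_in_flight;
	unsigned		not_freed_noevict;
	unsigned		not_freed_write_blocked;
	unsigned		not_freed_will_make_reachable;
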
@@ -376,11 +408,12 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 		if (touched >= nr)
 			goto out;
 
-		if (!btree_node_reclaim(c, b)) {
+		if (!btree_node_reclaim(c, b, true)) {
 			btree_node_data_free(c, b);
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);
 			freed++;
+			bc->freed++;
 		}
 	}
 restart:
@@ -389,9 +422,11 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 
 		if (btree_node_accessed(b)) {
 			clear_btree_node_accessed(b);
-		} else if (!btree_node_reclaim(c, b)) {
+			bc->not_freed_access_bit++;
+		} else if (!btree_node_reclaim(c, b, true)) {
 			freed++;
 			btree_node_data_free(c, b);
+			bc->freed++;
 
 			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
@@ -599,7 +634,7 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
 	struct btree *b;
 
 	list_for_each_entry_reverse(b, &bc->live, list)
-		if (!btree_node_reclaim(c, b))
+		if (!btree_node_reclaim(c, b, false))
 			return b;
 
 	while (1) {
@@ -635,7 +670,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	 * disk node. Check the freed list before allocating a new one:
 	 */
 	list_for_each_entry(b, freed, list)
-		if (!btree_node_reclaim(c, b)) {
+		if (!btree_node_reclaim(c, b, false)) {
 			list_del_init(&b->list);
 			goto got_node;
 		}
@@ -661,7 +696,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	 * the list. Check if there's any freed nodes there:
 	 */
 	list_for_each_entry(b2, &bc->freeable, list)
-		if (!btree_node_reclaim(c, b2)) {
+		if (!btree_node_reclaim(c, b2, false)) {
 			swap(b->data, b2->data);
 			swap(b->aux_data, b2->aux_data);
 			btree_node_to_freedlist(bc, b2);
@@ -1280,12 +1315,12 @@ static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
 	prt_printf(out, " (%u)\n", nr);
 }
 
-void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
+void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
 {
-	const struct btree_cache *bc = &c->btree_cache;
+	struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
 
 	if (!out->nr_tabstops)
-		printbuf_tabstop_push(out, 24);
+		printbuf_tabstop_push(out, 32);
 
 	prt_btree_cache_line(out, c, "total:", bc->used);
 	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
@@ -1294,4 +1329,17 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
 
 	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
 		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+
+	prt_newline(out);
+	prt_printf(out, "freed:\t%u\n", bc->freed);
+	prt_printf(out, "not freed:\n");
+	prt_printf(out, " dirty\t%u\n", bc->not_freed_dirty);
+	prt_printf(out, " write in flight\t%u\n", bc->not_freed_write_in_flight);
+	prt_printf(out, " read in flight\t%u\n", bc->not_freed_read_in_flight);
+	prt_printf(out, " lock intent failed\t%u\n", bc->not_freed_lock_intent);
+	prt_printf(out, " lock write failed\t%u\n", bc->not_freed_lock_write);
+	prt_printf(out, " access bit\t%u\n", bc->not_freed_access_bit);
+	prt_printf(out, " no evict failed\t%u\n", bc->not_freed_noevict);
+	prt_printf(out, " write blocked\t%u\n", bc->not_freed_write_blocked);
+	prt_printf(out, " will make reachable\t%u\n", bc->not_freed_will_make_reachable);
 }
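
Because bch2_btree_cache_to_text() now takes the btree_cache itself rather than the bch_fs, callers outside this file (e.g. the sysfs/debug code, not shown in this diff) have to pass `&c->btree_cache` instead of `c`; a hypothetical call site update would look roughly like:

	/* hypothetical caller, assuming a struct bch_fs *c and a struct printbuf buf in scope */
	bch2_btree_cache_to_text(&buf, &c->btree_cache);	/* was: bch2_btree_cache_to_text(&buf, c) */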