@@ -254,7 +254,7 @@ enum evict_result {
 
 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
 
-static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
+static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
 {
 	unsigned long tested = 0;
 	struct list_head *h = lru->cursor;
@@ -295,7 +295,8 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
 
 		h = h->next;
 
-		cond_resched();
+		if (!no_sleep)
+			cond_resched();
 	}
 
 	return NULL;
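
For context: lru_evict() gains the no_sleep argument because clients created with DM_BUFIO_CLIENT_NO_SLEEP switch the per-tree lock to a BH-disabling rwlock (see the buffer_tree hunk below), so the eviction scan can be reached while a spinlock-class lock is held and must not reschedule. Below is a minimal standalone sketch of that constraint using only generic locking primitives; the lock and function names are illustrative and not part of dm-bufio:

#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_tree_lock);	/* illustrative, not dm-bufio's lock */

/* Non-sleeping variant: a BH-disabling rwlock is held, so no cond_resched(). */
static void scan_no_sleep(void (*visit)(void *item), void *item)
{
	read_lock_bh(&example_tree_lock);
	visit(item);
	read_unlock_bh(&example_tree_lock);
}

/* Sleeping variant: an rw_semaphore is held, so yielding the CPU is allowed. */
static void scan_may_sleep(struct rw_semaphore *sem,
			   void (*visit)(void *item), void *item)
{
	down_read(sem);
	visit(item);
	cond_resched();		/* fine under a sleeping lock, as in the old lru_evict() */
	up_read(sem);
}
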
@@ -382,7 +383,10 @@ struct dm_buffer {
  */
 
 struct buffer_tree {
-	struct rw_semaphore lock;
+	union {
+		struct rw_semaphore lock;
+		rwlock_t spinlock;
+	} u;
 	struct rb_root root;
 } ____cacheline_aligned_in_smp;
 
@@ -393,32 +397,47 @@ struct dm_buffer_cache {
 	 * on the locks.
 	 */
 	unsigned int num_locks;
+	bool no_sleep;
 	struct buffer_tree trees[];
 };
 
+static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
+
 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
 {
 	return dm_hash_locks_index(block, num_locks);
 }
 
 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
 {
-	down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
 {
-	up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
+	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
+		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
+	else
+		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
 }
 
 /*
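
The static_branch_unlikely() test above keeps the common case cheap: no_sleep_enabled is a DEFINE_STATIC_KEY_FALSE() jump label, so while no client has asked for DM_BUFIO_CLIENT_NO_SLEEP the spinlock branch is patched out and every wrapper falls straight through to the rw_semaphore path. The client create/destroy code that flips the key is not part of this hunk; the helpers below are a hypothetical sketch of the usual reference-counted pattern, not dm-bufio's actual functions:

#include <linux/jump_label.h>

/*
 * Hypothetical helpers (not from this patch): static_branch_inc() enables
 * the key when the first no-sleep client appears, static_branch_dec()
 * disables it again when the last one goes away, so sleeping-only users
 * never take the spinlock branch.
 */
static void example_no_sleep_client_created(struct dm_buffer_cache *bc)
{
	if (bc->no_sleep)
		static_branch_inc(&no_sleep_enabled);
}

static void example_no_sleep_client_destroyed(struct dm_buffer_cache *bc)
{
	if (bc->no_sleep)
		static_branch_dec(&no_sleep_enabled);
}
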
@@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
 
 static void __lh_lock(struct lock_history *lh, unsigned int index)
 {
-	if (lh->write)
-		down_write(&lh->cache->trees[index].lock);
-	else
-		down_read(&lh->cache->trees[index].lock);
+	if (lh->write) {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			write_lock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			down_write(&lh->cache->trees[index].u.lock);
+	} else {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			read_lock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			down_read(&lh->cache->trees[index].u.lock);
+	}
 }
 
 static void __lh_unlock(struct lock_history *lh, unsigned int index)
 {
-	if (lh->write)
-		up_write(&lh->cache->trees[index].lock);
-	else
-		up_read(&lh->cache->trees[index].lock);
+	if (lh->write) {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			write_unlock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			up_write(&lh->cache->trees[index].u.lock);
+	} else {
+		if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
+			read_unlock_bh(&lh->cache->trees[index].u.spinlock);
+		else
+			up_read(&lh->cache->trees[index].u.lock);
+	}
 }
 
 /*
@@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
 	return le_to_buffer(le);
 }
 
-static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
+static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
 {
 	unsigned int i;
 
 	bc->num_locks = num_locks;
+	bc->no_sleep = no_sleep;
 
 	for (i = 0; i < bc->num_locks; i++) {
-		init_rwsem(&bc->trees[i].lock);
+		if (no_sleep)
+			rwlock_init(&bc->trees[i].u.spinlock);
+		else
+			init_rwsem(&bc->trees[i].u.lock);
 		bc->trees[i].root = RB_ROOT;
 	}
 
@@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
 	struct lru_entry *le;
 	struct dm_buffer *b;
 
-	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
+	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
 	if (!le)
 		return NULL;
 
@@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
 	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
 
 	while (true) {
-		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
+		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
 		if (!le)
 			break;
 
@@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
 {
 	unsigned int i;
 
+	BUG_ON(bc->no_sleep);
 	for (i = 0; i < bc->num_locks; i++) {
-		down_write(&bc->trees[i].lock);
+		down_write(&bc->trees[i].u.lock);
 		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
-		up_write(&bc->trees[i].lock);
+		up_write(&bc->trees[i].u.lock);
 	}
 }
 
@@ -979,8 +1017,6 @@ struct dm_bufio_client {
 	struct dm_buffer_cache cache; /* must be last member */
 };
 
-static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
-
 /*----------------------------------------------------------------*/
 
 #define dm_bufio_in_request()	(!!current->bio_list)
@@ -1871,7 +1907,8 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
 	if (need_submit)
 		submit_io(b, REQ_OP_READ, read_endio);
 
-	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
+	if (nf != NF_GET)	/* we already tested this condition above */
+		wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
 	if (b->read_error) {
 		int error = blk_status_to_errno(b->read_error);
@@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 		r = -ENOMEM;
 		goto bad_client;
 	}
-	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
+	cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
 
 	c->bdev = bdev;
 	c->block_size = block_size;
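
For completeness, a sketch of how a caller opts in. It assumes the dm_bufio_client_create() prototype from include/linux/dm-bufio.h as used by this code (bdev, block size, reserved buffers, aux size, alloc/write callbacks, flags); the helper name and parameter values are placeholders, not code from this patch:

#include <linux/dm-bufio.h>

/*
 * Illustrative only: a target that must access buffers from a context
 * where it cannot sleep passes DM_BUFIO_CLIENT_NO_SLEEP, which makes
 * cache_init() pick the rwlock_t trees; everyone else passes 0 and keeps
 * the rw_semaphore behaviour.
 */
static struct dm_bufio_client *example_create_client(struct block_device *bdev,
						     bool must_not_sleep)
{
	return dm_bufio_client_create(bdev,
				      4096,	/* block size */
				      1,	/* reserved buffers */
				      0,	/* per-buffer aux size */
				      NULL,	/* alloc callback */
				      NULL,	/* write callback */
				      must_not_sleep ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
}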