@@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req)
 	}
 }
 
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req)
+{
+	nvme_end_req_zoned(req);
+	nvme_trace_bio_complete(req);
+	if (req->cmd_flags & REQ_NVME_MPATH)
+		nvme_mpath_end_request(req);
+}
+
+void nvme_end_req(struct request *req)
 {
 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
@@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req)
 		else
 			nvme_log_error(req);
 	}
-	nvme_end_req_zoned(req);
-	nvme_trace_bio_complete(req);
-	if (req->cmd_flags & REQ_NVME_MPATH)
-		nvme_mpath_end_request(req);
+	__nvme_end_req(req);
 	blk_mq_end_request(req, status);
 }
 
@@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req)
 {
 	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
-	nvme_end_req_zoned(req);
+	__nvme_end_req(req);
 }
 EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
 
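Note on the three hunks above: the per-request and batched completion paths now share the same tail via __nvme_end_req(). Before this change the batch path ran only nvme_end_req_zoned(), so with this patch it appears to also pick up the bio-completion trace point and the multipath end-request accounting; nvme_end_req() additionally loses static inline, presumably so it can be called from outside core.c. A condensed sketch of the resulting shape (error-logging details elided; this summarizes the diff, it is not additional patch content):

static inline void __nvme_end_req(struct request *req)
{
	nvme_end_req_zoned(req);
	nvme_trace_bio_complete(req);
	if (req->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_end_request(req);
}

void nvme_end_req(struct request *req)			/* per-request completion */
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	/* error logging elided */
	__nvme_end_req(req);
	blk_mq_end_request(req, status);
}

void nvme_complete_batch_req(struct request *req)	/* batched completion */
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);
	__nvme_end_req(req);				/* now shares the same tail */
}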
@@ -673,7 +678,7 @@ static void nvme_free_ns(struct kref *kref)
 	kfree(ns);
 }
 
-static inline bool nvme_get_ns(struct nvme_ns *ns)
+bool nvme_get_ns(struct nvme_ns *ns)
 {
 	return kref_get_unless_zero(&ns->kref);
 }
@@ -3679,9 +3684,10 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns, *ret = NULL;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
 		if (ns->head->ns_id == nsid) {
 			if (!nvme_get_ns(ns))
 				continue;
@@ -3691,7 +3697,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		if (ns->head->ns_id > nsid)
 			break;
 	}
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 	return ret;
 }
 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
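The two hunks above show the reader-side pattern repeated through the rest of this patch: the namespace list is walked under SRCU instead of ctrl->namespaces_rwsem, so readers may sleep (several of the iterators later in this file wait for queue freezes) while list mutation proceeds concurrently. A minimal sketch of that pattern, assuming the srcu and namespaces members of struct nvme_ctrl (the nvme.h side of this change is not shown in this file) and using a hypothetical helper name:

#include <linux/srcu.h>
#include <linux/rculist.h>

/* Hypothetical illustration of the reader side used by this patch. */
static void nvme_for_each_ns_sketch(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
		/*
		 * Per-namespace work goes here; unlike rcu_read_lock(),
		 * an SRCU read section may block, e.g. in
		 * blk_mq_freeze_queue_wait(ns->queue).
		 */
	}
	srcu_read_unlock(&ctrl->srcu, srcu_idx);
}

nvme_find_get_ns() additionally takes a reference via nvme_get_ns() before returning, since the SRCU read section only keeps the entry valid while the read lock is held.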
@@ -3705,7 +3711,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
 
 	list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
 		if (tmp->head->ns_id < ns->head->ns_id) {
-			list_add(&ns->list, &tmp->list);
+			list_add_rcu(&ns->list, &tmp->list);
 			return;
 		}
 	}
@@ -3771,17 +3777,18 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	if (nvme_update_ns_info(ns, info))
 		goto out_unlink_ns;
 
-	down_write(&ctrl->namespaces_rwsem);
+	mutex_lock(&ctrl->namespaces_lock);
 	/*
 	 * Ensure that no namespaces are added to the ctrl list after the queues
 	 * are frozen, thereby avoiding a deadlock between scan and reset.
 	 */
 	if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
-		up_write(&ctrl->namespaces_rwsem);
+		mutex_unlock(&ctrl->namespaces_lock);
 		goto out_unlink_ns;
 	}
 	nvme_ns_add_to_ctrl_list(ns);
-	up_write(&ctrl->namespaces_rwsem);
+	mutex_unlock(&ctrl->namespaces_lock);
+	synchronize_srcu(&ctrl->srcu);
 	nvme_get_ctrl(ctrl);
 
 	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
@@ -3804,9 +3811,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 
  out_cleanup_ns_from_list:
 	nvme_put_ctrl(ctrl);
-	down_write(&ctrl->namespaces_rwsem);
-	list_del_init(&ns->list);
-	up_write(&ctrl->namespaces_rwsem);
+	mutex_lock(&ctrl->namespaces_lock);
+	list_del_rcu(&ns->list);
+	mutex_unlock(&ctrl->namespaces_lock);
+	synchronize_srcu(&ctrl->srcu);
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
 	list_del_rcu(&ns->siblings);
@@ -3856,9 +3864,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	nvme_cdev_del(&ns->cdev, &ns->cdev_device);
 	del_gendisk(ns->disk);
 
-	down_write(&ns->ctrl->namespaces_rwsem);
-	list_del_init(&ns->list);
-	up_write(&ns->ctrl->namespaces_rwsem);
+	mutex_lock(&ns->ctrl->namespaces_lock);
+	list_del_rcu(&ns->list);
+	mutex_unlock(&ns->ctrl->namespaces_lock);
+	synchronize_srcu(&ns->ctrl->srcu);
 
 	if (last_path)
 		nvme_mpath_shutdown_disk(ns->head);
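The writer side, seen here and in the surrounding hunks, pairs ctrl->namespaces_lock (serializing concurrent list writers) with the _rcu list primitives (so SRCU readers always observe a consistent list) and a synchronize_srcu() that waits out readers which may still reference the unlinked entry before teardown continues. A minimal sketch, with a hypothetical helper name:

/* Hypothetical illustration of the writer side used by this patch. */
static void nvme_ns_unlink_sketch(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	mutex_lock(&ctrl->namespaces_lock);	/* serialize list writers */
	list_del_rcu(&ns->list);		/* readers see the old or new list, never a torn one */
	mutex_unlock(&ctrl->namespaces_lock);
	synchronize_srcu(&ctrl->srcu);		/* wait for all SRCU readers to drain */
}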
@@ -3948,16 +3957,17 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(rm_list);
 
-	down_write(&ctrl->namespaces_rwsem);
+	mutex_lock(&ctrl->namespaces_lock);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
 		if (ns->head->ns_id > nsid)
-			list_move_tail(&ns->list, &rm_list);
+			list_splice_init_rcu(&ns->list, &rm_list,
+					     synchronize_rcu);
 	}
-	up_write(&ctrl->namespaces_rwsem);
+	mutex_unlock(&ctrl->namespaces_lock);
+	synchronize_srcu(&ctrl->srcu);
 
 	list_for_each_entry_safe(ns, next, &rm_list, list)
 		nvme_ns_remove(ns);
-
 }
 
 static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -4127,9 +4137,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	/* this is a no-op when called from the controller reset handler */
 	nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_splice_init(&ctrl->namespaces, &ns_list);
-	up_write(&ctrl->namespaces_rwsem);
+	mutex_lock(&ctrl->namespaces_lock);
+	list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
+	mutex_unlock(&ctrl->namespaces_lock);
+	synchronize_srcu(&ctrl->srcu);
 
 	list_for_each_entry_safe(ns, next, &ns_list, list)
 		nvme_ns_remove(ns);
@@ -4577,6 +4588,7 @@ static void nvme_free_ctrl(struct device *dev)
 	key_put(ctrl->tls_key);
 	nvme_free_cels(ctrl);
 	nvme_mpath_uninit(ctrl);
+	cleanup_srcu_struct(&ctrl->srcu);
 	nvme_auth_stop(ctrl);
 	nvme_auth_free(ctrl);
 	__free_page(ctrl->discard_page);
@@ -4609,10 +4621,15 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 	ctrl->passthru_err_log_enabled = false;
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
 	spin_lock_init(&ctrl->lock);
+	mutex_init(&ctrl->namespaces_lock);
+
+	ret = init_srcu_struct(&ctrl->srcu);
+	if (ret)
+		return ret;
+
 	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
 	xa_init(&ctrl->cels);
-	init_rwsem(&ctrl->namespaces_rwsem);
 	ctrl->dev = dev;
 	ctrl->ops = ops;
 	ctrl->quirks = quirks;
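Unlike init_rwsem(), init_srcu_struct() allocates per-CPU state and can fail, which is why nvme_init_ctrl() now checks its return value and why both nvme_free_ctrl() and the nvme_init_ctrl() error path gain cleanup_srcu_struct() calls. Roughly, the pairing this patch establishes looks like the sketch below (field names taken from this patch; the struct nvme_ctrl members are presumably declared in nvme.h, which is not part of this file, and the helper names are hypothetical):

/* Sketch of the init/teardown pairing for the namespace-list protection. */
static int nvme_ctrl_list_init_sketch(struct nvme_ctrl *ctrl)
{
	int ret;

	mutex_init(&ctrl->namespaces_lock);
	ret = init_srcu_struct(&ctrl->srcu);	/* allocates per-CPU counters, may fail */
	if (ret)
		return ret;
	INIT_LIST_HEAD(&ctrl->namespaces);
	return 0;
}

static void nvme_ctrl_list_exit_sketch(struct nvme_ctrl *ctrl)
{
	cleanup_srcu_struct(&ctrl->srcu);	/* must pair with a successful init_srcu_struct() */
}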
@@ -4692,6 +4709,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 out:
 	if (ctrl->discard_page)
 		__free_page(ctrl->discard_page);
+	cleanup_srcu_struct(&ctrl->srcu);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_ctrl);
@@ -4700,61 +4718,66 @@ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
 void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
 		blk_mark_disk_dead(ns->disk);
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 }
 EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
 
 void nvme_unfreeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
 		blk_mq_unfreeze_queue(ns->queue);
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 	clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_unfreeze);
 
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
 		if (timeout <= 0)
 			break;
 	}
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 	return timeout;
 }
 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
 
 void nvme_wait_freeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
 		blk_mq_freeze_queue_wait(ns->queue);
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 }
 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
 
 void nvme_start_freeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
 	set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
 		blk_freeze_queue_start(ns->queue);
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
 
@@ -4797,11 +4820,12 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
 void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
+	int srcu_idx;
 
-	down_read(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list)
+	srcu_idx = srcu_read_lock(&ctrl->srcu);
+	list_for_each_entry_rcu(ns, &ctrl->namespaces, list)
 		blk_sync_queue(ns->queue);
-	up_read(&ctrl->namespaces_rwsem);
+	srcu_read_unlock(&ctrl->srcu, srcu_idx);
 }
 EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
 