@@ -131,7 +131,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 	/*
 	 * Only new queue scan work when admin and IO queues are both alive
 	 */
-	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
+	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
 		queue_work(nvme_wq, &ctrl->scan_work);
 }
 
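The new nvme_ctrl_state() accessor is not part of this file's hunks; it is presumably added to drivers/nvme/host/nvme.h by the same series as a READ_ONCE() wrapper, roughly:

	static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
	{
		return READ_ONCE(ctrl->state);
	}

Reading the state through READ_ONCE() keeps the compiler from tearing or re-reading ctrl->state in the lockless callers converted below, and pairs with the WRITE_ONCE() stores introduced in nvme_change_ctrl_state() and nvme_init_ctrl().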
@@ -143,7 +143,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
  */
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
 {
-	if (ctrl->state != NVME_CTRL_RESETTING)
+	if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
 		return -EBUSY;
 	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
 		return -EBUSY;
@@ -156,7 +156,7 @@ static void nvme_failfast_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_ctrl, failfast_work);
 
-	if (ctrl->state != NVME_CTRL_CONNECTING)
+	if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
 		return;
 
 	set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
@@ -200,7 +200,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 	ret = nvme_reset_ctrl(ctrl);
 	if (!ret) {
 		flush_work(&ctrl->reset_work);
-		if (ctrl->state != NVME_CTRL_LIVE)
+		if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
 			ret = -ENETRESET;
 	}
 
@@ -499,7 +499,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
 	spin_lock_irqsave(&ctrl->lock, flags);
 
-	old_state = ctrl->state;
+	old_state = nvme_ctrl_state(ctrl);
 	switch (new_state) {
 	case NVME_CTRL_LIVE:
 		switch (old_state) {
@@ -567,19 +567,19 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	}
 
 	if (changed) {
-		ctrl->state = new_state;
+		WRITE_ONCE(ctrl->state, new_state);
 		wake_up_all(&ctrl->state_wq);
 	}
 
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 	if (!changed)
 		return false;
 
-	if (ctrl->state == NVME_CTRL_LIVE) {
+	if (new_state == NVME_CTRL_LIVE) {
 		if (old_state == NVME_CTRL_CONNECTING)
 			nvme_stop_failfast_work(ctrl);
 		nvme_kick_requeue_lists(ctrl);
-	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
+	} else if (new_state == NVME_CTRL_CONNECTING &&
 			old_state == NVME_CTRL_RESETTING) {
 		nvme_start_failfast_work(ctrl);
 	}
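After the lock is dropped, the tail of the function deliberately tests the local new_state (and old_state) rather than re-reading ctrl->state, since another thread may already have moved the controller on to a different state. The WRITE_ONCE() store pairs with the READ_ONCE() in the accessor sketched above; a minimal illustration of that pairing (the reader shown is just one of the converted call sites, assuming that accessor):

	/* writer, under ctrl->lock */
	WRITE_ONCE(ctrl->state, new_state);
	wake_up_all(&ctrl->state_wq);

	/* lockless reader, any context */
	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);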
@@ -592,7 +592,7 @@ EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
  */
 static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
 {
-	switch (ctrl->state) {
+	switch (nvme_ctrl_state(ctrl)) {
 	case NVME_CTRL_NEW:
 	case NVME_CTRL_LIVE:
 	case NVME_CTRL_RESETTING:
@@ -617,7 +617,7 @@ bool nvme_wait_reset(struct nvme_ctrl *ctrl)
 	wait_event(ctrl->state_wq,
 		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
 		   nvme_state_terminal(ctrl));
-	return ctrl->state == NVME_CTRL_RESETTING;
+	return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
 }
 EXPORT_SYMBOL_GPL(nvme_wait_reset);
 
@@ -704,9 +704,11 @@ EXPORT_SYMBOL_GPL(nvme_init_request);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *rq)
 {
-	if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
-	    ctrl->state != NVME_CTRL_DELETING &&
-	    ctrl->state != NVME_CTRL_DEAD &&
+	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+	if (state != NVME_CTRL_DELETING_NOIO &&
+	    state != NVME_CTRL_DELETING &&
+	    state != NVME_CTRL_DEAD &&
 	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
@@ -736,7 +738,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 	 * command, which is require to set the queue live in the
 	 * appropinquate states.
 	 */
-	switch (ctrl->state) {
+	switch (nvme_ctrl_state(ctrl)) {
 	case NVME_CTRL_CONNECTING:
 		if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
 		    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -2550,7 +2552,7 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
 
 	if (ctrl->ps_max_latency_us != latency) {
 		ctrl->ps_max_latency_us = latency;
-		if (ctrl->state == NVME_CTRL_LIVE)
+		if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
 			nvme_configure_apst(ctrl);
 	}
 }
@@ -3238,7 +3240,7 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 	struct nvme_ctrl *ctrl =
 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-	switch (ctrl->state) {
+	switch (nvme_ctrl_state(ctrl)) {
 	case NVME_CTRL_LIVE:
 		break;
 	default:
@@ -3660,6 +3662,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 		goto out_unlink_ns;
 
 	down_write(&ctrl->namespaces_rwsem);
+	/*
+	 * Ensure that no namespaces are added to the ctrl list after the queues
+	 * are frozen, thereby avoiding a deadlock between scan and reset.
+	 */
+	if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
+		up_write(&ctrl->namespaces_rwsem);
+		goto out_unlink_ns;
+	}
 	nvme_ns_add_to_ctrl_list(ns);
 	up_write(&ctrl->namespaces_rwsem);
 	nvme_get_ctrl(ctrl);
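NVME_CTRL_FROZEN is not defined in core.c; the same change presumably adds it as a new bit in enum nvme_ctrl_flags in drivers/nvme/host/nvme.h, along the lines of the sketch below (the exact bit value is an assumption):

	enum nvme_ctrl_flags {
		NVME_CTRL_FAILFAST_EXPIRED	= 0,
		/* ... existing flag bits ... */
		NVME_CTRL_FROZEN		= 6,	/* assumed bit position */
	};

Bailing out of nvme_alloc_ns() while the flag is set means a scan racing with a reset cannot add a namespace whose queue was never frozen, which would otherwise leave the reset path waiting forever in nvme_wait_freeze().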
@@ -3924,7 +3934,7 @@ static void nvme_scan_work(struct work_struct *work)
 	int ret;
 
 	/* No tagset on a live ctrl means IO queues could not created */
-	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
+	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
 		return;
 
 	/*
@@ -3994,7 +4004,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	 * removing the namespaces' disks; fail all the queues now to avoid
 	 * potentially having to clean up the failed sync later.
 	 */
-	if (ctrl->state == NVME_CTRL_DEAD)
+	if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
 		nvme_mark_namespaces_dead(ctrl);
 
 	/* this is a no-op when called from the controller reset handler */
@@ -4076,7 +4086,7 @@ static void nvme_async_event_work(struct work_struct *work)
 	 * flushing ctrl async_event_work after changing the controller state
 	 * from LIVE and before freeing the admin queue.
 	 */
-	if (ctrl->state == NVME_CTRL_LIVE)
+	if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
 		ctrl->ops->submit_async_event(ctrl);
 }
 
@@ -4471,7 +4481,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 {
 	int ret;
 
-	ctrl->state = NVME_CTRL_NEW;
+	WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
 	spin_lock_init(&ctrl->lock);
 	mutex_init(&ctrl->scan_lock);
@@ -4581,6 +4591,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_unfreeze_queue(ns->queue);
 	up_read(&ctrl->namespaces_rwsem);
+	clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_unfreeze);
 
@@ -4614,6 +4625,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
+	set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_freeze_queue_start(ns->queue);
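The last two hunks bracket the frozen window: the flag goes up before any queue starts freezing and comes down only after every queue has been unfrozen, so the nvme_alloc_ns() check above cannot race past it. A minimal sketch of the ordering a transport reset path would see (the surrounding function is hypothetical; nvme_start_freeze(), nvme_wait_freeze() and nvme_unfreeze() are the real core helpers):

	static void example_reset_requeue(struct nvme_ctrl *ctrl)
	{
		nvme_start_freeze(ctrl);	/* sets NVME_CTRL_FROZEN, then starts freezing queues */
		/* ... quiesce, tear down and re-create the IO queues ... */
		nvme_wait_freeze(ctrl);		/* can no longer block on a namespace added mid-reset */
		nvme_unfreeze(ctrl);		/* unfreezes all queues, then clears NVME_CTRL_FROZEN */
	}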