@@ -1090,13 +1090,28 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 }
 
 static void
-nvmet_fc_delete_assoc(struct work_struct *work)
+nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	nvmet_fc_delete_target_assoc(assoc);
+	nvmet_fc_tgt_a_put(assoc);
+}
+
+static void
+nvmet_fc_delete_assoc_work(struct work_struct *work)
 {
 	struct nvmet_fc_tgt_assoc *assoc =
 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
 
-	nvmet_fc_delete_target_assoc(assoc);
-	nvmet_fc_tgt_a_put(assoc);
+	nvmet_fc_delete_assoc(assoc);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	nvmet_fc_tgtport_get(assoc->tgtport);
+	queue_work(nvmet_wq, &assoc->del_work);
 }
 
 static struct nvmet_fc_tgt_assoc *
@@ -1127,7 +1142,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	assoc->a_id = idx;
 	INIT_LIST_HEAD(&assoc->a_list);
 	kref_init(&assoc->ref);
-	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
+	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work);
 	atomic_set(&assoc->terminating, 0);
 
 	while (needrandom) {
@@ -1483,7 +1498,7 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		queue_work(nvmet_wq, &assoc->del_work);
+		nvmet_fc_schedule_delete_assoc(assoc);
 		nvmet_fc_tgt_a_put(assoc);
 	}
 	rcu_read_unlock();
@@ -1536,7 +1551,7 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		queue_work(nvmet_wq, &assoc->del_work);
+		nvmet_fc_schedule_delete_assoc(assoc);
 		nvmet_fc_tgt_a_put(assoc);
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
@@ -1581,7 +1596,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	nvmet_fc_tgtport_put(tgtport);
 
 	if (found_ctrl) {
-		queue_work(nvmet_wq, &assoc->del_work);
+		nvmet_fc_schedule_delete_assoc(assoc);
 		nvmet_fc_tgt_a_put(assoc);
 		return;
 	}
@@ -1888,7 +1903,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
 	}
 
-	queue_work(nvmet_wq, &assoc->del_work);
+	nvmet_fc_schedule_delete_assoc(assoc);
 	nvmet_fc_tgt_a_put(assoc);
 
 	return false;
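
The pattern throughout this change is to take a reference on the owning tgtport before queuing the association's del_work and to drop it once the work handler has run, so the tgtport cannot be freed while deferred teardown is still pending. Below is a minimal, generic sketch of that take-ref-before-queue pattern; it does not use the real nvmet-fc structures, and struct parent, struct child, parent_release(), child_init() and schedule_child_delete() are illustrative names invented for the example (the driver itself queues onto nvmet_wq, the sketch uses the generic system_wq).

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative only: a parent object kept alive by a kref. */
struct parent {
	struct kref ref;
};

/* A child whose teardown is deferred to a work item. */
struct child {
	struct parent *parent;
	struct work_struct del_work;
};

static void parent_release(struct kref *ref)
{
	kfree(container_of(ref, struct parent, ref));
}

static void child_delete_work(struct work_struct *work)
{
	struct child *child = container_of(work, struct child, del_work);
	struct parent *parent = child->parent;

	kfree(child);				/* tear the child down first */
	kref_put(&parent->ref, parent_release);	/* then drop the ref taken at queue time */
}

static void child_init(struct child *child, struct parent *parent)
{
	child->parent = parent;
	INIT_WORK(&child->del_work, child_delete_work);
}

static void schedule_child_delete(struct child *child)
{
	kref_get(&child->parent->ref);	/* keep the parent alive while del_work is pending */
	queue_work(system_wq, &child->del_work);
}

As in the hunks above, the ordering is what matters: the reference is taken before queue_work() and released only from inside the work handler, after the child itself has been torn down.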