@@ -133,6 +133,11 @@ struct vhost_scsi_cmd {
 	struct se_cmd tvc_se_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+	/*
+	 * Dirty write descriptors of this command.
+	 */
+	struct vhost_log *tvc_log;
+	unsigned int tvc_log_num;
 	/* Completed commands list, serviced from vhost worker thread */
 	struct llist_node tvc_completion_list;
 	/* Used to track inflight cmd */
@@ -362,6 +367,45 @@ static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
 	return tpg->tv_fabric_prot_type;
 }
 
+static int vhost_scsi_copy_cmd_log(struct vhost_virtqueue *vq,
+				   struct vhost_scsi_cmd *cmd,
+				   struct vhost_log *log,
+				   unsigned int log_num)
+{
+	if (!cmd->tvc_log)
+		cmd->tvc_log = kmalloc_array(vq->dev->iov_limit,
+					     sizeof(*cmd->tvc_log),
+					     GFP_KERNEL);
+
+	if (unlikely(!cmd->tvc_log)) {
+		vq_err(vq, "Failed to alloc tvc_log\n");
+		return -ENOMEM;
+	}
+
+	memcpy(cmd->tvc_log, log, sizeof(*cmd->tvc_log) * log_num);
+	cmd->tvc_log_num = log_num;
+
+	return 0;
+}
+
+static void vhost_scsi_log_write(struct vhost_virtqueue *vq,
+				 struct vhost_log *log,
+				 unsigned int log_num)
+{
+	if (likely(!vhost_has_feature(vq, VHOST_F_LOG_ALL)))
+		return;
+
+	if (likely(!log_num || !log))
+		return;
+
+	/*
+	 * vhost-scsi doesn't support VIRTIO_F_ACCESS_PLATFORM.
+	 * No requirement for vq->iotlb case.
+	 */
+	WARN_ON_ONCE(unlikely(vq->iotlb));
+	vhost_log_write(vq, log, log_num, U64_MAX, NULL, 0);
+}
+
 static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
 {
 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
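
Note: each struct vhost_log entry handed over by the vhost core is assumed to describe one guest-physical range (addr, len) that the device wrote, and vhost_log_write() is expected to mark the pages covered by those ranges dirty in the userspace-provided log. The snippet below is a minimal userspace-style sketch of that bookkeeping, assuming a bitmap with one bit per 4 KiB page; sketch_log_entry, sketch_mark_dirty and LOG_PAGE_SIZE are illustrative names, not part of this patch or the vhost UAPI.

/*
 * Illustrative only: mirrors, in userspace terms, what the kernel-side
 * dirty-log write is assumed to do with each (addr, len) entry: set one
 * bit per (assumed) 4 KiB page covered by [addr, addr + len).
 */
#include <stdint.h>
#include <stddef.h>

#define LOG_PAGE_SIZE 0x1000ULL	/* assumed log granularity */

struct sketch_log_entry {
	uint64_t addr;	/* guest-physical start of the dirtied range */
	uint64_t len;	/* length of the dirtied range in bytes */
};

static void sketch_mark_dirty(unsigned long *bitmap, size_t bitmap_bits,
			      const struct sketch_log_entry *log,
			      unsigned int log_num)
{
	unsigned int i;

	for (i = 0; i < log_num; i++) {
		uint64_t page = log[i].addr / LOG_PAGE_SIZE;
		uint64_t last = (log[i].addr + log[i].len - 1) / LOG_PAGE_SIZE;

		/* Set the dirty bit for every page the range touches. */
		for (; page <= last && page < bitmap_bits; page++)
			bitmap[page / (8 * sizeof(unsigned long))] |=
				1UL << (page % (8 * sizeof(unsigned long)));
	}
}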
@@ -660,6 +704,9 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
+		vhost_scsi_log_write(cmd->tvc_vq, cmd->tvc_log,
+				     cmd->tvc_log_num);
+
 		vhost_scsi_release_cmd_res(se_cmd);
 	}
 
@@ -676,6 +723,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 				struct vhost_scsi_virtqueue, vq);
 	struct vhost_scsi_cmd *cmd;
 	struct scatterlist *sgl, *prot_sgl;
+	struct vhost_log *log;
 	int tag;
 
 	tag = sbitmap_get(&svq->scsi_tags);
@@ -687,9 +735,11 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, u64 scsi_tag)
 	cmd = &svq->scsi_cmds[tag];
 	sgl = cmd->sgl;
 	prot_sgl = cmd->prot_sgl;
+	log = cmd->tvc_log;
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->sgl = sgl;
 	cmd->prot_sgl = prot_sgl;
+	cmd->tvc_log = log;
 	cmd->tvc_se_cmd.map_tag = tag;
 	cmd->inflight = vhost_scsi_get_inflight(vq);
 
@@ -1225,6 +1275,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	u8 task_attr;
 	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
 	u8 *cdb;
+	struct vhost_log *vq_log;
+	unsigned int log_num;
 
 	mutex_lock(&vq->mutex);
 	/*
@@ -1240,8 +1292,11 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
 	vhost_disable_notify(&vs->dev, vq);
 
+	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
+		 vq->log : NULL;
+
 	do {
-		ret = vhost_scsi_get_desc(vs, vq, &vc, NULL, NULL);
+		ret = vhost_scsi_get_desc(vs, vq, &vc, vq_log, &log_num);
 		if (ret)
 			goto err;
 
@@ -1390,6 +1445,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			goto err;
 		}
 
+		if (unlikely(vq_log && log_num)) {
+			ret = vhost_scsi_copy_cmd_log(vq, cmd, vq_log, log_num);
+			if (unlikely(ret)) {
+				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+				goto err;
+			}
+		}
+
 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
 			 cdb[0], lun);
 		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
@@ -1425,11 +1488,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		if (ret == -ENXIO)
 			break;
-		else if (ret == -EIO)
+		else if (ret == -EIO) {
 			vhost_scsi_send_bad_target(vs, vq, &vc, TYPE_IO_CMD);
-		else if (ret == -ENOMEM)
+			vhost_scsi_log_write(vq, vq_log, log_num);
+		} else if (ret == -ENOMEM) {
 			vhost_scsi_send_status(vs, vq, &vc,
 					       SAM_STAT_TASK_SET_FULL);
+			vhost_scsi_log_write(vq, vq_log, log_num);
+		}
 	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 out:
 	mutex_unlock(&vq->mutex);
@@ -1760,6 +1826,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 		wait_for_completion(&vs->old_inflight[i]->comp);
 }
 
+static void vhost_scsi_destroy_vq_log(struct vhost_virtqueue *vq)
+{
+	struct vhost_scsi_virtqueue *svq = container_of(vq,
+				struct vhost_scsi_virtqueue, vq);
+	struct vhost_scsi_cmd *tv_cmd;
+	unsigned int i;
+
+	if (!svq->scsi_cmds)
+		return;
+
+	for (i = 0; i < svq->max_cmds; i++) {
+		tv_cmd = &svq->scsi_cmds[i];
+		kfree(tv_cmd->tvc_log);
+		tv_cmd->tvc_log = NULL;
+		tv_cmd->tvc_log_num = 0;
+	}
+}
+
 static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
 {
 	struct vhost_scsi_virtqueue *svq = container_of(vq,
@@ -1779,6 +1863,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
 
 	sbitmap_free(&svq->scsi_tags);
 	kfree(svq->upages);
+	vhost_scsi_destroy_vq_log(vq);
 	kfree(svq->scsi_cmds);
 	svq->scsi_cmds = NULL;
 }
@@ -2088,6 +2173,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
 	struct vhost_virtqueue *vq;
+	bool is_log, was_log;
 	int i;
 
 	if (features & ~VHOST_SCSI_FEATURES)
@@ -2100,12 +2186,39 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 		return -EFAULT;
 	}
 
+	if (!vs->dev.nvqs)
+		goto out;
+
+	is_log = features & (1 << VHOST_F_LOG_ALL);
+	/*
+	 * All VQs should have same feature.
+	 */
+	was_log = vhost_has_feature(&vs->vqs[0].vq, VHOST_F_LOG_ALL);
+
 	for (i = 0; i < vs->dev.nvqs; i++) {
 		vq = &vs->vqs[i].vq;
 		mutex_lock(&vq->mutex);
 		vq->acked_features = features;
 		mutex_unlock(&vq->mutex);
 	}
+
+	/*
+	 * If VHOST_F_LOG_ALL is removed, free tvc_log after
+	 * vq->acked_features is committed.
+	 */
+	if (!is_log && was_log) {
+		for (i = VHOST_SCSI_VQ_IO; i < vs->dev.nvqs; i++) {
+			if (!vs->vqs[i].scsi_cmds)
+				continue;
+
+			vq = &vs->vqs[i].vq;
+			mutex_lock(&vq->mutex);
+			vhost_scsi_destroy_vq_log(vq);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+
+out:
 	mutex_unlock(&vs->dev.mutex);
 	return 0;
 }
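
For context, the set_features path above is reached from userspace through the VHOST_SET_FEATURES ioctl. Below is a minimal, hedged sketch of how a VMM might toggle VHOST_F_LOG_ALL on an already-configured vhost-scsi file descriptor; toggle_log_all() is an illustrative helper, not part of this patch, and the rest of the migration flow (VHOST_SET_LOG_BASE, scanning the dirty bitmap) is omitted.

/*
 * Userspace sketch, assuming an already-set-up vhost-scsi fd (queues
 * configured, target endpoint attached). Only the feature toggle that
 * lands in vhost_scsi_set_features() is shown.
 */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int toggle_log_all(int vhost_fd, int enable)
{
	__u64 features;

	/* Start from the backend-supported feature set. */
	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
		return -1;

	if (enable)
		features |= 1ULL << VHOST_F_LOG_ALL;
	else
		features &= ~(1ULL << VHOST_F_LOG_ALL);

	/*
	 * Clearing the bit is what makes vhost_scsi_set_features() free
	 * the per-command tvc_log arrays via vhost_scsi_destroy_vq_log().
	 */
	return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
}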