@@ -175,6 +175,11 @@ struct vring_virtqueue {
 	/* Do DMA mapping by driver */
 	bool premapped;
 
+	/* Do unmap or not for desc. True only when premapped is false
+	 * and use_dma_api is true.
+	 */
+	bool do_unmap;
+
 	/* Head of free buffer list. */
 	unsigned int free_head;
 	/* Number we've added since last sync. */
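Taken together with the two constructor assignments and the premapped setter further down, the new field maintains a single invariant. Restated as one expression (this exact line does not appear in the patch, it is only a summary of the assignment sites below):

	/* do_unmap: the core unmaps buffer descriptors only when it also
	 * mapped them, i.e. the DMA API is in use and the driver did not
	 * premap the buffers itself. */
	vq->do_unmap = vq->use_dma_api && !vq->premapped;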
@@ -440,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
 {
 	u16 flags;
 
-	if (!vq->use_dma_api)
+	if (!vq->do_unmap)
 		return;
 
 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -458,18 +463,21 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
 	struct vring_desc_extra *extra = vq->split.desc_extra;
 	u16 flags;
 
-	if (!vq->use_dma_api)
-		goto out;
-
 	flags = extra[i].flags;
 
 	if (flags & VRING_DESC_F_INDIRECT) {
+		if (!vq->use_dma_api)
+			goto out;
+
 		dma_unmap_single(vring_dma_dev(vq),
 				 extra[i].addr,
 				 extra[i].len,
 				 (flags & VRING_DESC_F_WRITE) ?
 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
+		if (!vq->do_unmap)
+			goto out;
+
 		dma_unmap_page(vring_dma_dev(vq),
 			       extra[i].addr,
 			       extra[i].len,
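The asymmetry in this hunk is the point of the patch: an indirect descriptor table is allocated and DMA-mapped by the virtio core itself, so unmapping it still keys off use_dma_api, whereas the direct (buffer) descriptors may have been mapped by the driver and therefore honor do_unmap. The packed-ring hunk for vring_unmap_extra_packed below applies the same rule. A hypothetical helper, not part of the patch, that captures the decision:

	/* Illustrative only: which flag gates unmapping for a descriptor. */
	static bool vring_need_unmap(const struct vring_virtqueue *vq,
				     bool indirect_table)
	{
		/* Core-owned indirect tables: unmap iff the DMA API is in
		 * use. Buffer descriptors: skip when the driver premapped. */
		return indirect_table ? vq->use_dma_api : vq->do_unmap;
	}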
@@ -635,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 	}
 	/* Last one doesn't continue. */
 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
-	if (!indirect && vq->use_dma_api)
+	if (!indirect && vq->do_unmap)
 		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
 			~VRING_DESC_F_NEXT;
 
@@ -794,7 +802,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 					     VRING_DESC_F_INDIRECT));
 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
 
-		if (vq->use_dma_api) {
+		if (vq->do_unmap) {
 			for (j = 0; j < len / sizeof(struct vring_desc); j++)
 				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
 		}
@@ -1217,17 +1225,20 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
 {
 	u16 flags;
 
-	if (!vq->use_dma_api)
-		return;
-
 	flags = extra->flags;
 
 	if (flags & VRING_DESC_F_INDIRECT) {
+		if (!vq->use_dma_api)
+			return;
+
 		dma_unmap_single(vring_dma_dev(vq),
 				 extra->addr, extra->len,
 				 (flags & VRING_DESC_F_WRITE) ?
 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
+		if (!vq->do_unmap)
+			return;
+
 		dma_unmap_page(vring_dma_dev(vq),
 			       extra->addr, extra->len,
 			       (flags & VRING_DESC_F_WRITE) ?
@@ -1240,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
 {
 	u16 flags;
 
-	if (!vq->use_dma_api)
+	if (!vq->do_unmap)
 		return;
 
 	flags = le16_to_cpu(desc->flags);
@@ -1329,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 				sizeof(struct vring_packed_desc));
 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
 
-	if (vq->use_dma_api) {
+	if (vq->do_unmap) {
 		vq->packed.desc_extra[id].addr = addr;
 		vq->packed.desc_extra[id].len = total_sg *
 				sizeof(struct vring_packed_desc);
@@ -1470,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
 		desc[i].len = cpu_to_le32(sg->length);
 		desc[i].id = cpu_to_le16(id);
 
-		if (unlikely(vq->use_dma_api)) {
+		if (unlikely(vq->do_unmap)) {
 			vq->packed.desc_extra[curr].addr = addr;
 			vq->packed.desc_extra[curr].len = sg->length;
 			vq->packed.desc_extra[curr].flags =
@@ -1604,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	vq->free_head = id;
 	vq->vq.num_free += state->num;
 
-	if (unlikely(vq->use_dma_api)) {
+	if (unlikely(vq->do_unmap)) {
 		curr = id;
 		for (i = 0; i < state->num; i++) {
 			vring_unmap_extra_packed(vq,
@@ -1621,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
 	if (!desc)
 		return;
 
-	if (vq->use_dma_api) {
+	if (vq->do_unmap) {
 		len = vq->packed.desc_extra[id].len;
 		for (i = 0; i < len / sizeof(struct vring_packed_desc);
 				i++)
@@ -2080,6 +2091,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
 	vq->dma_dev = dma_dev;
 	vq->use_dma_api = vring_use_dma_api(vdev);
 	vq->premapped = false;
+	vq->do_unmap = vq->use_dma_api;
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
 		!context;
@@ -2587,6 +2599,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	vq->dma_dev = dma_dev;
 	vq->use_dma_api = vring_use_dma_api(vdev);
 	vq->premapped = false;
+	vq->do_unmap = vq->use_dma_api;
 
 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
 		!context;
@@ -2771,6 +2784,7 @@ int virtqueue_set_dma_premapped(struct virtqueue *_vq)
 	}
 
 	vq->premapped = true;
+	vq->do_unmap = false;
 
 	END_USE(vq);
 
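For context, virtqueue_set_dma_premapped() is the driver-facing switch: once a queue is premapped, the core neither maps nor unmaps buffer descriptors, so the driver owns DMA for them. A minimal sketch of the intended usage, assuming virtqueue_dma_dev() from the same patch series is available to obtain the mapping device; example_setup(), the buffer handling, and the DMA_TO_DEVICE direction are illustrative, not code from this patch:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/virtio.h>

	/* Illustrative driver-side usage: opt out of core unmapping, then
	 * map the buffer manually and hand in its DMA address. */
	static int example_setup(struct virtqueue *vq, void *buf, size_t len)
	{
		struct scatterlist sg;
		dma_addr_t addr;
		int err;

		/* Must be called while the queue is still empty; afterwards
		 * the queue's do_unmap is false. */
		err = virtqueue_set_dma_premapped(vq);
		if (err)
			return err;

		addr = dma_map_single(virtqueue_dma_dev(vq), buf, len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(virtqueue_dma_dev(vq), addr))
			return -ENOMEM;

		/* In premapped mode the core consumes sg->dma_address
		 * instead of mapping the page behind the sg itself. */
		sg_init_table(&sg, 1);
		sg.dma_address = addr;
		sg.length = len;

		return virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	}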