@@ -355,9 +355,8 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 }
 
 /* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
-                                   struct scatterlist *sg,
-                                   enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+                            enum dma_data_direction direction, dma_addr_t *addr)
 {
         if (!vq->use_dma_api) {
                 /*
@@ -366,17 +365,23 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
                  * depending on the direction.
                  */
                 kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
-                return (dma_addr_t)sg_phys(sg);
+                *addr = (dma_addr_t)sg_phys(sg);
+                return 0;
         }
 
         /*
          * We can't use dma_map_sg, because we don't use scatterlists in
          * the way it expects (we don't guarantee that the scatterlist
          * will exist for the lifetime of the mapping).
          */
-        return dma_map_page(vring_dma_dev(vq),
+        *addr = dma_map_page(vring_dma_dev(vq),
                             sg_page(sg), sg->offset, sg->length,
                             direction);
+
+        if (dma_mapping_error(vring_dma_dev(vq), *addr))
+                return -ENOMEM;
+
+        return 0;
 }
 
 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
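For context, a minimal, self-contained sketch of the calling convention the reworked vring_map_one_sg() adopts: failure is reported through the int return value and the mapped address comes back through an output parameter, so callers no longer need a sentinel check such as vring_mapping_error(). This is plain user-space C, not kernel code; map_one() and its caller are hypothetical stand-ins.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;            /* stand-in for the kernel type */

/* Hypothetical mapper following the same contract as the reworked helper:
 * 0 on success, negative errno on failure, mapped address written to @addr. */
static int map_one(uint64_t phys, dma_addr_t *addr)
{
        if (phys == 0)                  /* pretend a zero address means the mapping failed */
                return -ENOMEM;

        *addr = (dma_addr_t)phys;
        return 0;
}

int main(void)
{
        dma_addr_t addr;

        /* The caller checks the return value directly, mirroring the updated
         * loops in the virtqueue_add_*() paths below. */
        if (map_one(0x1000, &addr)) {
                fprintf(stderr, "mapping failed\n");
                return 1;
        }

        printf("mapped at 0x%llx\n", (unsigned long long)addr);
        return 0;
}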
@@ -588,8 +593,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
 
         for (n = 0; n < out_sgs; n++) {
                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                        dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
-                        if (vring_mapping_error(vq, addr))
+                        dma_addr_t addr;
+
+                        if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
                                 goto unmap_release;
 
                         prev = i;
@@ -603,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
         }
         for (; n < (out_sgs + in_sgs); n++) {
                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                        dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
-                        if (vring_mapping_error(vq, addr))
+                        dma_addr_t addr;
+
+                        if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
                                 goto unmap_release;
 
                         prev = i;
@@ -1281,9 +1288,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
 
         for (n = 0; n < out_sgs + in_sgs; n++) {
                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                        addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-                                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                        if (vring_mapping_error(vq, addr))
+                        if (vring_map_one_sg(vq, sg, n < out_sgs ?
+                                             DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
                                 goto unmap_release;
 
                         desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1428,9 +1434,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
         c = 0;
         for (n = 0; n < out_sgs + in_sgs; n++) {
                 for (sg = sgs[n]; sg; sg = sg_next(sg)) {
-                        dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
-                                        DMA_TO_DEVICE : DMA_FROM_DEVICE);
-                        if (vring_mapping_error(vq, addr))
+                        dma_addr_t addr;
+
+                        if (vring_map_one_sg(vq, sg, n < out_sgs ?
+                                             DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
                                 goto unmap_release;
 
                         flags = cpu_to_le16(vq->packed.avail_used_flags |