@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
 				ents != -EIO && ents != -EREMOTEIO)) {
+		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
 		return -EIO;
 	}
 
@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (WARN_ON_ONCE(flag & __GFP_COMP))
 		return NULL;
 
-	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
+	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
+		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
+				DMA_BIDIRECTIONAL, flag, attrs);
 		return cpu_addr;
+	}
 
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
-	if (dma_alloc_direct(dev, ops))
+	if (dma_alloc_direct(dev, ops)) {
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
-	else if (use_dma_iommu(dev))
+	} else if (use_dma_iommu(dev)) {
 		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
-	else if (ops->alloc)
+	} else if (ops->alloc) {
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	else
+	} else {
+		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
+				attrs);
 		return NULL;
+	}
 
 	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
 			flag, attrs);
@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 */
 	WARN_ON(irqs_disabled());
 
+	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
+		       attrs);
 	if (!cpu_addr)
 		return;
 
-	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
-		       attrs);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
 				      size, dir, gfp, 0);
 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+	} else {
+		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
 	}
 	return page;
 }
@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		sgt->nents = 1;
 		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
+	} else {
+		trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
 	}
 	return sgt;
 }
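
The trace points called above come from the kernel's dma trace subsystem, whose event definitions live outside this file (include/trace/events/dma.h) and are not part of this diff. As a rough illustration of what a tracepoint such as dma_map_sg_err records, here is a minimal, hypothetical TRACE_EVENT sketch whose prototype matches the call site trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs); the field selection, format string, and header-guard name are assumptions, not the kernel's actual definition.

/*
 * Illustrative sketch only -- NOT the kernel's real definition, which
 * also records things like the device name and mapped addresses.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dma

#if !defined(_TRACE_DMA_SKETCH_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DMA_SKETCH_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/tracepoint.h>

TRACE_EVENT(dma_map_sg_err,
	TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
		 int err, enum dma_data_direction dir, unsigned long attrs),
	TP_ARGS(dev, sgl, nents, err, dir, attrs),

	TP_STRUCT__entry(
		__field(int, nents)		/* entries the caller passed in */
		__field(int, err)		/* negative errno from the mapping op */
		__field(int, dir)		/* DMA_TO_DEVICE, DMA_FROM_DEVICE, ... */
		__field(unsigned long, attrs)	/* DMA_ATTR_* flags */
	),

	TP_fast_assign(
		__entry->nents = nents;
		__entry->err = err;
		__entry->dir = dir;
		__entry->attrs = attrs;
	),

	TP_printk("nents=%d err=%d dir=%d attrs=%#lx",
		  __entry->nents, __entry->err, __entry->dir, __entry->attrs)
);

#endif /* _TRACE_DMA_SKETCH_H */

#include <trace/define_trace.h>

Once such an event exists it can typically be enabled at runtime through tracefs (e.g. under events/dma/), so a failing dma_map_sg() or dma_alloc_attrs() call becomes visible in the trace buffer even when the driver itself does not log the error.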