Skip to content

Commit 68b6dbf

Browse files
Sean Anderson authored and Christoph Hellwig committed
dma-mapping: trace more error paths
It can be surprising to the user if DMA functions are only traced on success. On failure, it can be unclear what the source of the problem is. Fix this by tracing all functions even when they fail. Cases where we BUG/WARN are skipped, since those should be sufficiently noisy already.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
1 parent c4484ab commit 68b6dbf

File tree

2 files changed

+54
-7
lines changed

2 files changed

+54
-7
lines changed

include/trace/events/dma.h

Lines changed: 36 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -156,6 +156,7 @@ DEFINE_EVENT(dma_alloc_class, name, \
156156

157157
DEFINE_ALLOC_EVENT(dma_alloc);
158158
DEFINE_ALLOC_EVENT(dma_alloc_pages);
159+
DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
159160

160161
TRACE_EVENT(dma_alloc_sgt,
161162
TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
@@ -320,6 +321,41 @@ TRACE_EVENT(dma_map_sg,
320321
decode_dma_attrs(__entry->attrs))
321322
);
322323

324+
TRACE_EVENT(dma_map_sg_err,
325+
TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
326+
int err, enum dma_data_direction dir, unsigned long attrs),
327+
TP_ARGS(dev, sgl, nents, err, dir, attrs),
328+
329+
TP_STRUCT__entry(
330+
__string(device, dev_name(dev))
331+
__dynamic_array(u64, phys_addrs, nents)
332+
__field(int, err)
333+
__field(enum dma_data_direction, dir)
334+
__field(unsigned long, attrs)
335+
),
336+
337+
TP_fast_assign(
338+
struct scatterlist *sg;
339+
int i;
340+
341+
__assign_str(device);
342+
for_each_sg(sgl, sg, nents, i)
343+
((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
344+
__entry->err = err;
345+
__entry->dir = dir;
346+
__entry->attrs = attrs;
347+
),
348+
349+
TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
350+
__get_str(device),
351+
decode_dma_data_direction(__entry->dir),
352+
__print_array(__get_dynamic_array(phys_addrs),
353+
__get_dynamic_array_len(phys_addrs) /
354+
sizeof(u64), sizeof(u64)),
355+
__entry->err,
356+
decode_dma_attrs(__entry->attrs))
357+
);
358+
323359
TRACE_EVENT(dma_unmap_sg,
324360
TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
325361
enum dma_data_direction dir, unsigned long attrs),

kernel/dma/mapping.c

Lines changed: 18 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -223,6 +223,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
223223
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
224224
} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
225225
ents != -EIO && ents != -EREMOTEIO)) {
226+
trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
226227
return -EIO;
227228
}
228229

@@ -604,20 +605,26 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
604605
if (WARN_ON_ONCE(flag & __GFP_COMP))
605606
return NULL;
606607

607-
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
608+
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
609+
trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
610+
DMA_BIDIRECTIONAL, flag, attrs);
608611
return cpu_addr;
612+
}
609613

610614
/* let the implementation decide on the zone to allocate from: */
611615
flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
612616

613-
if (dma_alloc_direct(dev, ops))
617+
if (dma_alloc_direct(dev, ops)) {
614618
cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
615-
else if (use_dma_iommu(dev))
619+
} else if (use_dma_iommu(dev)) {
616620
cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
617-
else if (ops->alloc)
621+
} else if (ops->alloc) {
618622
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
619-
else
623+
} else {
624+
trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
625+
attrs);
620626
return NULL;
627+
}
621628

622629
trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
623630
flag, attrs);
@@ -642,11 +649,11 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
642649
*/
643650
WARN_ON(irqs_disabled());
644651

652+
trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
653+
attrs);
645654
if (!cpu_addr)
646655
return;
647656

648-
trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
649-
attrs);
650657
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
651658
if (dma_alloc_direct(dev, ops))
652659
dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
@@ -688,6 +695,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
688695
trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
689696
size, dir, gfp, 0);
690697
debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
698+
} else {
699+
trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
691700
}
692701
return page;
693702
}
@@ -772,6 +781,8 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
772781
sgt->nents = 1;
773782
trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
774783
debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
784+
} else {
785+
trace_dma_alloc_sgt_err(dev, NULL, 0, size, gfp, dir, attrs);
775786
}
776787
return sgt;
777788
}

0 commit comments

Comments
 (0)