@@ -637,15 +637,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
 	return entry;
 }
 
-static void __dma_entry_alloc_check_leak(void)
+/*
+ * This should be called outside of free_entries_lock scope to avoid potential
+ * deadlocks with serial consoles that use DMA.
+ */
+static void __dma_entry_alloc_check_leak(u32 nr_entries)
 {
-	u32 tmp = nr_total_entries % nr_prealloc_entries;
+	u32 tmp = nr_entries % nr_prealloc_entries;
 
 	/* Shout each time we tick over some multiple of the initial pool */
 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
-			nr_total_entries,
-			(nr_total_entries / nr_prealloc_entries));
+			nr_entries,
+			(nr_entries / nr_prealloc_entries));
 	}
 }
 
@@ -656,8 +660,10 @@ static void __dma_entry_alloc_check_leak(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
+	bool alloc_check_leak = false;
 	struct dma_debug_entry *entry;
 	unsigned long flags;
+	u32 nr_entries;
 
 	spin_lock_irqsave(&free_entries_lock, flags);
 	if (num_free_entries == 0) {
@@ -667,13 +673,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 			pr_err("debugging out of memory - disabling\n");
 			return NULL;
 		}
-		__dma_entry_alloc_check_leak();
+		alloc_check_leak = true;
+		nr_entries = nr_total_entries;
 	}
 
 	entry = __dma_entry_alloc();
 
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 
+	if (alloc_check_leak)
+		__dma_entry_alloc_check_leak(nr_entries);
+
 #ifdef CONFIG_STACKTRACE
 	entry->stack_len = stack_trace_save(entry->stack_entries,
 					    ARRAY_SIZE(entry->stack_entries),
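
For illustration only, a minimal user-space sketch of the pattern this patch applies: snapshot the state needed for the report while the lock is held, then issue the report after the lock is dropped, so a console path that itself depends on the locked resource cannot deadlock. A pthread mutex stands in for free_entries_lock, printf() stands in for the console path, and the names alloc_entry() and report_pool_growth() are hypothetical, not part of the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nr_total_entries;	/* grows as the pool is extended */
static unsigned int nr_free_entries;

static void report_pool_growth(unsigned int nr_entries)
{
	/* Safe to "print" here: pool_lock is no longer held. */
	printf("pool grown to %u entries\n", nr_entries);
}

static void alloc_entry(void)
{
	bool report = false;
	unsigned int snapshot = 0;

	pthread_mutex_lock(&pool_lock);
	if (nr_free_entries == 0) {
		/* Pretend the pool was extended here. */
		nr_total_entries += 16;
		nr_free_entries += 16;
		report = true;
		snapshot = nr_total_entries;	/* capture under the lock */
	}
	nr_free_entries--;
	pthread_mutex_unlock(&pool_lock);

	/* Deferred call, mirroring __dma_entry_alloc_check_leak(). */
	if (report)
		report_pool_growth(snapshot);
}

int main(void)
{
	alloc_entry();
	return 0;
}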