Skip to content

Commit e71e60c

Browse files
committed
Merge tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig: - fix a regression in setting swiotlb ->force_bounce (me) - make dma-debug less chatty (Rob Clark) * tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping: swiotlb: fix setting ->force_bounce dma-debug: make things less spammy under memory pressure
2 parents f2906aa + e15db62 commit e71e60c

File tree

2 files changed

+7
-9
lines changed

2 files changed

+7
-9
lines changed

kernel/dma/debug.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
564564

565565
rc = active_cacheline_insert(entry);
566566
if (rc == -ENOMEM) {
567-
pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
567+
pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
568568
global_disable = true;
569569
} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
570570
err_printk(entry->dev, entry,

kernel/dma/swiotlb.c

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
192192
}
193193

194194
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
195-
unsigned long nslabs, bool late_alloc)
195+
unsigned long nslabs, unsigned int flags, bool late_alloc)
196196
{
197197
void *vaddr = phys_to_virt(start);
198198
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
203203
mem->index = 0;
204204
mem->late_alloc = late_alloc;
205205

206-
if (swiotlb_force_bounce)
207-
mem->force_bounce = true;
206+
mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
208207

209208
spin_lock_init(&mem->lock);
210209
for (i = 0; i < mem->nslabs; i++) {
@@ -275,8 +274,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
275274
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
276275
__func__, alloc_size, PAGE_SIZE);
277276

278-
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
279-
mem->force_bounce = flags & SWIOTLB_FORCE;
277+
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
280278

281279
if (flags & SWIOTLB_VERBOSE)
282280
swiotlb_print_info();
@@ -348,7 +346,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
348346

349347
set_memory_decrypted((unsigned long)vstart,
350348
(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
351-
swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
349+
swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
352350

353351
swiotlb_print_info();
354352
return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
835833

836834
set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
837835
rmem->size >> PAGE_SHIFT);
838-
swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
839-
mem->force_bounce = true;
836+
swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
837+
false);
840838
mem->for_alloc = true;
841839

842840
rmem->priv = mem;

0 commit comments

Comments (0)