
Commit 16547b2

Merge tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - revert a panic on swiotlb initialization failure (Yu Zhao)
 - fix the lookup for partial syncs in dma-debug (Robin Murphy)
 - fix a shift overflow in swiotlb (Chao Gao)
 - fix a comment typo in swiotlb (Chao Gao)
 - mark a function static now that all abusers are gone (Christoph Hellwig)

* tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: mark dma_supported static
  swiotlb: fix a typo
  swiotlb: avoid potential left shift overflow
  dma-debug: improve search for partial syncs
  Revert "swiotlb: panic if nslabs is too small"
2 parents: ce88822 + 9fc18f6

4 files changed, +9 -18 lines

include/linux/dma-mapping.h (0 additions, 5 deletions)

@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
 bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
 {
 	return false;
 }
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return 0;
-}
 static inline bool dma_pci_p2pdma_supported(struct device *dev)
 {
 	return false;

kernel/dma/debug.c (2 additions, 4 deletions)

@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 					unsigned long *flags)
 {
 
-	unsigned int max_range = dma_get_max_seg_size(ref->dev);
 	struct dma_debug_entry *entry, index = *ref;
-	unsigned int range = 0;
+	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
 
-	while (range <= max_range) {
+	for (int i = 0; i < limit; i++) {
 		entry = __hash_bucket_find(*bucket, ref, containing_match);
 
 		if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 	 * Nothing found, go back a hash bucket
 	 */
 	put_hash_bucket(*bucket, *flags);
-	range += (1 << HASH_FN_SHIFT);
 	index.dev_addr -= (1 << HASH_FN_SHIFT);
 	*bucket = get_hash_bucket(&index, flags);
 }
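
The old loop bounded the backward bucket walk by dma_get_max_seg_size(), which says nothing about how far below dev_addr a containing mapping can start and let the walk run past bucket zero. The new bound caps the walk at HASH_SIZE buckets and stops before the address would underflow. Below is a standalone userspace sketch of the bounded walk; the HASH_FN_SHIFT and HASH_SIZE values mirror dma-debug's hashing scheme but are hardcoded assumptions here, not taken from the kernel headers.

#include <stdio.h>

#define HASH_FN_SHIFT 13
#define HASH_SIZE (1 << 14)

/* Toy stand-in for dma-debug's bucket hash: one bucket per
 * (1 << HASH_FN_SHIFT) bytes of address space, wrapping at HASH_SIZE. */
static int hash_fn(unsigned long long dev_addr)
{
	return (int)((dev_addr >> HASH_FN_SHIFT) & (HASH_SIZE - 1));
}

int main(void)
{
	unsigned long long dev_addr = 0x8000; /* contrived example address */

	/* Same bound as the patch: at most one step per bucket below
	 * dev_addr, never past address zero, never more than HASH_SIZE. */
	long long limit = (long long)(dev_addr >> HASH_FN_SHIFT) + 1;
	if (limit > HASH_SIZE)
		limit = HASH_SIZE;

	for (long long i = 0; i < limit; i++) {
		printf("probe bucket %d\n", hash_fn(dev_addr));
		dev_addr -= 1ULL << HASH_FN_SHIFT; /* go back one bucket */
	}
	return 0;
}

With dev_addr = 0x8000 this probes buckets 4, 3, 2, 1, 0 and stops, whereas the old max_seg_size bound could keep stepping below zero.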

kernel/dma/mapping.c (1 addition, 2 deletions)

@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
 
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
 		return 1;
 	return ops->dma_supported(dev, mask);
 }
-EXPORT_SYMBOL(dma_supported);
 
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
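
Together with the header change above, this makes dma_supported() internal to the DMA core: drivers can no longer query mask support directly and instead go through dma_set_mask()/dma_set_mask_and_coherent(), which perform the dma_supported() check internally and return -EIO if the mask cannot be satisfied. A minimal sketch of the usual probe-time pattern (the foo_ name is hypothetical):

#include <linux/dma-mapping.h>

static int foo_probe_dma(struct device *dev)
{
	/* Ask for 64-bit DMA; the core consults dma_supported()
	 * internally and fails with -EIO if the mask cannot be met. */
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	/* Fall back to 32-bit addressing if 64-bit is not supported. */
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}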

kernel/dma/swiotlb.c (6 additions, 7 deletions)

@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	if (nslabs < IO_TLB_MIN_SLABS)
-		panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
 	/*
 	 * By default allocate the bounce buffer memory from low memory, but
 	 * allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb) {
-		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
-			__func__, bytes);
+		pr_warn("%s: failed to allocate tlb structure\n", __func__);
 		return;
 	}
 
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 }
 
-#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+	return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	/*
 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
 	 * to the tlb buffer, if we knew for sure the device will
-	 * overwirte the entire current content. But we don't. Thus
+	 * overwrite the entire current content. But we don't. Thus
 	 * unconditional bounce may prevent leaking swiotlb content (i.e.
 	 * kernel memory) to user-space.
	 */
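
The slot_addr() change is the shift-overflow fix: in the old macro the shift inherited the type of idx, so a 32-bit index into a sufficiently large bounce buffer could wrap before the result was widened for the addition, while the inline function's phys_addr_t parameter forces the shift to happen in 64-bit arithmetic. A self-contained userspace sketch of the difference; IO_TLB_SHIFT = 11 matches the kernel's 2KB slot size, but the index value is contrived:

#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SHIFT 11

int main(void)
{
	uint64_t start = 0x100000000ULL;
	unsigned int idx = 3 << 20; /* a very large slot index */

	/* Old macro-style arithmetic: the shift happens in 32 bits and
	 * wraps before it is widened for the addition. */
	uint64_t bad = start + (idx << IO_TLB_SHIFT);

	/* New inline-function-style arithmetic: idx is widened first,
	 * so the shift happens in 64 bits and cannot wrap here. */
	uint64_t good = start + ((uint64_t)idx << IO_TLB_SHIFT);

	printf("bad  = %#llx\ngood = %#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}

Here bad comes out as 0x180000000 while good is the intended 0x280000000, which is exactly the class of error the inline function rules out.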
