Commit 391ce5b

Merge tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - don't leave pages decrypted for DMA in encrypted memory setups linger around on failure (Petr Tesarik)

 - fix an out of bounds access in the new dynamic swiotlb code (Petr Tesarik)

 - fix dma_addressing_limited for systems with weird physical memory layouts (Jia He)

* tag 'dma-mapping-6.7-2023-11-10' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix out-of-bounds TLB allocations with CONFIG_SWIOTLB_DYNAMIC
  dma-mapping: fix dma_addressing_limited() if dma_range_map can't cover all system RAM
  dma-mapping: move dma_addressing_limited() out of line
  swiotlb: do not free decrypted pages if dynamic
2 parents ead3b62 + 53c87e8 commit 391ce5b

File tree

5 files changed: +86 −24 lines changed


include/linux/dma-mapping.h

Lines changed: 5 additions & 14 deletions
@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
 	return 0;
 }
+static inline bool dma_addressing_limited(struct device *dev)
+{
+	return false;
+}
 static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
-	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
-			    dma_get_required_mask(dev);
-}
-
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
 	if (dev->dma_parms && dev->dma_parms->max_segment_size)
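
With the declaration now out of line, callers keep using the same API. A hypothetical probe-time check as an illustration (the function name and message are made up; the dma_* calls are the real interfaces declared above):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_driver_setup_dma(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	if (ret)
		return ret;

	/* Some system RAM may still be unreachable for this device. */
	if (dma_addressing_limited(dev))
		dev_info(dev, "DMA addressing limited, expect bounce buffering\n");

	return 0;
}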

kernel/dma/direct.c

Lines changed: 40 additions & 0 deletions
@@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+/*
+ * To check whether all ram resource ranges are covered by dma range map
+ * Returns 0 when further check is needed
+ * Returns 1 if there is some RAM range can't be covered by dma_range_map
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+				  unsigned long nr_pages, void *data)
+{
+	unsigned long end_pfn = start_pfn + nr_pages;
+	const struct bus_dma_region *bdr = NULL;
+	const struct bus_dma_region *m;
+	struct device *dev = data;
+
+	while (start_pfn < end_pfn) {
+		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+			if (start_pfn >= cpu_start_pfn &&
+			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+				bdr = m;
+				break;
+			}
+		}
+		if (!bdr)
+			return 1;
+
+		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+	}
+
+	return 0;
+}
+
+bool dma_direct_all_ram_mapped(struct device *dev)
+{
+	if (!dev->dma_range_map)
+		return true;
+	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+				      check_ram_in_range_map);
+}
+
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
 	/* If SWIOTLB is active, use its maximum mapping size */
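
The walk above answers one question: is every page frame of system RAM covered by some entry in the device's dma_range_map? A standalone userspace sketch of the same logic, using plain integers and made-up ranges instead of struct bus_dma_region (illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct region {			/* stands in for one dma_range_map entry */
	unsigned long start_pfn;
	unsigned long nr_pfns;	/* 0 terminates the table */
};

/* Return true if [start_pfn, start_pfn + nr_pages) is fully covered. */
static bool ram_range_covered(unsigned long start_pfn, unsigned long nr_pages,
			      const struct region *map)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	while (start_pfn < end_pfn) {
		const struct region *hit = NULL;
		const struct region *m;

		for (m = map; m->nr_pfns; m++) {
			if (start_pfn >= m->start_pfn &&
			    start_pfn - m->start_pfn < m->nr_pfns) {
				hit = m;
				break;
			}
		}
		if (!hit)
			return false;	/* hole: this RAM is unreachable */
		/* skip past the covering region and keep checking */
		start_pfn = hit->start_pfn + hit->nr_pfns;
	}
	return true;
}

int main(void)
{
	/* hypothetical map that only covers page frames [0, 0x80000) */
	const struct region map[] = { { 0, 0x80000 }, { 0, 0 } };

	/* a RAM block [0x40000, 0xc0000) is only half covered */
	printf("covered: %d\n", ram_range_covered(0x40000, 0x80000, map));
	return 0;
}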

kernel/dma/direct.h

Lines changed: 1 addition & 0 deletions
@@ -20,6 +20,7 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+bool dma_direct_all_ram_mapped(struct device *dev);
 size_t dma_direct_max_mapping_size(struct device *dev);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \

kernel/dma/mapping.c

Lines changed: 22 additions & 0 deletions
@@ -793,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+			 dma_get_required_mask(dev))
+		return true;
+
+	if (unlikely(ops))
+		return false;
+	return !dma_direct_all_ram_mapped(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
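
The second half of the check is what the "weird physical memory layouts" in the pull message refers to: a device whose DMA mask easily covers the highest RAM address can still be limited if its bus_dma_region map leaves part of RAM unreachable. A userspace illustration with made-up sizes (not taken from the patch; the single comparison at the end is a crude stand-in for the dma_direct_all_ram_mapped() walk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ram_top       = 256ULL << 30;		/* 256 GiB of RAM */
	uint64_t required_mask = (1ULL << 38) - 1;	/* reaches ram_top - 1 */
	uint64_t dev_mask      = ~0ULL;			/* 64-bit capable device */
	uint64_t bus_window    = 4ULL << 30;		/* bus maps only 4 GiB */

	/* old inline check: compare masks only */
	printf("mask check says limited:     %s\n",
	       dev_mask < required_mask ? "yes" : "no");	/* no */

	/* new additional check: is all RAM inside the mapped window? */
	printf("coverage check says limited: %s\n",
	       bus_window < ram_top ? "yes" : "no");		/* yes */
	return 0;
}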

kernel/dma/swiotlb.c

Lines changed: 18 additions & 10 deletions
@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
 	}
 
 	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+		mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
+					 mem->nslabs - i);
 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
 		mem->slots[i].alloc_size = 0;
 	}
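
Why the min() matters: with CONFIG_SWIOTLB_DYNAMIC a transient pool may hold fewer slots than one IO_TLB_SEGSIZE segment, so the old initialization could advertise more contiguous free slots than the pool actually has, and the slot search could then run past the end of the slots array. A minimal userspace sketch with made-up pool sizes, assuming IO_TLB_SEGSIZE is 128 and io_tlb_offset(i) is i modulo 128 as in swiotlb.c:

#include <stdio.h>

#define IO_TLB_SEGSIZE		128			/* assumed value */
#define io_tlb_offset(i)	((i) % IO_TLB_SEGSIZE)	/* assumed equivalent */
#define min(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long nslabs = 32;	/* hypothetical transient pool size */
	unsigned long i = 0;		/* first slot of the pool */

	/* old: claims a full 128-slot segment follows slot 0 ... */
	printf("old list value: %lu\n", IO_TLB_SEGSIZE - io_tlb_offset(i));
	/* ... new: capped at the 32 slots that actually exist */
	printf("new list value: %lu\n",
	       min(IO_TLB_SEGSIZE - io_tlb_offset(i), nslabs - i));
	return 0;
}
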
@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
  * alloc_dma_pages() - allocate pages to be used for DMA
  * @gfp: GFP flags for the allocation.
  * @bytes: Size of the buffer.
+ * @phys_limit: Maximum allowed physical address of the buffer.
  *
  * Allocate pages from the buddy allocator. If successful, make the allocated
  * pages decrypted that they can be used for DMA.
  *
- * Return: Decrypted pages, or %NULL on failure.
+ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
+ * if the allocated physical address was above @phys_limit.
  */
-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
+static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
 {
 	unsigned int order = get_order(bytes);
 	struct page *page;
+	phys_addr_t paddr;
 	void *vaddr;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	vaddr = page_address(page);
+	paddr = page_to_phys(page);
+	if (paddr + bytes - 1 > phys_limit) {
+		__free_pages(page, order);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	vaddr = phys_to_virt(paddr);
 	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
 		goto error;
 	return page;
 
 error:
-	__free_pages(page, order);
+	/* Intentional leak if pages cannot be encrypted again. */
+	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
+		__free_pages(page, order);
 	return NULL;
 }
 
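The error path above deliberately leaks pages when set_memory_encrypted() fails: if the pages cannot be proven to be back in their default encrypted state, returning them to the page allocator would hand out memory that the hardware or hypervisor still treats as shared. A minimal sketch of that pattern with a hypothetical helper name (illustration only, not the kernel code):

#include <linux/gfp.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

/* Hypothetical helper: free DMA pages only if their encrypted state
 * could be restored; otherwise leak them on purpose. */
static void release_dma_pages(struct page *page, unsigned int order,
			      void *vaddr, size_t bytes)
{
	if (set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
		return;			/* restore failed: intentional leak */
	__free_pages(page, order);
}
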
@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
 	else if (phys_limit <= DMA_BIT_MASK(32))
 		gfp |= __GFP_DMA32;
 
-	while ((page = alloc_dma_pages(gfp, bytes)) &&
-	       page_to_phys(page) + bytes - 1 > phys_limit) {
-		/* allocated, but too high */
-		__free_pages(page, get_order(bytes));
-
+	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 		    phys_limit < DMA_BIT_MASK(64) &&
 		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
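
The rewritten loop relies on the two failure modes alloc_dma_pages() now reports: %NULL means the buddy allocator itself failed (give up), while ERR_PTR(-EAGAIN) means pages were allocated but landed above phys_limit, so the caller retries from a more restrictive GFP zone. A simplified sketch of that calling convention (hypothetical function name; the real loop above also falls back to __GFP_DMA when CONFIG_ZONE_DMA is enabled):

#include <linux/err.h>
#include <linux/gfp.h>

static struct page *try_alloc_below_limit(gfp_t gfp, size_t bytes,
					  u64 phys_limit)
{
	struct page *page;

	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
		if (gfp & __GFP_DMA32)
			return NULL;		/* no lower zone left to try */
		gfp |= __GFP_DMA32;		/* retry from ZONE_DMA32 */
	}
	return page;		/* valid pages, or NULL if allocation failed */
}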
