Skip to content

Commit 7f215d0

Browse files
Merge patch series "riscv: dma-mapping: unify support for cache flushes"
Prabhakar <prabhakar.csengg@gmail.com> says: From: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> This patch series is a subset of Arnd's original series [0]. I've just picked up the bits required for RISC-V unification of cache flushing. The remaining patches from the series [0] will be taken care of by Arnd soon. * b4-shazam-merge: riscv: dma-mapping: switch over to generic implementation riscv: dma-mapping: skip invalidation before bidirectional DMA riscv: dma-mapping: only invalidate after DMA, not flush Link: https://lore.kernel.org/r/20230816232336.164413-1-prabhakar.mahadev-lad.rj@bp.renesas.com Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2 parents 580253b + 9357301 commit 7f215d0

File tree

1 file changed

+51
-9
lines changed

1 file changed

+51
-9
lines changed

arch/riscv/mm/dma-noncoherent.c

Lines changed: 51 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -14,21 +14,61 @@ static bool noncoherent_supported __ro_after_init;
1414
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
1515
EXPORT_SYMBOL_GPL(dma_cache_alignment);
1616

17-
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
18-
enum dma_data_direction dir)
17+
/*
 * Write back (clean) the CPU cache lines covering [paddr, paddr + size) so
 * that dirty data becomes visible to a non-coherent DMA device.  Uses the
 * Zicbom "clean" cache-management operation via the alternatives framework.
 */
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
23+
24+
/*
 * Invalidate the CPU cache lines covering [paddr, paddr + size) so that
 * stale cached data is discarded and subsequent CPU reads observe what the
 * DMA device wrote.  Uses the Zicbom "inval" operation.
 */
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}
30+
31+
/*
 * Combined write-back + invalidate ("flush") of the cache lines covering
 * [paddr, paddr + size): dirty data is pushed to memory and the lines are
 * then discarded.  Used for bidirectional DMA when no post-DMA invalidate
 * will follow.
 */
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}
37+
38+
static inline bool arch_sync_dma_clean_before_fromdevice(void)
39+
{
40+
return true;
41+
}
42+
43+
static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
44+
{
45+
return true;
46+
}
47+
48+
/*
 * Cache maintenance before handing the buffer [paddr, paddr + size) to a
 * non-coherent DMA device.  @dir selects the required operation:
 * clean for TO_DEVICE, and either clean or flush for FROM_DEVICE /
 * BIDIRECTIONAL depending on the policy hooks above.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Device only reads: push dirty lines out to memory. */
		arch_dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		/*
		 * Invalidate-only is sufficient when no pre-DMA clean is
		 * required; otherwise fall through and treat it like a
		 * bidirectional transfer.
		 */
		if (!arch_sync_dma_clean_before_fromdevice()) {
			arch_dma_cache_inv(paddr, size);
			break;
		}
		fallthrough;

	case DMA_BIDIRECTIONAL:
		/* Skip the invalidate here if it's done later */
		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
		    arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_wback(paddr, size);
		else
			arch_dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}
@@ -37,15 +77,17 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
3777
/*
 * Cache maintenance after the DMA device is done with the buffer
 * [paddr, paddr + size), before the CPU touches it again.  Only transfers
 * where the device may have written (FROM_DEVICE, BIDIRECTIONAL) need an
 * invalidate; TO_DEVICE needs nothing here.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		/* Device only read the buffer; CPU caches are still valid. */
		break;

	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
		if (arch_sync_dma_cpu_needs_post_dma_flush())
			arch_dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

0 commit comments

Comments
 (0)