
Commit 9357301

prabhakarlad authored and palmer-dabbelt committed
riscv: dma-mapping: switch over to generic implementation
Add helper functions for cache wback/inval/clean and use them in the arch_sync_dma_for_device()/arch_sync_dma_for_cpu() functions. The proposed changes are in preparation for switching over to the generic implementation.

The reorganization of the code is based on the patch (Link[0]) from Arnd. For now I have dropped the CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU check, as this will be enabled by default upon selection of RISCV_DMA_NONCOHERENT, and also dropped arch_dma_mark_dcache_clean().

Link[0]: https://lore.kernel.org/all/20230327121317.4081816-22-arnd@kernel.org/

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Link: https://lore.kernel.org/r/20230816232336.164413-4-prabhakar.mahadev-lad.rj@bp.renesas.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
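To make the reorganized control flow easier to follow before reading the diff below, here is a minimal userspace C sketch of the direction dispatch this patch introduces. The dma_cache_*() stubs, the hard-coded predicate values, and the main() driver are illustrative stand-ins (assumptions for this sketch, not kernel code) for the ALT_CMO_OP()-based arch_dma_cache_*() helpers and the arch_sync_dma_*() predicates added by the patch:

#include <stdbool.h>
#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stand-ins for the ALT_CMO_OP(clean/inval/flush, ...) based helpers. */
static void dma_cache_wback(void)     { puts("  clean (write back)"); }
static void dma_cache_inv(void)       { puts("  inval (invalidate)"); }
static void dma_cache_wback_inv(void) { puts("  flush (write back + invalidate)"); }

/* Per the patch, both predicates are hard-coded to true on RISC-V. */
static bool clean_before_fromdevice(void)  { return true; }
static bool cpu_needs_post_dma_flush(void) { return true; }

/* Mirrors arch_sync_dma_for_device(): cache maintenance before the DMA. */
static void sync_for_device(enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback();
                break;
        case DMA_FROM_DEVICE:
                if (!clean_before_fromdevice()) {
                        dma_cache_inv();
                        break;
                }
                /* fall through: a clean also suffices before FROM_DEVICE */
        case DMA_BIDIRECTIONAL:
                /* Skip the invalidate here if it happens after the DMA. */
                if (cpu_needs_post_dma_flush())
                        dma_cache_wback();
                else
                        dma_cache_wback_inv();
                break;
        }
}

/* Mirrors arch_sync_dma_for_cpu(): cache maintenance after the DMA. */
static void sync_for_cpu(enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE)
                return;
        /* Guard against lines speculatively prefetched during the DMA. */
        if (cpu_needs_post_dma_flush())
                dma_cache_inv();
}

int main(void)
{
        puts("TO_DEVICE:");
        sync_for_device(DMA_TO_DEVICE);
        sync_for_cpu(DMA_TO_DEVICE);
        puts("FROM_DEVICE:");
        sync_for_device(DMA_FROM_DEVICE);
        sync_for_cpu(DMA_FROM_DEVICE);
        puts("BIDIRECTIONAL:");
        sync_for_device(DMA_BIDIRECTIONAL);
        sync_for_cpu(DMA_BIDIRECTIONAL);
        return 0;
}

Built and run (e.g. cc sketch.c && ./a.out), it prints which cache-maintenance operation fires for each DMA direction: with both predicates true, FROM_DEVICE and BIDIRECTIONAL get a clean before the transfer and an invalidate after it.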
1 parent 482069e commit 9357301

File tree: 1 file changed (+51, -9 lines)

arch/riscv/mm/dma-noncoherent.c

Lines changed: 51 additions & 9 deletions
@@ -12,21 +12,61 @@
 
 static bool noncoherent_supported __ro_after_init;
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-                              enum dma_data_direction dir)
+static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
+{
+        void *vaddr = phys_to_virt(paddr);
+
+        ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+        void *vaddr = phys_to_virt(paddr);
+
+        ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
 {
         void *vaddr = phys_to_virt(paddr);
 
+        ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline bool arch_sync_dma_clean_before_fromdevice(void)
+{
+        return true;
+}
+
+static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
+{
+        return true;
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+                              enum dma_data_direction dir)
+{
         switch (dir) {
         case DMA_TO_DEVICE:
-                ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+                arch_dma_cache_wback(paddr, size);
                 break;
+
         case DMA_FROM_DEVICE:
-                ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
-                break;
+                if (!arch_sync_dma_clean_before_fromdevice()) {
+                        arch_dma_cache_inv(paddr, size);
+                        break;
+                }
+                fallthrough;
+
         case DMA_BIDIRECTIONAL:
-                ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+                /* Skip the invalidate here if it's done later */
+                if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
+                    arch_sync_dma_cpu_needs_post_dma_flush())
+                        arch_dma_cache_wback(paddr, size);
+                else
+                        arch_dma_cache_wback_inv(paddr, size);
                 break;
+
         default:
                 break;
         }
@@ -35,15 +75,17 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                            enum dma_data_direction dir)
 {
-        void *vaddr = phys_to_virt(paddr);
-
         switch (dir) {
         case DMA_TO_DEVICE:
                 break;
+
         case DMA_FROM_DEVICE:
         case DMA_BIDIRECTIONAL:
-                ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+                /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+                if (arch_sync_dma_cpu_needs_post_dma_flush())
+                        arch_dma_cache_inv(paddr, size);
                 break;
+
         default:
                 break;
         }
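Two design choices stand out in the reorganized code. First, both new predicates return true on RISC-V, so DMA_FROM_DEVICE falls through to the clean path before the transfer, and arch_sync_dma_for_cpu() invalidates again once the transfer completes; per the in-line comment, that post-DMA invalidate covers lines the CPU may have speculatively prefetched while the DMA was in flight. Second, because that post-DMA invalidate is guaranteed whenever CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU is enabled, the DMA_BIDIRECTIONAL case can issue a cheaper clean instead of a full flush before the transfer.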
