// SPDX-License-Identifier: GPL-2.0
/*
 * non-coherent cache functions for Andes AX45MP
 *
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#include <linux/cacheflush.h>
#include <linux/cacheinfo.h>
#include <linux/dma-direction.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>

#include <asm/dma-noncoherent.h>

/* L2 cache registers */
#define AX45MP_L2C_REG_CTL_OFFSET 0x8

#define AX45MP_L2C_REG_C0_CMD_OFFSET 0x40
#define AX45MP_L2C_REG_C0_ACC_OFFSET 0x48
#define AX45MP_L2C_REG_STATUS_OFFSET 0x80

/* D-cache operation */
#define AX45MP_CCTL_L1D_VA_INVAL 0 /* Invalidate an L1 cache entry */
#define AX45MP_CCTL_L1D_VA_WB 1 /* Write-back an L1 cache entry */

/* L2 CCTL status */
#define AX45MP_CCTL_L2_STATUS_IDLE 0

/* L2 CCTL status cores mask */
#define AX45MP_CCTL_L2_STATUS_C0_MASK 0xf

/* L2 cache operation */
#define AX45MP_CCTL_L2_PA_INVAL 0x8 /* Invalidate an L2 cache entry */
#define AX45MP_CCTL_L2_PA_WB 0x9 /* Write-back an L2 cache entry */

#define AX45MP_L2C_REG_PER_CORE_OFFSET 0x10
#define AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET 4

#define AX45MP_L2C_REG_CN_CMD_OFFSET(n) \
	(AX45MP_L2C_REG_C0_CMD_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_L2C_REG_CN_ACC_OFFSET(n) \
	(AX45MP_L2C_REG_C0_ACC_OFFSET + ((n) * AX45MP_L2C_REG_PER_CORE_OFFSET))
#define AX45MP_CCTL_L2_STATUS_CN_MASK(n) \
	(AX45MP_CCTL_L2_STATUS_C0_MASK << ((n) * AX45MP_CCTL_L2_STATUS_PER_CORE_OFFSET))
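
/*
 * Worked example (from the definitions above): hart 1's command register
 * is at 0x40 + 1 * 0x10 = 0x50, its access register at 0x48 + 1 * 0x10 =
 * 0x58, and its status field is bits [7:4] of the status register
 * (0xf << 4).
 */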

/* Andes CCTL CSRs used to drive the L1 D-cache operations */
#define AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM 0x80b
#define AX45MP_CCTL_REG_UCCTLCOMMAND_NUM 0x80c

#define AX45MP_CACHE_LINE_SIZE 64

struct ax45mp_priv {
	void __iomem *l2c_base;
	u32 ax45mp_cache_line_size;
};

static struct ax45mp_priv ax45mp_priv;

/* L2 cache operations */
static inline u32 ax45mp_cpu_l2c_get_cctl_status(void)
{
	return readl(ax45mp_priv.l2c_base + AX45MP_L2C_REG_STATUS_OFFSET);
}

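/*
 * Flush or invalidate one cache line at a time over [start, end): the L1
 * D-cache is driven through the Andes CCTL CSRs (which take the virtual
 * address), the shared L2 through its per-hart MMIO command/access
 * registers (which take the physical address), polling the L2 status
 * register until the hart's command completes.
 */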
static void ax45mp_cpu_cache_operation(unsigned long start, unsigned long end,
				       unsigned int l1_op, unsigned int l2_op)
{
	unsigned long line_size = ax45mp_priv.ax45mp_cache_line_size;
	void __iomem *base = ax45mp_priv.l2c_base;
	int mhartid = smp_processor_id();
	unsigned long pa;

	while (end > start) {
		/* L1: CCTL CSRs take the virtual address of the line */
		csr_write(AX45MP_CCTL_REG_UCCTLBEGINADDR_NUM, start);
		csr_write(AX45MP_CCTL_REG_UCCTLCOMMAND_NUM, l1_op);

		/* L2: per-hart MMIO registers take the physical address */
		pa = virt_to_phys((void *)start);
		writel(pa, base + AX45MP_L2C_REG_CN_ACC_OFFSET(mhartid));
		writel(l2_op, base + AX45MP_L2C_REG_CN_CMD_OFFSET(mhartid));
		while ((ax45mp_cpu_l2c_get_cctl_status() &
			AX45MP_CCTL_L2_STATUS_CN_MASK(mhartid)) !=
			AX45MP_CCTL_L2_STATUS_IDLE)
			;

		start += line_size;
	}
}

/* Write-back an L1 and L2 cache range */
static inline void ax45mp_cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_WB,
				   AX45MP_CCTL_L2_PA_WB);
}

/* Invalidate an L1 and L2 cache range */
static inline void ax45mp_cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
	ax45mp_cpu_cache_operation(start, end, AX45MP_CCTL_L1D_VA_INVAL,
				   AX45MP_CCTL_L2_PA_INVAL);
}

static void ax45mp_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	if (unlikely(start == end))
		return;

	line_size = ax45mp_priv.ax45mp_cache_line_size;

	start = start & (~(line_size - 1));
	end = ((end + line_size - 1) & (~(line_size - 1)));
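	/*
	 * Worked example of the rounding above (illustrative values): with a
	 * 64-byte line, start 0x1010 rounds down to 0x1000 and end 0x1075
	 * rounds up to 0x1080, so two full lines are invalidated.
	 */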

	local_irq_save(flags);

	ax45mp_cpu_dcache_inval_range(start, end);

	local_irq_restore(flags);
}

static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	unsigned long start = (unsigned long)phys_to_virt(paddr);
	unsigned long end = start + size;
	unsigned long line_size;
	unsigned long flags;

	line_size = ax45mp_priv.ax45mp_cache_line_size;
	/* Only start needs aligning; the per-line loop covers a partial tail */
	start = start & (~(line_size - 1));
	local_irq_save(flags);
	ax45mp_cpu_dcache_wb_range(start, end);
	local_irq_restore(flags);
}

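/*
 * For bidirectional DMA the range is first written back and then
 * invalidated, so dirty lines reach memory before being dropped.
 */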
static void ax45mp_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	ax45mp_dma_cache_wback(paddr, size);
	ax45mp_dma_cache_inv(paddr, size);
}

static int ax45mp_get_l2_line_size(struct device_node *np)
{
	int ret;

	ret = of_property_read_u32(np, "cache-line-size", &ax45mp_priv.ax45mp_cache_line_size);
	if (ret) {
		pr_err("Failed to get cache-line-size\n");
		return ret;
	}

	if (ax45mp_priv.ax45mp_cache_line_size != AX45MP_CACHE_LINE_SIZE) {
		pr_err("Expected cache-line-size to be 64 bytes (found: %u)\n",
		       ax45mp_priv.ax45mp_cache_line_size);
		return -EINVAL;
	}

	return 0;
}

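/*
 * Callbacks registered below via riscv_noncoherent_register_cache_ops();
 * the DMA mapping code then invokes them from the arch_sync_dma_for_*()
 * paths on this non-coherent platform.
 */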
static const struct riscv_nonstd_cache_ops ax45mp_cmo_ops __initdata = {
	.wback = &ax45mp_dma_cache_wback,
	.inv = &ax45mp_dma_cache_inv,
	.wback_inv = &ax45mp_dma_cache_wback_inv,
};

static const struct of_device_id ax45mp_cache_ids[] = {
	{ .compatible = "andestech,ax45mp-cache" },
	{ /* sentinel */ }
};

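/*
 * A minimal sketch of the device tree node this driver matches against;
 * the reg values below are placeholders and the real node lives in the
 * SoC .dtsi (e.g. the Renesas RZ/Five):
 *
 *	cache-controller@13400000 {
 *		compatible = "andestech,ax45mp-cache", "cache";
 *		reg = <0x0 0x13400000 0x0 0x100000>;
 *		cache-line-size = <64>;
 *	};
 */
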
static int __init ax45mp_cache_init(void)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_matching_node(NULL, ax45mp_cache_ids);
	if (!of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * If the IOCP is present on the Andes AX45MP core,
	 * riscv_cbom_block_size is guaranteed to be 0, so it can be relied
	 * upon here. When it is 0 the CMOs no longer need to be handled in
	 * software, so return success; only when it is set do we continue
	 * further in the probe path.
	 */
	if (!riscv_cbom_block_size)
		return 0;

	ax45mp_priv.l2c_base = ioremap(res.start, resource_size(&res));
	if (!ax45mp_priv.l2c_base)
		return -ENOMEM;

	ret = ax45mp_get_l2_line_size(np);
	if (ret) {
		iounmap(ax45mp_priv.l2c_base);
		return ret;
	}

	riscv_noncoherent_register_cache_ops(&ax45mp_cmo_ops);

	return 0;
}
early_initcall(ax45mp_cache_init);