
Commit e7c9d66

Mr-Bossman authored and palmer-dabbelt committed
RISC-V: Report vector unaligned access speed hwprobe
Detect if vector misaligned accesses are faster or slower than equivalent
vector byte accesses. This is useful for usermode to know whether vector byte
accesses or vector misaligned accesses have a better bandwidth for operations
like memcpy.

Signed-off-by: Jesse Taube <jesse@rivosinc.com>
Reviewed-by: Charlie Jenkins <charlie@rivosinc.com>
Link: https://lore.kernel.org/r/20241017-jesse_unaligned_vector-v10-5-5b33500160f8@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent d1703dc commit e7c9d66
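The performance class probed by this commit is reported to userspace through the riscv_hwprobe syscall. The sketch below (illustrative, not part of this commit) shows how a usermode consumer such as a memcpy selector might query it; it assumes the RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF key and RISCV_HWPROBE_MISALIGNED_VECTOR_* values from the uAPI <asm/hwprobe.h> introduced earlier in this series, and kernel headers that define __NR_riscv_hwprobe.

/*
 * Usermode sketch (illustrative, not from this commit): query the
 * vector misaligned-access performance class reported by the kernel.
 * Assumes the RISCV_HWPROBE_* names from the uAPI <asm/hwprobe.h>
 * added by this series and headers that define __NR_riscv_hwprobe.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
	};

	/* args: pairs, pair_count, cpusetsize, cpus (NULL = all online), flags */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0)) {
		perror("riscv_hwprobe");
		return 1;
	}

	switch (pair.value) {
	case RISCV_HWPROBE_MISALIGNED_VECTOR_FAST:
		puts("vector misaligned accesses: fast (worth using in memcpy)");
		break;
	case RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW:
		puts("vector misaligned accesses: slow (prefer byte/aligned paths)");
		break;
	case RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED:
		puts("vector misaligned accesses: unsupported");
		break;
	default:
		puts("vector misaligned accesses: unknown");
	}
	return 0;
}

On kernels built with CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS the reported value comes from the per-CPU measurement added below; otherwise it falls back to the Kconfig assumptions handled in sys_hwprobe.c.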

6 files changed: 228 additions, 3 deletions

arch/riscv/Kconfig

Lines changed: 18 additions & 0 deletions
@@ -875,6 +875,24 @@ config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
 	  will dynamically determine the speed of vector unaligned accesses on
 	  the underlying system if they are supported.
 
+config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports slow vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports slow vector unaligned memory accesses. The
+	  kernel and userspace programs may not be able to run at all on systems
+	  that do not support unaligned memory accesses.
+
+config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports fast vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports fast vector unaligned memory accesses. When
+	  enabled, this option improves the performance of the kernel on such
+	  systems. However, the kernel and userspace programs will run much more
+	  slowly, or will not be able to run at all, on systems that do not
+	  support efficient unaligned memory accesses.
+
 endchoice
 
 source "arch/riscv/Kconfig.vendor"

arch/riscv/kernel/Makefile

Lines changed: 2 additions & 1 deletion
@@ -70,7 +70,8 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
 
 obj-$(CONFIG_RISCV_MISALIGNED)	+= traps_misaligned.o
 obj-$(CONFIG_RISCV_MISALIGNED)	+= unaligned_access_speed.o
-obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)	+= copy-unaligned.o
+obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)	+= copy-unaligned.o
+obj-$(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)	+= vec-copy-unaligned.o
 
 obj-$(CONFIG_FPU)	+= fpu.o
 obj-$(CONFIG_FPU)	+= kernel_mode_fpu.o

arch/riscv/kernel/copy-unaligned.h

Lines changed: 5 additions & 0 deletions
@@ -10,4 +10,9 @@
 void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
 void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
 
+#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+void __riscv_copy_vec_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_vec_bytes_unaligned(void *dst, const void *src, size_t size);
+#endif
+
 #endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */

arch/riscv/kernel/sys_hwprobe.c

Lines changed: 6 additions & 0 deletions
@@ -228,6 +228,12 @@ static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
 #else
 static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
 {
+	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
+		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
+
+	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
+		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
+
 	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
 }
 #endif

arch/riscv/kernel/unaligned_access_speed.c

Lines changed: 139 additions & 2 deletions
@@ -6,11 +6,13 @@
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/jump_label.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/types.h>
 #include <asm/cpufeature.h>
 #include <asm/hwprobe.h>
+#include <asm/vector.h>
 
 #include "copy-unaligned.h"
 
@@ -268,12 +270,147 @@ static int check_unaligned_access_speed_all_cpus(void)
 }
 #endif
 
+#ifdef CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+static void check_vector_unaligned_access(struct work_struct *work __always_unused)
+{
+	int cpu = smp_processor_id();
+	u64 start_cycles, end_cycles;
+	u64 word_cycles;
+	u64 byte_cycles;
+	int ratio;
+	unsigned long start_jiffies, now;
+	struct page *page;
+	void *dst;
+	void *src;
+	long speed = RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
+
+	if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
+		return;
+
+	page = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+	if (!page) {
+		pr_warn("Allocation failure, not measuring vector misaligned performance\n");
+		return;
+	}
+
+	/* Make an unaligned destination buffer. */
+	dst = (void *)((unsigned long)page_address(page) | 0x1);
+	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
+	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+	src += 2;
+	word_cycles = -1ULL;
+
+	/* Do a warmup. */
+	kernel_vector_begin();
+	__riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+
+	start_jiffies = jiffies;
+	while ((now = jiffies) == start_jiffies)
+		cpu_relax();
+
+	/*
+	 * For a fixed amount of time, repeatedly try the function, and take
+	 * the best time in cycles as the measurement.
+	 */
+	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+		start_cycles = get_cycles64();
+		/* Ensure the CSR read can't reorder WRT to the copy. */
+		mb();
+		__riscv_copy_vec_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		/* Ensure the copy ends before the end time is snapped. */
+		mb();
+		end_cycles = get_cycles64();
+		if ((end_cycles - start_cycles) < word_cycles)
+			word_cycles = end_cycles - start_cycles;
+	}
+
+	byte_cycles = -1ULL;
+	__riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+	start_jiffies = jiffies;
+	while ((now = jiffies) == start_jiffies)
+		cpu_relax();
+
+	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+		start_cycles = get_cycles64();
+		/* Ensure the CSR read can't reorder WRT to the copy. */
+		mb();
+		__riscv_copy_vec_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+		/* Ensure the copy ends before the end time is snapped. */
+		mb();
+		end_cycles = get_cycles64();
+		if ((end_cycles - start_cycles) < byte_cycles)
+			byte_cycles = end_cycles - start_cycles;
+	}
+
+	kernel_vector_end();
+
+	/* Don't divide by zero. */
+	if (!word_cycles || !byte_cycles) {
+		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned vector access speed\n",
+			cpu);
+
+		return;
+	}
+
+	if (word_cycles < byte_cycles)
+		speed = RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
+
+	ratio = div_u64((byte_cycles * 100), word_cycles);
+	pr_info("cpu%d: Ratio of vector byte access time to vector unaligned word access is %d.%02d, unaligned accesses are %s\n",
+		cpu,
+		ratio / 100,
+		ratio % 100,
+		(speed == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST) ? "fast" : "slow");
+
+	per_cpu(vector_misaligned_access, cpu) = speed;
+}
+
+static int riscv_online_cpu_vec(unsigned int cpu)
+{
+	if (!has_vector())
+		return 0;
+
+	if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
+		return 0;
+
+	check_vector_unaligned_access_emulated(NULL);
+	check_vector_unaligned_access(NULL);
+	return 0;
+}
+
+/* Measure unaligned access speed on all CPUs present at boot in parallel. */
+static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+	schedule_on_each_cpu(check_vector_unaligned_access);
+
+	/*
+	 * Setup hotplug callbacks for any new CPUs that come online or go
+	 * offline.
+	 */
+	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+				  riscv_online_cpu_vec, NULL);
+
+	return 0;
+}
+#else /* CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS */
+static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
+{
+	return 0;
+}
+#endif
+
 static int check_unaligned_access_all_cpus(void)
 {
-	bool all_cpus_emulated;
+	bool all_cpus_emulated, all_cpus_vec_unsupported;
 
 	all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
-	check_vector_unaligned_access_emulated_all_cpus();
+	all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
+
+	if (!all_cpus_vec_unsupported &&
+	    IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
+		kthread_run(vec_check_unaligned_access_speed_all_cpus,
+			    NULL, "vec_check_unaligned_access_speed_all_cpus");
+	}
 
 	if (!all_cpus_emulated)
 		return check_unaligned_access_speed_all_cpus();
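
The probe above uses a simple methodology: synchronize to a jiffy boundary, run the copy routine repeatedly for a fixed window, and keep the best (minimum) cycle count, which filters out interrupts and other noise. The userspace sketch below mirrors that pattern outside the kernel; it is illustrative only (all names in it are invented), using clock_gettime() in place of get_cycles64() and a plain byte copy standing in for the vector routines.

/*
 * Userspace sketch of the measurement pattern used above (illustrative
 * only; every name here is invented): time a copy routine repeatedly
 * for a fixed window and keep the best run.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define WINDOW_NS  (16u * 1000 * 1000)   /* ~16 ms window, akin to the jiffy window */
#define COPY_SIZE  (64 * 1024)

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Stand-in for __riscv_copy_vec_{words,bytes}_unaligned(). */
static void byte_copy(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
}

/* Best (minimum) time for one routine over the fixed window. */
static uint64_t best_time(void (*copy)(void *, const void *, size_t),
			  void *dst, const void *src)
{
	uint64_t best = UINT64_MAX;
	uint64_t window_start = now_ns();

	while (now_ns() - window_start < WINDOW_NS) {
		uint64_t t0 = now_ns();

		copy(dst, src, COPY_SIZE);

		uint64_t t1 = now_ns();

		if (t1 - t0 < best)
			best = t1 - t0;
	}
	return best;
}

int main(void)
{
	static unsigned char a[COPY_SIZE + 8], b[COPY_SIZE + 8];
	/* Deliberately misalign both buffers, as the kernel probe does. */
	uint64_t best = best_time(byte_copy, a + 1, b + 3);

	printf("best misaligned byte-copy: %llu ns for %d bytes\n",
	       (unsigned long long)best, COPY_SIZE);
	return 0;
}

Taking the minimum rather than the mean is deliberate: the least-disturbed run is the one that best reflects the hardware, which is all the fast/slow comparison needs.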
arch/riscv/kernel/vec-copy-unaligned.S

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024 Rivos Inc. */
+
+#include <linux/args.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+	.text
+
+#define WORD_EEW 32
+
+#define WORD_SEW CONCATENATE(e, WORD_EEW)
+#define VEC_L CONCATENATE(vle, WORD_EEW).v
+#define VEC_S CONCATENATE(vse, WORD_EEW).v
+
+/* void __riscv_copy_vec_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of WORD_EEW */
+SYM_FUNC_START(__riscv_copy_vec_words_unaligned)
+	andi  a4, a2, ~(WORD_EEW-1)
+	beqz  a4, 2f
+	add   a3, a1, a4
+	.option push
+	.option arch, +zve32x
+1:
+	vsetivli t0, 8, WORD_SEW, m8, ta, ma
+	VEC_L v0, (a1)
+	VEC_S v0, (a0)
+	addi  a0, a0, WORD_EEW
+	addi  a1, a1, WORD_EEW
+	bltu  a1, a3, 1b
+
+2:
+	.option pop
+	ret
+SYM_FUNC_END(__riscv_copy_vec_words_unaligned)
+
+/* void __riscv_copy_vec_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+SYM_FUNC_START(__riscv_copy_vec_bytes_unaligned)
+	andi a4, a2, ~(8-1)
+	beqz a4, 2f
+	add  a3, a1, a4
+	.option push
+	.option arch, +zve32x
+1:
+	vsetivli t0, 8, e8, m8, ta, ma
+	vle8.v v0, (a1)
+	vse8.v v0, (a0)
+	addi a0, a0, 8
+	addi a1, a1, 8
+	bltu a1, a3, 1b
+
+2:
+	.option pop
+	ret
+SYM_FUNC_END(__riscv_copy_vec_bytes_unaligned)
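
For readers more comfortable with C than with vector assembly, here is a rough equivalent of the word-copy routine above written with the RVV C intrinsics from <riscv_vector.h> (v1.0 intrinsics naming). This is a sketch, not kernel code: the function name is invented, it assumes a toolchain with the vector extension enabled, and it relies on hardware support for element-misaligned vector accesses just as the assembly does.

/*
 * Illustrative C sketch (not from this commit) of the word-copy loop
 * above: copy size bytes truncated to a multiple of 32, moving
 * 8 x 32-bit elements per iteration without aligning either pointer.
 */
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void vec_words_unaligned_copy(void *dst, const void *src, size_t size)
{
	uint32_t *d = dst;
	const uint32_t *s = src;
	size_t bytes = size & ~(size_t)31;	/* andi a4, a2, ~(WORD_EEW-1) */

	for (size_t off = 0; off < bytes; off += 32) {
		/* vsetivli t0, 8, e32, m8 / vle32.v / vse32.v */
		vuint32m8_t v = __riscv_vle32_v_u32m8(s, 8);

		__riscv_vse32_v_u32m8(d, v, 8);
		s += 8;
		d += 8;
	}
}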
