
Commit 584ea65

evangreen authored and palmer-dabbelt committed
RISC-V: Probe for unaligned access speed
Rather than deferring unaligned access speed determinations to a vendor function, let's probe them and find out how fast they are. If we determine that an unaligned word access is faster than N byte accesses, mark the hardware's unaligned access as "fast". Otherwise, we mark accesses as slow.

The algorithm itself runs for a fixed amount of jiffies. Within each iteration it attempts to time a single loop, and then keeps only the best (fastest) loop it saw. This algorithm was found to have lower variance from run to run than my first attempt, which counted the total number of iterations that could be done in that fixed amount of jiffies. By taking only the best iteration in the loop, assuming at least one loop wasn't perturbed by an interrupt, we eliminate the effects of interrupts and other "warm up" factors like branch prediction. The only downside is it depends on having an rdtime granular and accurate enough to measure a single copy. If we ever manage to complete a loop in 0 rdtime ticks, we leave the unaligned setting at UNKNOWN.

There is a slight change in user-visible behavior here. Previously, all boards except the THead C906 reported misaligned access speed of UNKNOWN; the C906 reported FAST. With this change, since we're now measuring misaligned access speed on each hart, all RISC-V systems will have this key set as either FAST or SLOW.

Currently, we don't have a way to confidently measure the difference between SLOW and EMULATED, so we label anything not fast as SLOW. This will mislabel some systems that are actually EMULATED as SLOW. When we get support for delegating misaligned access traps to the kernel (as opposed to the firmware quietly handling it), we can explicitly test in Linux to see if unaligned accesses trap. Those systems will start to report EMULATED, though older (today's) systems without that new SBI mechanism will continue to report SLOW.

I've updated the documentation for those hwprobe values to reflect this, specifically: SLOW may or may not be emulated by software, and FAST means being faster than equivalent byte accesses. The change in documentation is accurate with respect to both the former and current behavior.

Signed-off-by: Evan Green <evan@rivosinc.com>
Acked-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20230818194136.4084400-2-evan@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
1 parent 06c2afb commit 584ea65
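For context on the user-visible behavior described in the commit message, here is a minimal userspace sketch (not part of this commit) showing how a program could read the per-CPU performance class that check_unaligned_access() ends up publishing. The key and mask constants come from the UAPI <asm/hwprobe.h>; the exact syscall invocation below is an assumption based on the documented hwprobe interface rather than something introduced by this patch.

/* Hypothetical example, not part of this commit: query the misaligned-access
 * performance class via the riscv_hwprobe syscall. Assumes kernel headers
 * recent enough to provide <asm/hwprobe.h> and __NR_riscv_hwprobe. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

        /* cpu_count = 0, cpus = NULL: ask about behavior common to all online CPUs. */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
                perror("riscv_hwprobe");
                return 1;
        }

        switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
        case RISCV_HWPROBE_MISALIGNED_FAST:
                puts("misaligned accesses: fast");
                break;
        case RISCV_HWPROBE_MISALIGNED_SLOW:
                puts("misaligned accesses: slow");
                break;
        case RISCV_HWPROBE_MISALIGNED_EMULATED:
                puts("misaligned accesses: emulated");
                break;
        default:
                puts("misaligned accesses: unknown or unsupported");
                break;
        }
        return 0;
}

With this patch applied, the commit message notes that this value should come back as FAST or SLOW on every system rather than UNKNOWN.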

File tree

7 files changed: +198 −6 lines changed


Documentation/riscv/hwprobe.rst

Lines changed: 5 additions & 6 deletions
@@ -87,13 +87,12 @@ The following keys are defined:
   emulated via software, either in or below the kernel. These accesses are
   always extremely slow.
 
-* :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are supported
-  in hardware, but are slower than the cooresponding aligned accesses
-  sequences.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower
+  than equivalent byte accesses. Misaligned accesses may be supported
+  directly in hardware, or trapped and emulated by software.
 
-* :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are supported
-  in hardware and are faster than the cooresponding aligned accesses
-  sequences.
+* :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster
+  than equivalent byte accesses.
 
 * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
   not supported at all and will generate a misaligned address fault.

arch/riscv/include/asm/cpufeature.h

Lines changed: 2 additions & 0 deletions
@@ -30,4 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
+void check_unaligned_access(int cpu);
+
 #endif

arch/riscv/kernel/Makefile

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@ extra-y += vmlinux.lds
 obj-y += head.o
 obj-y += soc.o
 obj-$(CONFIG_RISCV_ALTERNATIVE) += alternative.o
+obj-y += copy-unaligned.o
 obj-y += cpu.o
 obj-y += cpufeature.o
 obj-y += entry.o

arch/riscv/kernel/copy-unaligned.S

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023 Rivos Inc. */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+        .text
+
+/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using word loads and stores. */
+/* Note: The size is truncated to a multiple of 8 * SZREG */
+ENTRY(__riscv_copy_words_unaligned)
+        andi  a4, a2, ~((8*SZREG)-1)
+        beqz  a4, 2f
+        add   a3, a1, a4
+1:
+        REG_L a4,       0(a1)
+        REG_L a5,   SZREG(a1)
+        REG_L a6, 2*SZREG(a1)
+        REG_L a7, 3*SZREG(a1)
+        REG_L t0, 4*SZREG(a1)
+        REG_L t1, 5*SZREG(a1)
+        REG_L t2, 6*SZREG(a1)
+        REG_L t3, 7*SZREG(a1)
+        REG_S a4,       0(a0)
+        REG_S a5,   SZREG(a0)
+        REG_S a6, 2*SZREG(a0)
+        REG_S a7, 3*SZREG(a0)
+        REG_S t0, 4*SZREG(a0)
+        REG_S t1, 5*SZREG(a0)
+        REG_S t2, 6*SZREG(a0)
+        REG_S t3, 7*SZREG(a0)
+        addi  a0, a0, 8*SZREG
+        addi  a1, a1, 8*SZREG
+        bltu  a1, a3, 1b
+
+2:
+        ret
+END(__riscv_copy_words_unaligned)
+
+/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
+/* Performs a memcpy without aligning buffers, using only byte accesses. */
+/* Note: The size is truncated to a multiple of 8 */
+ENTRY(__riscv_copy_bytes_unaligned)
+        andi a4, a2, ~(8-1)
+        beqz a4, 2f
+        add  a3, a1, a4
+1:
+        lb   a4, 0(a1)
+        lb   a5, 1(a1)
+        lb   a6, 2(a1)
+        lb   a7, 3(a1)
+        lb   t0, 4(a1)
+        lb   t1, 5(a1)
+        lb   t2, 6(a1)
+        lb   t3, 7(a1)
+        sb   a4, 0(a0)
+        sb   a5, 1(a0)
+        sb   a6, 2(a0)
+        sb   a7, 3(a0)
+        sb   t0, 4(a0)
+        sb   t1, 5(a0)
+        sb   t2, 6(a0)
+        sb   t3, 7(a0)
+        addi a0, a0, 8
+        addi a1, a1, 8
+        bltu a1, a3, 1b
+
+2:
+        ret
+END(__riscv_copy_bytes_unaligned)
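For readers less comfortable with RISC-V assembly, the word-copy routine above is, in terms of data movement, roughly the C sketch below (illustrative only, not part of the patch). Only the assembly guarantees that each transfer is issued as a single, possibly misaligned, register-width load and store via REG_L/REG_S; a C compiler is free to lower the memcpy() calls differently, which is exactly why the probe uses hand-written assembly. SZREG here is an assumed stand-in for the kernel's register-size macro (4 bytes on rv32, 8 on rv64).

/* Illustrative C equivalent of __riscv_copy_words_unaligned(): copy
 * register-sized words between possibly misaligned buffers, with the size
 * rounded down to a multiple of 8 machine words, mirroring the assembly's
 * 8-way unrolled loop. */
#include <stddef.h>
#include <string.h>

#define SZREG sizeof(unsigned long)     /* assumed stand-in for asm/asm.h's SZREG */

static void copy_words_unaligned_sketch(void *dst, const void *src, size_t size)
{
        size_t bytes = size & ~((8 * SZREG) - 1);       /* truncate, as the asm does */
        unsigned char *d = dst;
        const unsigned char *s = src;

        for (size_t off = 0; off < bytes; off += SZREG) {
                unsigned long tmp;

                memcpy(&tmp, s + off, SZREG);   /* stands in for REG_L */
                memcpy(d + off, &tmp, SZREG);   /* stands in for REG_S */
        }
}

The byte-copy routine is the same idea with single-byte loads and stores and the size truncated to a multiple of 8.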

arch/riscv/kernel/copy-unaligned.h

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos, Inc.
+ */
+#ifndef __RISCV_KERNEL_COPY_UNALIGNED_H
+#define __RISCV_KERNEL_COPY_UNALIGNED_H
+
+#include <linux/types.h>
+
+void __riscv_copy_words_unaligned(void *dst, const void *src, size_t size);
+void __riscv_copy_bytes_unaligned(void *dst, const void *src, size_t size);
+
+#endif /* __RISCV_KERNEL_COPY_UNALIGNED_H */

arch/riscv/kernel/cpufeature.c

Lines changed: 104 additions & 0 deletions
@@ -19,12 +19,19 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwcap.h>
+#include <asm/hwprobe.h>
 #include <asm/patch.h>
 #include <asm/processor.h>
 #include <asm/vector.h>
 
+#include "copy-unaligned.h"
+
 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
 
+#define MISALIGNED_ACCESS_JIFFIES_LG2 1
+#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
+
 unsigned long elf_hwcap __read_mostly;
 
 /* Host ISA bitmap */
@@ -396,6 +403,103 @@ unsigned long riscv_get_elf_hwcap(void)
 	return hwcap;
 }
 
+void check_unaligned_access(int cpu)
+{
+        u64 start_cycles, end_cycles;
+        u64 word_cycles;
+        u64 byte_cycles;
+        int ratio;
+        unsigned long start_jiffies, now;
+        struct page *page;
+        void *dst;
+        void *src;
+        long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+
+        page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
+        if (!page) {
+                pr_warn("Can't alloc pages to measure memcpy performance");
+                return;
+        }
+
+        /* Make an unaligned destination buffer. */
+        dst = (void *)((unsigned long)page_address(page) | 0x1);
+        /* Unalign src as well, but differently (off by 1 + 2 = 3). */
+        src = dst + (MISALIGNED_BUFFER_SIZE / 2);
+        src += 2;
+        word_cycles = -1ULL;
+        /* Do a warmup. */
+        __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+        preempt_disable();
+        start_jiffies = jiffies;
+        while ((now = jiffies) == start_jiffies)
+                cpu_relax();
+
+        /*
+         * For a fixed amount of time, repeatedly try the function, and take
+         * the best time in cycles as the measurement.
+         */
+        while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+                start_cycles = get_cycles64();
+                /* Ensure the CSR read can't reorder WRT to the copy. */
+                mb();
+                __riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+                /* Ensure the copy ends before the end time is snapped. */
+                mb();
+                end_cycles = get_cycles64();
+                if ((end_cycles - start_cycles) < word_cycles)
+                        word_cycles = end_cycles - start_cycles;
+        }
+
+        byte_cycles = -1ULL;
+        __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+        start_jiffies = jiffies;
+        while ((now = jiffies) == start_jiffies)
+                cpu_relax();
+
+        while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
+                start_cycles = get_cycles64();
+                mb();
+                __riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
+                mb();
+                end_cycles = get_cycles64();
+                if ((end_cycles - start_cycles) < byte_cycles)
+                        byte_cycles = end_cycles - start_cycles;
+        }
+
+        preempt_enable();
+
+        /* Don't divide by zero. */
+        if (!word_cycles || !byte_cycles) {
+                pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
+                        cpu);
+
+                goto out;
+        }
+
+        if (word_cycles < byte_cycles)
+                speed = RISCV_HWPROBE_MISALIGNED_FAST;
+
+        ratio = div_u64((byte_cycles * 100), word_cycles);
+        pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
+                cpu,
+                ratio / 100,
+                ratio % 100,
+                (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
+
+        per_cpu(misaligned_access_speed, cpu) = speed;
+
+out:
+        __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+}
+
+static int check_unaligned_access_boot_cpu(void)
+{
+        check_unaligned_access(0);
+        return 0;
+}
+
+arch_initcall(check_unaligned_access_boot_cpu);
+
 #ifdef CONFIG_RISCV_ALTERNATIVE
 /*
  * Alternative patch sites consider 48 bits when determining when to patch
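To make the ratio arithmetic above concrete, with purely hypothetical numbers: if the best word-copy pass took word_cycles = 300 and the best byte-copy pass took byte_cycles = 1350, then ratio = div_u64(1350 * 100, 300) = 450, the log line reports "4.50", and since word_cycles < byte_cycles the CPU's misaligned_access_speed is set to RISCV_HWPROBE_MISALIGNED_FAST. If the word copy had instead been the slower of the two, the printed ratio would come out below 1.00 and the CPU would keep the default of RISCV_HWPROBE_MISALIGNED_SLOW.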

arch/riscv/kernel/smpboot.c

Lines changed: 2 additions & 0 deletions
@@ -26,6 +26,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
 #include <asm/cpu_ops.h>
+#include <asm/cpufeature.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
@@ -245,6 +246,7 @@ asmlinkage __visible void smp_callin(void)
 
 	numa_add_cpu(curr_cpuid);
 	set_cpu_online(curr_cpuid, 1);
+	check_unaligned_access(curr_cpuid);
 	probe_vendor_features(curr_cpuid);
 
 	if (has_vector()) {
