@@ -83,6 +83,7 @@ config RISCV
 	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
 	select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
 	select BUILDTIME_TABLE_SORT if MMU
 	select CLINT_TIMER if RISCV_M_MODE
@@ -116,6 +117,7 @@ config RISCV
 	select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
 	select HARDIRQS_SW_RESEND
 	select HAS_IOPORT if MMU
+	select HAVE_ALIGNED_STRUCT_PAGE
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
@@ -507,6 +509,39 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accommodate various tables.
 
+choice
+	prompt "RISC-V spinlock type"
+	default RISCV_COMBO_SPINLOCKS
+
+config RISCV_TICKET_SPINLOCKS
+	bool "Using ticket spinlock"
+
+config RISCV_QUEUED_SPINLOCKS
+	bool "Using queued spinlock"
+	depends on SMP && MMU && NONPORTABLE
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  The queued spinlock implementation requires the forward progress
+	  guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
+	  LR/SC with Ziccrse provide such guarantee.
+
+	  Select this if and only if Zabha or Ziccrse is available on your
+	  platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms
+	  without one of those extensions.
+
+	  If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks
+	  when supported and otherwise ticket spinlocks.
+
+config RISCV_COMBO_SPINLOCKS
+	bool "Using combo spinlock"
+	depends on SMP && MMU
+	select ARCH_USE_QUEUED_SPINLOCKS
+	help
+	  Embed both queued spinlock and ticket lock so that the spinlock
+	  implementation can be chosen at runtime.
+
+endchoice
+
 config RISCV_ALTERNATIVE
 	bool
 	depends on !XIP_KERNEL
@@ -532,6 +567,17 @@ config RISCV_ISA_C
 
 	  If you don't know what to do here, say Y.
 
+config RISCV_ISA_SUPM
+	bool "Supm extension for userspace pointer masking"
+	depends on 64BIT
+	default y
+	help
+	  Add support for pointer masking in userspace (Supm) when the
+	  underlying hardware extension (Smnpm or Ssnpm) is detected at boot.
+
+	  If this option is disabled, userspace will be unable to use
+	  the prctl(PR_{SET,GET}_TAGGED_ADDR_CTRL) API.
+
 config RISCV_ISA_SVNAPOT
 	bool "Svnapot extension support for supervisor mode NAPOT pages"
 	depends on 64BIT && MMU
@@ -633,6 +679,40 @@ config RISCV_ISA_ZAWRS
 	  use of these instructions in the kernel when the Zawrs extension is
 	  detected at boot.
 
+config TOOLCHAIN_HAS_ZABHA
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha)
+	depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZABHA
+	bool "Zabha extension support for atomic byte/halfword operations"
+	depends on TOOLCHAIN_HAS_ZABHA
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	  Enable the use of the Zabha ISA-extension to implement kernel
+	  byte/halfword atomic memory operations when it is detected at boot.
+
+	  If you don't know what to do here, say Y.
+
+config TOOLCHAIN_HAS_ZACAS
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas)
+	depends on AS_HAS_OPTION_ARCH
+
+config RISCV_ISA_ZACAS
+	bool "Zacas extension support for atomic CAS"
+	depends on TOOLCHAIN_HAS_ZACAS
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	  Enable the use of the Zacas ISA-extension to implement kernel atomic
+	  cmpxchg operations when it is detected at boot.
+
 	  If you don't know what to do here, say Y.
 
 config TOOLCHAIN_HAS_ZBB
@@ -786,10 +866,24 @@ config THREAD_SIZE_ORDER
 
 config RISCV_MISALIGNED
 	bool
+	help
+	  Embed support for detecting and emulating misaligned
+	  scalar or vector loads and stores.
+
+config RISCV_SCALAR_MISALIGNED
+	bool
+	select RISCV_MISALIGNED
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	help
 	  Embed support for emulating misaligned loads and stores.
 
+config RISCV_VECTOR_MISALIGNED
+	bool
+	select RISCV_MISALIGNED
+	depends on RISCV_ISA_V
+	help
+	  Enable detecting support for vector misaligned loads and stores.
+
 choice
 	prompt "Unaligned Accesses Support"
 	default RISCV_PROBE_UNALIGNED_ACCESS
@@ -801,7 +895,7 @@ choice
 
 config RISCV_PROBE_UNALIGNED_ACCESS
 	bool "Probe for hardware unaligned access support"
-	select RISCV_MISALIGNED
+	select RISCV_SCALAR_MISALIGNED
 	help
 	  During boot, the kernel will run a series of tests to determine the
 	  speed of unaligned accesses. This probing will dynamically determine
@@ -812,7 +906,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS
 
 config RISCV_EMULATED_UNALIGNED_ACCESS
 	bool "Emulate unaligned access where system support is missing"
-	select RISCV_MISALIGNED
+	select RISCV_SCALAR_MISALIGNED
 	help
 	  If unaligned memory accesses trap into the kernel as they are not
 	  supported by the system, the kernel will emulate the unaligned
@@ -841,6 +935,46 @@ config RISCV_EFFICIENT_UNALIGNED_ACCESS
 
 endchoice
 
+choice
+	prompt "Vector unaligned Accesses Support"
+	depends on RISCV_ISA_V
+	default RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+	help
+	  This determines the level of support for vector unaligned accesses. This
+	  information is used by the kernel to perform optimizations. It is also
+	  exposed to user space via the hwprobe syscall. The hardware will be
+	  probed at boot by default.
+
+config RISCV_PROBE_VECTOR_UNALIGNED_ACCESS
+	bool "Probe speed of vector unaligned accesses"
+	select RISCV_VECTOR_MISALIGNED
+	depends on RISCV_ISA_V
+	help
+	  During boot, the kernel will run a series of tests to determine the
+	  speed of vector unaligned accesses if they are supported. This probing
+	  will dynamically determine the speed of vector unaligned accesses on
+	  the underlying system if they are supported.
+
+config RISCV_SLOW_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports slow vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports slow vector unaligned memory accesses. The
+	  kernel and userspace programs may not be able to run at all on systems
+	  that do not support unaligned memory accesses.
+
+config RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
+	bool "Assume the system supports fast vector unaligned memory accesses"
+	depends on NONPORTABLE
+	help
+	  Assume that the system supports fast vector unaligned memory accesses. When
+	  enabled, this option improves the performance of the kernel on such
+	  systems. However, the kernel and userspace programs will run much more
+	  slowly, or will not be able to run at all, on systems that do not
+	  support efficient unaligned memory accesses.
+
+endchoice
+
 source "arch/riscv/Kconfig.vendor"
 
 endmenu # "Platform type"
0 commit comments