Skip to content

Commit 8c7bfd0

Browse files
committed
Merge branch 'bits/220-tso' into asahi-wip
2 parents 31369e3 + dc7a6df commit 8c7bfd0

File tree

12 files changed

+210
-3
lines changed

12 files changed

+210
-3
lines changed

arch/arm64/Kconfig

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -412,6 +412,9 @@ config KASAN_SHADOW_OFFSET
412412
config UNWIND_TABLES
413413
bool
414414

415+
config ARM64_ACTLR_STATE
416+
bool
417+
415418
source "arch/arm64/Kconfig.platforms"
416419

417420
menu "Kernel Features"
@@ -2166,6 +2169,17 @@ config ARM64_DEBUG_PRIORITY_MASKING
21662169
If unsure, say N
21672170
endif # ARM64_PSEUDO_NMI
21682171

2172+
config ARM64_MEMORY_MODEL_CONTROL
2173+
bool "Runtime memory model control"
2174+
default ARCH_APPLE
2175+
select ARM64_ACTLR_STATE
2176+
help
2177+
Some ARM64 CPUs support runtime switching of the CPU memory
2178+
model, which can be useful to emulate other CPU architectures
2179+
which have different memory models. Say Y to enable support
2180+
for the PR_SET_MEM_MODEL/PR_GET_MEM_MODEL prctl() calls on
2181+
CPUs with this feature.
2182+
21692183
config RELOCATABLE
21702184
bool "Build a relocatable kernel image" if EXPERT
21712185
select ARCH_HAS_RELR
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_APPLE_CPUFEATURES_H
#define __ASM_APPLE_CPUFEATURES_H

#include <linux/bits.h>
#include <asm/sysreg.h>

/*
 * Apple implementation-defined feature bits.
 *
 * AIDR_EL1 advertises whether the CPU supports the runtime-switchable
 * TSO memory model; ACTLR_EL1 carries the per-thread enable bit for it.
 *
 * Derive the BIT() masks from the *_SHIFT constants so the bit position
 * is stated exactly once per register.
 */
#define AIDR_APPLE_TSO_SHIFT	9
#define AIDR_APPLE_TSO		BIT(AIDR_APPLE_TSO_SHIFT)

#define ACTLR_APPLE_TSO_SHIFT	1
#define ACTLR_APPLE_TSO		BIT(ACTLR_APPLE_TSO_SHIFT)

#endif /* __ASM_APPLE_CPUFEATURES_H */

arch/arm64/include/asm/cpufeature.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -902,6 +902,12 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
902902
return 8;
903903
}
904904

905+
/*
 * True when the kernel context-switches ACTLR_EL1 per thread.
 *
 * NOTE(review): availability is keyed on the Apple switchable-TSO
 * capability, currently the only consumer of ACTLR state — confirm this
 * predicate is widened if other ACTLR-based features are added later.
 */
static __always_inline bool system_has_actlr_state(void)
{
	return IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) &&
		alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE);
}
910+
905911
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
906912
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
907913

arch/arm64/include/asm/processor.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -182,6 +182,9 @@ struct thread_struct {
182182
u64 sctlr_user;
183183
u64 svcr;
184184
u64 tpidr2_el0;
185+
#ifdef CONFIG_ARM64_ACTLR_STATE
186+
u64 actlr;
187+
#endif
185188
};
186189

187190
static inline unsigned int thread_get_vl(struct thread_struct *thread,

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
3434
cpufeature.o alternative.o cacheinfo.o \
3535
smp.o smp_spin_table.o topology.o smccc-call.o \
3636
syscall.o proton-pack.o idreg-override.o idle.o \
37-
patching.o
37+
patching.o cpufeature_impdef.o
3838

3939
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
4040
sys_compat.o

arch/arm64/kernel/cpufeature.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,6 +134,8 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
134134
*/
135135
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
136136

137+
void __init init_cpucap_indirect_list_impdef(void);
138+
137139
void dump_cpu_features(void)
138140
{
139141
/* file-wide pr_fmt adds "CPU features: " prefix */
@@ -951,7 +953,7 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
951953
extern const struct arm64_cpu_capabilities arm64_errata[];
952954
static const struct arm64_cpu_capabilities arm64_features[];
953955

954-
static void __init
956+
void __init
955957
init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
956958
{
957959
for (; caps->matches; caps++) {
@@ -1081,6 +1083,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
10811083

10821084
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
10831085
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
1086+
init_cpucap_indirect_list_impdef();
10841087
}
10851088

10861089
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -1448,7 +1451,7 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope)
14481451
return true;
14491452
}
14501453

1451-
static bool
1454+
bool
14521455
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
14531456
{
14541457
int val = cpuid_feature_extract_field_width(reg, entry->field_pos,

arch/arm64/kernel/cpufeature_impdef.c

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* Contains implementation-defined CPU feature definitions.
4+
*/
5+
6+
#include <asm/cpufeature.h>
7+
#include <asm/apple_cpufeature.h>
8+
9+
void __init init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps);
10+
bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry);
11+
12+
bool has_apple_feature(const struct arm64_cpu_capabilities *entry, int scope)
13+
{
14+
u64 val;
15+
WARN_ON(scope != SCOPE_SYSTEM);
16+
17+
if (read_cpuid_implementor() != ARM_CPU_IMP_APPLE)
18+
return false;
19+
20+
val = read_sysreg(aidr_el1);
21+
return feature_matches(val, entry);
22+
}
23+
24+
bool has_tso_fixed(const struct arm64_cpu_capabilities *entry, int scope)
25+
{
26+
/* List of CPUs that always use the TSO memory model */
27+
static const struct midr_range fixed_tso_list[] = {
28+
MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
29+
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
30+
MIDR_ALL_VERSIONS(MIDR_FUJITSU_A64FX),
31+
{ /* sentinel */ }
32+
};
33+
34+
return is_midr_in_range_list(read_cpuid_id(), fixed_tso_list);
35+
}
36+
37+
/*
 * Implementation-defined CPU capabilities, registered alongside the
 * architectural ones at boot via init_cpucap_indirect_list_impdef().
 */
static const struct arm64_cpu_capabilities arm64_impdef_features[] = {
#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
	{
		/* Runtime-switchable TSO, advertised in AIDR_EL1 bit 9. */
		.desc = "TSO memory model (Apple)",
		.capability = ARM64_HAS_TSO_APPLE,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_apple_feature,
		.field_pos = AIDR_APPLE_TSO_SHIFT,
		.field_width = 1,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
	{
		/* CPUs that are always TSO (matched by MIDR list). */
		.desc = "TSO memory model (Fixed)",
		.capability = ARM64_HAS_TSO_FIXED,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_tso_fixed,
	},
#endif
	{},
};
58+
59+
/* Register the IMPDEF capability table with the core cpufeature code. */
void __init init_cpucap_indirect_list_impdef(void)
{
	init_cpucap_indirect_list_from_array(arm64_impdef_features);
}

arch/arm64/kernel/process.c

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,7 @@
4343
#include <linux/stacktrace.h>
4444

4545
#include <asm/alternative.h>
46+
#include <asm/apple_cpufeature.h>
4647
#include <asm/compat.h>
4748
#include <asm/cpufeature.h>
4849
#include <asm/cacheflush.h>
@@ -374,6 +375,11 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
374375
if (system_supports_tpidr2())
375376
p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
376377

378+
#ifdef CONFIG_ARM64_ACTLR_STATE
379+
if (system_has_actlr_state())
380+
p->thread.actlr = read_sysreg(actlr_el1);
381+
#endif
382+
377383
if (stack_start) {
378384
if (is_compat_thread(task_thread_info(p)))
379385
childregs->compat_sp = stack_start;
@@ -516,6 +522,64 @@ void update_sctlr_el1(u64 sctlr)
516522
isb();
517523
}
518524

525+
#ifdef CONFIG_ARM64_ACTLR_STATE
/*
 * IMPDEF control register ACTLR_EL1 handling. Some CPUs use this to
 * expose features that can be controlled by userspace.
 *
 * Called from __switch_to(): save the outgoing task's live ACTLR_EL1
 * into current->thread.actlr, then install @next's saved value.
 */
static void actlr_thread_switch(struct task_struct *next)
{
	if (!system_has_actlr_state())
		return;

	current->thread.actlr = read_sysreg(actlr_el1);
	write_sysreg(next->thread.actlr, actlr_el1);
}
#else
/* No-op when per-thread ACTLR state is compiled out. */
static inline void actlr_thread_switch(struct task_struct *next)
{
}
#endif
543+
544+
#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
545+
int arch_prctl_mem_model_get(struct task_struct *t)
546+
{
547+
if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE) &&
548+
t->thread.actlr & ACTLR_APPLE_TSO)
549+
return PR_SET_MEM_MODEL_TSO;
550+
551+
return PR_SET_MEM_MODEL_DEFAULT;
552+
}
553+
554+
/*
 * PR_SET_MEM_MODEL: select @t's memory model.
 *
 * Returns 0 on success, -EINVAL if @val is unknown or the requested
 * model is unavailable on this system.
 *
 * NOTE(review): the sysreg write assumes @t == current; confirm no
 * caller passes another task. Also, between updating t->thread.actlr
 * and write_sysreg() there appears to be a preemption window in which
 * actlr_thread_switch() saves the stale live register back over
 * thread.actlr, losing the update — verify callers run with preemption
 * disabled or that this is tolerated.
 */
int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
{
	/* Always-TSO CPUs: requesting TSO is trivially satisfied. */
	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_FIXED) && val == PR_SET_MEM_MODEL_TSO)
		return 0;

	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE)) {
		/* TSO switching requires ACTLR context switching. */
		WARN_ON(!system_has_actlr_state());

		switch (val) {
		case PR_SET_MEM_MODEL_TSO:
			t->thread.actlr |= ACTLR_APPLE_TSO;
			break;
		case PR_SET_MEM_MODEL_DEFAULT:
			t->thread.actlr &= ~ACTLR_APPLE_TSO;
			break;
		default:
			return -EINVAL;
		}
		/* Apply immediately to the running CPU. */
		write_sysreg(t->thread.actlr, actlr_el1);
		return 0;
	}

	/* No switching support: only the default model can be "set". */
	if (val == PR_SET_MEM_MODEL_DEFAULT)
		return 0;

	return -EINVAL;
}
#endif
582+
519583
/*
520584
* Thread switching.
521585
*/
@@ -533,6 +597,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
533597
ssbs_thread_switch(next);
534598
erratum_1418040_thread_switch(next);
535599
ptrauth_thread_switch_user(next);
600+
actlr_thread_switch(next);
536601

537602
/*
538603
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -654,6 +719,10 @@ void arch_setup_new_exec(void)
654719
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
655720
PR_SPEC_ENABLE);
656721
}
722+
723+
#ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
724+
arch_prctl_mem_model_set(current, PR_SET_MEM_MODEL_DEFAULT);
725+
#endif
657726
}
658727

659728
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI

arch/arm64/kernel/setup.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -382,6 +382,14 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
382382
*/
383383
init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
384384
#endif
385+
#ifdef CONFIG_ARM64_ACTLR_STATE
386+
/* Store the boot CPU ACTLR_EL1 value as the default. This will only
387+
* be actually restored during context switching iff the platform is
388+
* known to use ACTLR_EL1 for exposable features and its layout is
389+
* known to be the same on all CPUs.
390+
*/
391+
init_task.thread.actlr = read_sysreg(actlr_el1);
392+
#endif
385393

386394
if (boot_args[1] || boot_args[2] || boot_args[3]) {
387395
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"

arch/arm64/tools/cpucaps

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,8 @@ HAS_STAGE2_FWB
5050
HAS_TCR2
5151
HAS_TIDCP1
5252
HAS_TLB_RANGE
53+
HAS_TSO_APPLE
54+
HAS_TSO_FIXED
5355
HAS_VIRT_HOST_EXTN
5456
HAS_WFXT
5557
HW_DBM

0 commit comments

Comments
 (0)