@@ -180,12 +180,12 @@ static inline void set_sve_default_vl(int val)
 	set_default_vl(ARM64_VEC_SVE, val);
 }
 
-static void __percpu *efi_sve_state;
+static u8 *efi_sve_state;
 
 #else /* ! CONFIG_ARM64_SVE */
 
 /* Dummy declaration for code that will be optimised out: */
-extern void __percpu *efi_sve_state;
+extern u8 *efi_sve_state;
 
 #endif /* ! CONFIG_ARM64_SVE */
 
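The first hunk changes efi_sve_state from a per-CPU pointer to an ordinary pointer. As a rough sketch of what that means for users of the buffer (illustrative only, not part of the patch; buf_percpu, buf_global and save_regs_into() are made-up names, while this_cpu_ptr() is the real kernel accessor):

/* Old style: one buffer per CPU, resolved to this CPU's copy at use time. */
static void __percpu *buf_percpu;

static void save_old(void)
{
	char *p = this_cpu_ptr(buf_percpu);	/* pointer to this CPU's instance */

	save_regs_into(p);			/* hypothetical helper */
}

/* New style: one shared buffer, dereferenced directly. */
static u8 *buf_global;

static void save_new(void)
{
	save_regs_into(buf_global);
}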
@@ -1131,15 +1131,15 @@ static void __init sve_efi_setup(void)
 	if (!sve_vl_valid(max_vl))
 		goto fail;
 
-	efi_sve_state = __alloc_percpu(
-		SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
+	efi_sve_state = kmalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)),
+				GFP_KERNEL);
 	if (!efi_sve_state)
 		goto fail;
 
 	return;
 
 fail:
-	panic("Cannot allocate percpu memory for EFI SVE save/restore");
+	panic("Cannot allocate memory for EFI SVE save/restore");
 }
 
 void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
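Here the allocation in sve_efi_setup() moves from __alloc_percpu() to a single kmalloc(). A minimal sketch of the sizing logic, under the assumption that SVE_SIG_REGS_SIZE() and sve_vq_from_vl() behave as in the arm64 signal-frame headers (the helper name below is hypothetical):

/* Size one shared save area for the largest supported vector length. */
static u8 *alloc_efi_sve_buffer(int max_vl)
{
	unsigned int vq = sve_vq_from_vl(max_vl);	/* vector length in quadwords */
	size_t size = SVE_SIG_REGS_SIZE(vq);		/* Z, P and FFR register block */

	return kmalloc(size, GFP_KERNEL);		/* NULL on failure, as checked above */
}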
@@ -1948,10 +1948,10 @@ EXPORT_SYMBOL_GPL(kernel_neon_end);
 
 #ifdef CONFIG_EFI
 
-static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
-static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
-static DEFINE_PER_CPU(bool, efi_sve_state_used);
-static DEFINE_PER_CPU(bool, efi_sm_state);
+static struct user_fpsimd_state efi_fpsimd_state;
+static bool efi_fpsimd_state_used;
+static bool efi_sve_state_used;
+static bool efi_sm_state;
 
 /*
  * EFI runtime services support functions
@@ -1984,18 +1984,16 @@ void __efi_fpsimd_begin(void)
 		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
 		 * preserving:
 		 */
-		if (system_supports_sve() && likely(efi_sve_state)) {
-			char *sve_state = this_cpu_ptr(efi_sve_state);
+		if (system_supports_sve() && efi_sve_state != NULL) {
 			bool ffr = true;
 			u64 svcr;
 
-			__this_cpu_write(efi_sve_state_used, true);
+			efi_sve_state_used = true;
 
 			if (system_supports_sme()) {
 				svcr = read_sysreg_s(SYS_SVCR);
 
-				__this_cpu_write(efi_sm_state,
-						 svcr & SVCR_SM_MASK);
+				efi_sm_state = svcr & SVCR_SM_MASK;
 
 				/*
 				 * Unless we have FA64 FFR does not
@@ -2005,19 +2003,18 @@ void __efi_fpsimd_begin(void)
 				ffr = !(svcr & SVCR_SM_MASK);
 			}
 
-			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
-				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
-				       ffr);
+			sve_save_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
+				       &efi_fpsimd_state.fpsr, ffr);
 
 			if (system_supports_sme())
 				sysreg_clear_set_s(SYS_SVCR,
 						   SVCR_SM_MASK, 0);
 
 		} else {
-			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+			fpsimd_save_state(&efi_fpsimd_state);
 		}
 
-		__this_cpu_write(efi_fpsimd_state_used, true);
+		efi_fpsimd_state_used = true;
 	}
 }
 
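The logic in __efi_fpsimd_begin() is unchanged apart from the storage: when SME is present, the current SVCR.SM bit is recorded in efi_sm_state and FFR is only saved when the CPU is not in streaming mode, since FFR is not architected in streaming mode unless FA64 is implemented. A condensed sketch of that decision, mirroring the hunk above rather than adding to it:

	bool ffr = true;	/* save FFR by default */

	if (system_supports_sme()) {
		u64 svcr = read_sysreg_s(SYS_SVCR);

		efi_sm_state = svcr & SVCR_SM_MASK;	/* remember streaming mode */
		ffr = !(svcr & SVCR_SM_MASK);		/* no FFR while streaming (without FA64) */
	}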
@@ -2029,12 +2026,10 @@ void __efi_fpsimd_end(void)
 	if (!system_supports_fpsimd())
 		return;
 
-	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
+	if (!efi_fpsimd_state_used) {
 		kernel_neon_end();
 	} else {
-		if (system_supports_sve() &&
-		    likely(__this_cpu_read(efi_sve_state_used))) {
-			char const *sve_state = this_cpu_ptr(efi_sve_state);
+		if (system_supports_sve() && efi_sve_state_used) {
 			bool ffr = true;
 
 			/*
@@ -2043,7 +2038,7 @@ void __efi_fpsimd_end(void)
 			 * streaming mode.
 			 */
 			if (system_supports_sme()) {
-				if (__this_cpu_read(efi_sm_state)) {
+				if (efi_sm_state) {
 					sysreg_clear_set_s(SYS_SVCR,
 							   0,
 							   SVCR_SM_MASK);
@@ -2057,14 +2052,15 @@ void __efi_fpsimd_end(void)
 				}
 			}
 
-			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
-				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
-				       ffr);
+			sve_load_state(efi_sve_state + sve_ffr_offset(sve_max_vl()),
+				       &efi_fpsimd_state.fpsr, ffr);
 
-			__this_cpu_write(efi_sve_state_used, false);
+			efi_sve_state_used = false;
 		} else {
-			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+			fpsimd_load_state(&efi_fpsimd_state);
 		}
+
+		efi_fpsimd_state_used = false;
 	}
 }
 
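Taken together, __efi_fpsimd_begin() and __efi_fpsimd_end() now save and restore the FP/SIMD or SVE state through a single set of global variables rather than per-CPU ones, which is only safe if EFI runtime services are not entered concurrently on several CPUs. A rough sketch of how a caller is expected to bracket an EFI runtime call (efi_call_runtime_service() is a placeholder, not a real interface):

static efi_status_t do_protected_efi_call(void)
{
	efi_status_t status;

	__efi_fpsimd_begin();			/* save any live FP/SIMD or SVE state */
	status = efi_call_runtime_service();	/* hypothetical EFI runtime call */
	__efi_fpsimd_end();			/* restore it */

	return status;
}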