@@ -270,7 +270,7 @@ int amd_iommu_get_num_iommus(void)
  * Iterate through all the IOMMUs to get common EFR
  * masks among all IOMMUs and warn if found inconsistency.
  */
-static void get_global_efr(void)
+static __init void get_global_efr(void)
 {
 	struct amd_iommu *iommu;

@@ -302,16 +302,6 @@ static void get_global_efr(void)
 	pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
 }

-static bool check_feature_on_all_iommus(u64 mask)
-{
-	return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
-	return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
@@ -397,7 +387,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
 	u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
 	u64 entry = start & PM_ADDR_MASK;

-	if (!check_feature_on_all_iommus(FEATURE_SNP))
+	if (!check_feature(FEATURE_SNP))
 		return;

 	/* Note:
@@ -867,7 +857,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
 	void *buf = (void *)__get_free_pages(gfp, order);

 	if (buf &&
-	    check_feature_on_all_iommus(FEATURE_SNP) &&
+	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
 		free_pages((unsigned long)buf, order);
 		buf = NULL;
@@ -1046,7 +1036,7 @@ static void iommu_enable_xt(struct amd_iommu *iommu)

 static void iommu_enable_gt(struct amd_iommu *iommu)
 {
-	if (!iommu_feature(iommu, FEATURE_GT))
+	if (!check_feature(FEATURE_GT))
 		return;

 	iommu_feature_enable(iommu, CONTROL_GT_EN);
@@ -1985,7 +1975,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	u64 val;
 	struct pci_dev *pdev = iommu->dev;

-	if (!iommu_feature(iommu, FEATURE_PC))
+	if (!check_feature(FEATURE_PC))
 		return;

 	amd_iommu_pc_present = true;
@@ -2012,8 +2002,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
 				       struct device_attribute *attr,
 				       char *buf)
 {
-	struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-	return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+	return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

@@ -2049,22 +2038,22 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
 	features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
 	features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);

-	if (!iommu->features) {
-		iommu->features = features;
-		iommu->features2 = features2;
+	if (!amd_iommu_efr) {
+		amd_iommu_efr = features;
+		amd_iommu_efr2 = features2;
 		return;
 	}

 	/*
 	 * Sanity check and warn if EFR values from
 	 * IVHD and MMIO conflict.
 	 */
-	if (features != iommu->features ||
-	    features2 != iommu->features2) {
+	if (features != amd_iommu_efr ||
+	    features2 != amd_iommu_efr2) {
 		pr_warn(FW_WARN
 			"EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
-			features, iommu->features,
-			features2, iommu->features2);
+			features, amd_iommu_efr,
+			features2, amd_iommu_efr2);
 	}
 }

@@ -2090,20 +2079,20 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)

 	late_iommu_features_init(iommu);

-	if (iommu_feature(iommu, FEATURE_GT)) {
+	if (check_feature(FEATURE_GT)) {
 		int glxval;
 		u32 max_pasid;
 		u64 pasmax;

-		pasmax = iommu->features & FEATURE_PASID_MASK;
+		pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
 		pasmax >>= FEATURE_PASID_SHIFT;
 		max_pasid = (1 << (pasmax + 1)) - 1;

 		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

 		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

-		glxval = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval = amd_iommu_efr & FEATURE_GLXVAL_MASK;
 		glxval >>= FEATURE_GLXVAL_SHIFT;

 		if (amd_iommu_max_glx_val == -1)
@@ -2112,13 +2101,13 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 		amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
 	}

-	if (iommu_feature(iommu, FEATURE_GT) &&
-	    iommu_feature(iommu, FEATURE_PPR)) {
+	if (check_feature(FEATURE_GT) &&
+	    check_feature(FEATURE_PPR)) {
 		iommu->is_iommu_v2 = true;
 		amd_iommu_v2_present = true;
 	}

-	if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
 		return -ENOMEM;

 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2130,8 +2119,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	init_iommu_perf_ctr(iommu);

 	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-		if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
-		    !iommu_feature(iommu, FEATURE_GT)) {
+		if (!check_feature(FEATURE_GIOSUP) ||
+		    !check_feature(FEATURE_GT)) {
 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
 			amd_iommu_pgtable = AMD_IOMMU_V1;
 		}
@@ -2181,35 +2170,29 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)

 static void print_iommu_info(void)
 {
+	int i;
 	static const char * const feat_str[] = {
 		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
 		"IA", "GA", "HE", "PC"
 	};
-	struct amd_iommu *iommu;
-
-	for_each_iommu(iommu) {
-		struct pci_dev *pdev = iommu->dev;
-		int i;
-
-		pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);

-		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-			pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
+	if (amd_iommu_efr) {
+		pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);

-			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-				if (iommu_feature(iommu, (1ULL << i)))
-					pr_cont(" %s", feat_str[i]);
-			}
+		for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+			if (check_feature(1ULL << i))
+				pr_cont(" %s", feat_str[i]);
+		}

-			if (iommu->features & FEATURE_GAM_VAPIC)
-				pr_cont(" GA_vAPIC");
+		if (check_feature(FEATURE_GAM_VAPIC))
+			pr_cont(" GA_vAPIC");

-			if (iommu->features & FEATURE_SNP)
-				pr_cont(" SNP");
+		if (check_feature(FEATURE_SNP))
+			pr_cont(" SNP");

-			pr_cont("\n");
-		}
+		pr_cont("\n");
 	}
+
 	if (irq_remapping_enabled) {
 		pr_info("Interrupt remapping enabled\n");
 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
@@ -2907,7 +2890,7 @@ static void enable_iommus_vapic(void)
 	}

 	if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-	    !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+	    !check_feature(FEATURE_GAM_VAPIC)) {
 		amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
 		return;
 	}
@@ -3819,7 +3802,7 @@ int amd_iommu_snp_enable(void)
 		return -EINVAL;
 	}

-	amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en)
 		return -EINVAL;

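Note: the check_feature() helper that these hunks switch to is not defined in any hunk shown above. Going by the removed check_feature_on_all_iommus() and the global EFR masks that get_global_efr() fills in, it is presumably a thin wrapper around amd_iommu_efr. A minimal sketch of what such a helper could look like follows; the header location, the extern declaration, and the inline form are assumptions, not taken from this diff.

/*
 * Sketch only -- not part of the diff above. Assumes the global EFR mask
 * populated by get_global_efr() is visible here, e.g. via an extern
 * declaration in an AMD IOMMU header (assumed location).
 */
#include <linux/types.h>

extern u64 amd_iommu_efr;

/* True if the given EFR feature bit(s) are supported across all IOMMUs. */
static inline bool check_feature(u64 mask)
{
	return !!(amd_iommu_efr & mask);
}

Because get_global_efr() already folds every IOMMU's EFR into the single amd_iommu_efr/amd_iommu_efr2 pair (and warns on any mismatch), one mask test against the global value suffices, which is why the call sites above drop the per-IOMMU argument used by iommu_feature().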