@@ -101,38 +101,25 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
 #define MCA_BANK_IPID(_ip, _hwid, _type) \
 	[AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
 
-static inline bool smu_v13_0_6_is_unified_metrics(struct smu_context *smu)
-{
-	return (smu->adev->flags & AMD_IS_APU) &&
-	       smu->smc_fw_version <= 0x4556900;
-}
-
-static inline bool smu_v13_0_6_is_other_end_count_available(struct smu_context *smu)
-{
-	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
-	case IP_VERSION(13, 0, 6):
-		return smu->smc_fw_version >= 0x557600;
-	case IP_VERSION(13, 0, 14):
-		return smu->smc_fw_version >= 0x05550E00;
-	default:
-		return false;
-	}
-}
-
-static inline bool smu_v13_0_6_is_blw_host_limit_available(struct smu_context *smu)
-{
-	if (smu->adev->flags & AMD_IS_APU)
-		return smu->smc_fw_version >= 0x04556F00;
+enum smu_v13_0_6_caps {
+	SMU_13_0_6_CAPS_DPM,
+	SMU_13_0_6_CAPS_UNI_METRICS,
+	SMU_13_0_6_CAPS_DPM_POLICY,
+	SMU_13_0_6_CAPS_OTHER_END_METRICS,
+	SMU_13_0_6_CAPS_SET_UCLK_MAX,
+	SMU_13_0_6_CAPS_PCIE_METRICS,
+	SMU_13_0_6_CAPS_HST_LIMIT_METRICS,
+	SMU_13_0_6_CAPS_MCA_DEBUG_MODE,
+	SMU_13_0_6_CAPS_PER_INST_METRICS,
+	SMU_13_0_6_CAPS_CTF_LIMIT,
+	SMU_13_0_6_CAPS_RMA_MSG,
+	SMU_13_0_6_CAPS_ACA_SYND,
+	SMU_13_0_6_CAPS_SDMA_RESET,
+	SMU_13_0_6_CAPS_ALL,
+};
 
-	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
-	case IP_VERSION(13, 0, 6):
-		return smu->smc_fw_version >= 0x557900;
-	case IP_VERSION(13, 0, 14):
-		return smu->smc_fw_version >= 0x05551000;
-	default:
-		return false;
-	}
-}
+#define SMU_CAPS_MASK(x) (ULL(1) << x)
+#define SMU_CAPS(x) SMU_CAPS_MASK(SMU_13_0_6_CAPS_##x)
 
 struct mca_bank_ipid {
 	enum amdgpu_mca_ip ip;
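Not part of the patch: a minimal userspace sketch of the capability-bitmask pattern the enum and macros above implement. The `demo_*` names are illustrative, and the kernel's `ULL()` constant macro is replaced by a plain `1ULL` so the snippet compiles standalone.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum demo_caps {
	DEMO_CAPS_DPM,
	DEMO_CAPS_RMA_MSG,
	DEMO_CAPS_SDMA_RESET,
};

/* Same shape as SMU_CAPS_MASK()/SMU_CAPS(), with 1ULL instead of ULL(1) */
#define DEMO_CAPS_MASK(x) (1ULL << (x))
#define DEMO_CAPS(x) DEMO_CAPS_MASK(DEMO_CAPS_##x)

/* True only if every bit in 'mask' is set in 'caps' */
static bool demo_caps_supported(uint64_t caps, uint64_t mask)
{
	return (caps & mask) == mask;
}

int main(void)
{
	uint64_t caps = DEMO_CAPS(DPM) | DEMO_CAPS(RMA_MSG);

	caps &= ~DEMO_CAPS(RMA_MSG);	/* drop a cap, as init_caps does for APUs */

	printf("DPM: %d\n", demo_caps_supported(caps, DEMO_CAPS(DPM)));		/* 1 */
	printf("RMA_MSG: %d\n", demo_caps_supported(caps, DEMO_CAPS(RMA_MSG)));	/* 0 */
	return 0;
}
```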
@@ -297,6 +284,119 @@ struct smu_v13_0_6_dpm_map {
 	uint32_t *freq_table;
 };
 
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+			SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+			SMU_CAPS(PCIE_METRICS) | SMU_CAPS(CTF_LIMIT) |
+			SMU_CAPS(MCA_DEBUG_MODE) | SMU_CAPS(RMA_MSG) |
+			SMU_CAPS(ACA_SYND);
+	uint32_t fw_ver = smu->smc_fw_version;
+
+	if (fw_ver >= 0x05550E00)
+		caps |= SMU_CAPS(OTHER_END_METRICS);
+	if (fw_ver >= 0x05551000)
+		caps |= SMU_CAPS(HST_LIMIT_METRICS);
+	if (fw_ver >= 0x05550B00)
+		caps |= SMU_CAPS(PER_INST_METRICS);
+	if (fw_ver > 0x05550f00)
+		caps |= SMU_CAPS(SDMA_RESET);
+
+	dpm_context->caps = caps;
+}
+
+static void smu_v13_0_6_init_caps(struct smu_context *smu)
+{
+	uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
+			SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
+			SMU_CAPS(PCIE_METRICS) | SMU_CAPS(MCA_DEBUG_MODE) |
+			SMU_CAPS(CTF_LIMIT) | SMU_CAPS(RMA_MSG) |
+			SMU_CAPS(ACA_SYND);
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+	struct amdgpu_device *adev = smu->adev;
+	uint32_t fw_ver = smu->smc_fw_version;
+	uint32_t pgm = (fw_ver >> 24) & 0xFF;
+
+	if (fw_ver < 0x552F00)
+		caps &= ~SMU_CAPS(DPM);
+
+	if (adev->flags & AMD_IS_APU) {
+		caps &= ~SMU_CAPS(PCIE_METRICS);
+		caps &= ~SMU_CAPS(SET_UCLK_MAX);
+		caps &= ~SMU_CAPS(DPM_POLICY);
+		caps &= ~SMU_CAPS(RMA_MSG);
+		caps &= ~SMU_CAPS(ACA_SYND);
+
+		if (fw_ver <= 0x4556900)
+			caps &= ~SMU_CAPS(UNI_METRICS);
+
+		if (fw_ver >= 0x04556F00)
+			caps |= SMU_CAPS(HST_LIMIT_METRICS);
+		if (fw_ver >= 0x04556A00)
+			caps |= SMU_CAPS(PER_INST_METRICS);
+		if (fw_ver < 0x554500)
+			caps &= ~SMU_CAPS(CTF_LIMIT);
+	} else {
+		if (fw_ver >= 0x557600)
+			caps |= SMU_CAPS(OTHER_END_METRICS);
+		if (fw_ver < 0x00556000)
+			caps &= ~SMU_CAPS(DPM_POLICY);
+		if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
+			caps &= ~SMU_CAPS(SET_UCLK_MAX);
+		if (fw_ver < 0x556300)
+			caps &= ~SMU_CAPS(PCIE_METRICS);
+		if (fw_ver < 0x554800)
+			caps &= ~SMU_CAPS(MCA_DEBUG_MODE);
+		if (fw_ver >= 0x556F00)
+			caps |= SMU_CAPS(PER_INST_METRICS);
+		if (fw_ver < 0x554500)
+			caps &= ~SMU_CAPS(CTF_LIMIT);
+		if (fw_ver < 0x00555a00)
+			caps &= ~SMU_CAPS(RMA_MSG);
+		if (fw_ver < 0x00555600)
+			caps &= ~SMU_CAPS(ACA_SYND);
+		if (pgm == 0 && fw_ver >= 0x557900)
+			caps |= SMU_CAPS(HST_LIMIT_METRICS);
+	}
+	if (((pgm == 7) && (fw_ver > 0x07550700)) ||
+	    ((pgm == 0) && (fw_ver > 0x00557700)) ||
+	    ((pgm == 4) && (fw_ver > 0x4556e6c)))
+		caps |= SMU_CAPS(SDMA_RESET);
+
+	dpm_context->caps = caps;
+}
+
+static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
+					      uint64_t caps)
+{
+	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+	return (dpm_context->caps & caps) == caps;
+}
+
+static void smu_v13_0_x_init_caps(struct smu_context *smu)
+{
+	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+	case IP_VERSION(13, 0, 14):
+		return smu_v13_0_14_init_caps(smu);
+	default:
+		return smu_v13_0_6_init_caps(smu);
+	}
+}
+
+static int smu_v13_0_6_check_fw_version(struct smu_context *smu)
+{
+	int r;
+
+	r = smu_v13_0_check_fw_version(smu);
+	/* Initialize caps flags once fw version is fetched */
+	if (!r)
+		smu_v13_0_x_init_caps(smu);
+
+	return r;
+}
+
 static int smu_v13_0_6_init_microcode(struct smu_context *smu)
 {
 	const struct smc_firmware_header_v2_1 *v2_1;
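Again not part of the patch: a standalone sketch of the init-once gating that `smu_v13_0_6_check_fw_version()` wires up, mirroring the dGPU program-0 thresholds quoted above (0x557600 for OTHER_END_METRICS, 0x557900 for HST_LIMIT_METRICS). The `demo_*` names and the reduced cap set are assumptions for illustration.

```c
#include <stdio.h>
#include <stdint.h>

#define DEMO_CAP_OTHER_END_METRICS	(1ULL << 0)
#define DEMO_CAP_HST_LIMIT_METRICS	(1ULL << 1)

/* Mirrors the dGPU branch of smu_v13_0_6_init_caps() for two caps */
static uint64_t demo_init_caps(uint32_t fw_ver)
{
	uint32_t pgm = (fw_ver >> 24) & 0xFF;
	uint64_t caps = 0;

	if (fw_ver >= 0x557600)
		caps |= DEMO_CAP_OTHER_END_METRICS;
	if (pgm == 0 && fw_ver >= 0x557900)
		caps |= DEMO_CAP_HST_LIMIT_METRICS;

	return caps;
}

int main(void)
{
	/* 0x557800 enables OTHER_END_METRICS but not yet HST_LIMIT_METRICS */
	printf("caps(0x557800) = %#llx\n",
	       (unsigned long long)demo_init_caps(0x557800));
	/* 0x557900 enables both */
	printf("caps(0x557900) = %#llx\n",
	       (unsigned long long)demo_init_caps(0x557900));
	return 0;
}
```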
@@ -618,7 +718,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
 	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
 	struct PPTable_t *pptable =
 		(struct PPTable_t *)smu_table->driver_pptable;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	int ret, i, retry = 100;
 	uint32_t table_version;
 
@@ -814,8 +914,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
 	smu_v13_0_6_setup_driver_pptable(smu);
 
 	/* DPM policy not supported in older firmwares */
-	if (!(smu->adev->flags & AMD_IS_APU) &&
-	    (smu->smc_fw_version < 0x00556000)) {
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM_POLICY))) {
 		struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
 		smu_dpm->dpm_policies->policy_mask &=
@@ -992,7 +1091,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 	struct smu_table_context *smu_table = &smu->smu_table;
 	MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
 	MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
 	int xcc_id;
@@ -1005,7 +1104,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 	switch (member) {
 	case METRICS_CURR_GFXCLK:
 	case METRICS_AVERAGE_GFXCLK:
-		if (smu->smc_fw_version >= 0x552F00) {
+		if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
 			xcc_id = GET_INST(GC, 0);
 			*value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
 		} else {
@@ -1692,7 +1791,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
 static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
 {
 	/* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
-	if (smu->smc_fw_version < 0x554800)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(MCA_DEBUG_MODE)))
 		return 0;
 
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
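The comments in this file pair hex thresholds with dotted PMFW versions (0x554800 with 85.72.0 here, 0x00555a00 with 85.90.0 below), which implies a program.major.minor.revision byte layout. A hypothetical decoder, not part of the driver:

```c
#include <stdio.h>
#include <stdint.h>

/* Decode smc_fw_version assuming one byte per field: pgm.maj.min.rev */
static void demo_decode_fw_version(uint32_t v)
{
	printf("0x%08x -> program %u, version %u.%u.%u\n", v,
	       (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
}

int main(void)
{
	demo_decode_fw_version(0x554800);	/* program 0, 85.72.0 */
	demo_decode_fw_version(0x00555a00);	/* program 0, 85.90.0 */
	demo_decode_fw_version(0x05550E00);	/* program 5, 85.14.0 */
	return 0;
}
```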
@@ -1837,9 +1936,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
 		if (max == pstate_table->uclk_pstate.curr.max)
 			return 0;
 		/* For VF, only allowed in FW versions 85.102 or greater */
-		if (amdgpu_sriov_vf(adev) &&
-		    ((smu->smc_fw_version < 0x556600) ||
-		     (adev->flags & AMD_IS_APU)))
+		if (!smu_v13_0_6_caps_supported(smu,
+						SMU_CAPS(SET_UCLK_MAX)))
 			return -EOPNOTSUPP;
 		/* Only max clock limiting is allowed for UCLK */
 		ret = smu_v13_0_set_soft_freq_limited_range(
@@ -2043,7 +2141,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
 
 	ret = smu_cmn_get_enabled_mask(smu, feature_mask);
 
-	if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
+	if (ret == -EIO && !smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
 		*feature_mask = 0;
 		ret = 0;
 	}
@@ -2336,18 +2434,18 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
 
 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 {
-	bool per_inst, smu_13_0_6_per_inst, smu_13_0_14_per_inst, apu_per_inst;
 	struct smu_table_context *smu_table = &smu->smu_table;
 	struct gpu_metrics_v1_7 *gpu_metrics =
 		(struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
-	bool flag = smu_v13_0_6_is_unified_metrics(smu);
+	bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
 	int ret = 0, xcc_id, inst, i, j, k, idx;
 	struct amdgpu_device *adev = smu->adev;
 	MetricsTableX_t *metrics_x;
 	MetricsTableA_t *metrics_a;
 	struct amdgpu_xcp *xcp;
 	u16 link_width_level;
 	u32 inst_mask;
+	bool per_inst;
 
 	metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
 	ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
@@ -2421,7 +2519,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 	 * table for both pf & one vf for smu version 85.99.0 or higher else report only
 	 * for pf from registers
 	 */
-	if (smu->smc_fw_version >= 0x556300) {
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(PCIE_METRICS))) {
 		gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
 		gpu_metrics->pcie_link_speed =
 			pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2450,7 +2548,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 			metrics_x->PCIeNAKSentCountAcc;
 		gpu_metrics->pcie_nak_rcvd_count_acc =
 			metrics_x->PCIeNAKReceivedCountAcc;
-		if (smu_v13_0_6_is_other_end_count_available(smu))
+		if (smu_v13_0_6_caps_supported(smu,
+					       SMU_CAPS(OTHER_END_METRICS)))
 			gpu_metrics->pcie_lc_perf_other_end_recovery =
 				metrics_x->PCIeOtherEndRecoveryAcc;
 
@@ -2475,17 +2574,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 
 	gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
 
-	apu_per_inst = (adev->flags & AMD_IS_APU) && (smu->smc_fw_version >= 0x04556A00);
-	smu_13_0_6_per_inst = !(adev->flags & AMD_IS_APU) &&
-			      (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
-			       == IP_VERSION(13, 0, 6)) &&
-			      (smu->smc_fw_version >= 0x556F00);
-	smu_13_0_14_per_inst = !(adev->flags & AMD_IS_APU) &&
-			       (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
-				== IP_VERSION(13, 0, 14)) &&
-			       (smu->smc_fw_version >= 0x05550B00);
-
-	per_inst = apu_per_inst || smu_13_0_6_per_inst || smu_13_0_14_per_inst;
+	per_inst = smu_v13_0_6_caps_supported(smu, SMU_CAPS(PER_INST_METRICS));
 
 	for_each_xcp(adev->xcp_mgr, xcp, i) {
 		amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2516,7 +2605,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 			gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
 				SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);
 
-			if (smu_v13_0_6_is_blw_host_limit_available(smu))
+			if (smu_v13_0_6_caps_supported(
+				    smu, SMU_CAPS(HST_LIMIT_METRICS)))
 				gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
 					SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
 						     [inst]);
@@ -2624,7 +2714,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
 		return -EINVAL;
 
 	/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
-	if (smu->smc_fw_version < 0x554500)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(CTF_LIMIT)))
 		return 0;
 
 	/* Get SOC Max operating temperature */
@@ -2726,11 +2816,10 @@ static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
 
 static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
 {
-	struct amdgpu_device *adev = smu->adev;
 	int ret;
 
 	/* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
-	if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
+	if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(RMA_MSG)))
 		return 0;
 
 	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2750,18 +2839,17 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
 	smu_program = (smu->smc_fw_version >> 24) & 0xff;
 	switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
 	case IP_VERSION(13, 0, 6):
-		if (((smu_program == 7) && (smu->smc_fw_version > 0x07550700)) ||
-		    ((smu_program == 0) && (smu->smc_fw_version > 0x00557700)))
+		if ((smu_program == 7 || smu_program == 0) &&
+		    smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA, inst_mask, NULL);
 		else if ((smu_program == 4) &&
-			 (smu->smc_fw_version > 0x4556e6c))
+			 smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA2, inst_mask, NULL);
 		break;
 	case IP_VERSION(13, 0, 14):
-		if ((smu_program == 5) &&
-		    (smu->smc_fw_version > 0x05550f00))
+		if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
 			ret = smu_cmn_send_smc_msg_with_param(smu,
 							      SMU_MSG_ResetSDMA2, inst_mask, NULL);
 		break;
@@ -3087,7 +3175,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
 	if (instlo != 0x03b30400)
 		return false;
 
-	if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND))) {
 		errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
 		errcode &= 0xff;
 	} else {
@@ -3373,9 +3461,10 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
 
 static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
 {
+	struct smu_context *smu = adev->powerplay.pp_handle;
 	int error_code;
 
-	if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600)
+	if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND)))
 		error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
 	else
 		error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
@@ -3413,7 +3502,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
 	.fini_power = smu_v13_0_fini_power,
 	.check_fw_status = smu_v13_0_6_check_fw_status,
 	/* pptable related */
-	.check_fw_version = smu_v13_0_check_fw_version,
+	.check_fw_version = smu_v13_0_6_check_fw_version,
 	.set_driver_table_location = smu_v13_0_set_driver_table_location,
 	.set_tool_table_location = smu_v13_0_set_tool_table_location,
 	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,