23
23
24
24
#include <linux/earlycpio.h>
25
25
#include <linux/firmware.h>
26
+ #include <linux/bsearch.h>
26
27
#include <linux/uaccess.h>
27
28
#include <linux/vmalloc.h>
28
29
#include <linux/initrd.h>
29
30
#include <linux/kernel.h>
30
31
#include <linux/pci.h>
31
32
33
+ #include <crypto/sha2.h>
34
+
32
35
#include <asm/microcode.h>
33
36
#include <asm/processor.h>
37
+ #include <asm/cmdline.h>
34
38
#include <asm/setup.h>
35
39
#include <asm/cpu.h>
36
40
#include <asm/msr.h>
@@ -145,6 +149,107 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
145
149
*/
146
150
static u32 bsp_cpuid_1_eax __ro_after_init ;
147
151
152
/*
 * SHA256 verification of microcode blobs is on by default; it can be turned
 * off with "microcode.amd_sha_check=off" on the kernel command line (see
 * load_ucode_amd_bsp()), which taints the kernel.
 */
static bool sha_check = true;

/* Maps a microcode patch_id to its known-good SHA256 digest. */
struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

/* Generated table 'phashes' of struct patch_digest entries, looked up via bsearch(). */
#include "amd_shas.c"
160
+
161
+ static int cmp_id (const void * key , const void * elem )
162
+ {
163
+ struct patch_digest * pd = (struct patch_digest * )elem ;
164
+ u32 patch_id = * (u32 * )key ;
165
+
166
+ if (patch_id == pd -> patch_id )
167
+ return 0 ;
168
+ else if (patch_id < pd -> patch_id )
169
+ return -1 ;
170
+ else
171
+ return 1 ;
172
+ }
173
+
174
+ static bool need_sha_check (u32 cur_rev )
175
+ {
176
+ switch (cur_rev >> 8 ) {
177
+ case 0x80012 : return cur_rev <= 0x800126f ; break ;
178
+ case 0x83010 : return cur_rev <= 0x830107c ; break ;
179
+ case 0x86001 : return cur_rev <= 0x860010e ; break ;
180
+ case 0x86081 : return cur_rev <= 0x8608108 ; break ;
181
+ case 0x87010 : return cur_rev <= 0x8701034 ; break ;
182
+ case 0x8a000 : return cur_rev <= 0x8a0000a ; break ;
183
+ case 0xa0011 : return cur_rev <= 0xa0011da ; break ;
184
+ case 0xa0012 : return cur_rev <= 0xa001243 ; break ;
185
+ case 0xa1011 : return cur_rev <= 0xa101153 ; break ;
186
+ case 0xa1012 : return cur_rev <= 0xa10124e ; break ;
187
+ case 0xa1081 : return cur_rev <= 0xa108109 ; break ;
188
+ case 0xa2010 : return cur_rev <= 0xa20102f ; break ;
189
+ case 0xa2012 : return cur_rev <= 0xa201212 ; break ;
190
+ case 0xa6012 : return cur_rev <= 0xa60120a ; break ;
191
+ case 0xa7041 : return cur_rev <= 0xa704109 ; break ;
192
+ case 0xa7052 : return cur_rev <= 0xa705208 ; break ;
193
+ case 0xa7080 : return cur_rev <= 0xa708009 ; break ;
194
+ case 0xa70c0 : return cur_rev <= 0xa70C009 ; break ;
195
+ case 0xaa002 : return cur_rev <= 0xaa00218 ; break ;
196
+ default : break ;
197
+ }
198
+
199
+ pr_info ("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n" );
200
+ pr_info ("CPUID(1).EAX: 0x%x, current revision: 0x%x\n" , bsp_cpuid_1_eax , cur_rev );
201
+ return true;
202
+ }
203
+
204
/*
 * Verify the SHA256 digest of the microcode patch @data (@len bytes),
 * identified by @patch_id, against the known-good digest table before it is
 * applied on top of the currently running revision @cur_rev.
 *
 * Returns true when loading may proceed, false when the patch has no table
 * entry or its digest does not match.
 */
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state s;
	int i;

	/* Only families 0x17..0x19 are subject to the check; others pass through. */
	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
	    x86_family(bsp_cpuid_1_eax) > 0x19)
		return true;

	/* Current revision is past the per-family cutoff: no check needed. */
	if (!need_sha_check(cur_rev))
		return true;

	/* Checking was explicitly disabled on the command line. */
	if (!sha_check)
		return true;

	/* NOTE(review): assumes phashes is sorted by patch_id (bsearch precondition) — generated in amd_shas.c. */
	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256_init(&s);
	sha256_update(&s, data, len);
	sha256_final(&s, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		/* Dump the computed digest to help diagnose the mismatch. */
		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}
243
+
244
/* Read the currently loaded microcode patch revision from MSR_AMD64_PATCH_LEVEL. */
static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}
252
+
148
253
static union cpuid_1_eax ucode_rev_to_cpuid (unsigned int val )
149
254
{
150
255
union zen_patch_rev p ;
@@ -246,8 +351,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
246
351
* On success, @sh_psize returns the patch size according to the section header,
247
352
* to the caller.
248
353
*/
249
- static bool
250
- __verify_patch_section (const u8 * buf , size_t buf_size , u32 * sh_psize )
354
+ static bool __verify_patch_section (const u8 * buf , size_t buf_size , u32 * sh_psize )
251
355
{
252
356
u32 p_type , p_size ;
253
357
const u32 * hdr ;
@@ -484,10 +588,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
484
588
}
485
589
}
486
590
487
- static bool __apply_microcode_amd (struct microcode_amd * mc , unsigned int psize )
591
+ static bool __apply_microcode_amd (struct microcode_amd * mc , u32 * cur_rev ,
592
+ unsigned int psize )
488
593
{
489
594
unsigned long p_addr = (unsigned long )& mc -> hdr .data_code ;
490
- u32 rev , dummy ;
595
+
596
+ if (!verify_sha256_digest (mc -> hdr .patch_id , * cur_rev , (const u8 * )p_addr , psize ))
597
+ return -1 ;
491
598
492
599
native_wrmsrl (MSR_AMD64_PATCH_LOADER , p_addr );
493
600
@@ -505,47 +612,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
505
612
}
506
613
507
614
/* verify patch application was successful */
508
- native_rdmsr (MSR_AMD64_PATCH_LEVEL , rev , dummy );
509
-
510
- if (rev != mc -> hdr .patch_id )
615
+ * cur_rev = get_patch_level ();
616
+ if (* cur_rev != mc -> hdr .patch_id )
511
617
return false;
512
618
513
619
return true;
514
620
}
515
621
516
- /*
517
- * Early load occurs before we can vmalloc(). So we look for the microcode
518
- * patch container file in initrd, traverse equivalent cpu table, look for a
519
- * matching microcode patch, and update, all in initrd memory in place.
520
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
521
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
522
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
523
- * kernel heap memory.
524
- *
525
- * Returns true if container found (sets @desc), false otherwise.
526
- */
527
- static bool early_apply_microcode (u32 old_rev , void * ucode , size_t size )
528
- {
529
- struct cont_desc desc = { 0 };
530
- struct microcode_amd * mc ;
531
-
532
- scan_containers (ucode , size , & desc );
533
-
534
- mc = desc .mc ;
535
- if (!mc )
536
- return false;
537
-
538
- /*
539
- * Allow application of the same revision to pick up SMT-specific
540
- * changes even if the revision of the other SMT thread is already
541
- * up-to-date.
542
- */
543
- if (old_rev > mc -> hdr .patch_id )
544
- return false;
545
-
546
- return __apply_microcode_amd (mc , desc .psize );
547
- }
548
-
549
622
static bool get_builtin_microcode (struct cpio_data * cp )
550
623
{
551
624
char fw_name [36 ] = "amd-ucode/microcode_amd.bin" ;
@@ -583,52 +656,59 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
583
656
return found ;
584
657
}
585
658
659
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	char buf[4];
	u32 rev;

	/* "microcode.amd_sha_check=off" disables digest checking and taints the kernel. */
	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	/* Stash CPUID(1).EAX for family checks done before cpu_data is set up. */
	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	if (!find_blobs_in_containers(&cp))
		return;

	scan_containers(cp.data, cp.size, &desc);

	mc = desc.mc;
	if (!mc)
		return;

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	/* On success, __apply_microcode_amd() updated rev to the new level. */
	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}
631
- early_initcall (save_microcode_in_initrd );
632
712
633
713
static inline bool patch_cpus_equivalent (struct ucode_patch * p ,
634
714
struct ucode_patch * n ,
@@ -729,14 +809,9 @@ static void free_cache(void)
729
809
static struct ucode_patch * find_patch (unsigned int cpu )
730
810
{
731
811
struct ucode_cpu_info * uci = ucode_cpu_info + cpu ;
732
- u32 rev , dummy __always_unused ;
733
812
u16 equiv_id = 0 ;
734
813
735
- /* fetch rev if not populated yet: */
736
- if (!uci -> cpu_sig .rev ) {
737
- rdmsr (MSR_AMD64_PATCH_LEVEL , rev , dummy );
738
- uci -> cpu_sig .rev = rev ;
739
- }
814
+ uci -> cpu_sig .rev = get_patch_level ();
740
815
741
816
if (x86_family (bsp_cpuid_1_eax ) < 0x17 ) {
742
817
equiv_id = find_equiv_id (& equiv_table , uci -> cpu_sig .sig );
@@ -759,22 +834,20 @@ void reload_ucode_amd(unsigned int cpu)
759
834
760
835
mc = p -> data ;
761
836
762
- rdmsr (MSR_AMD64_PATCH_LEVEL , rev , dummy );
763
-
837
+ rev = get_patch_level ();
764
838
if (rev < mc -> hdr .patch_id ) {
765
- if (__apply_microcode_amd (mc , p -> size ))
766
- pr_info_once ("reload revision: 0x%08x\n" , mc -> hdr . patch_id );
839
+ if (__apply_microcode_amd (mc , & rev , p -> size ))
840
+ pr_info_once ("reload revision: 0x%08x\n" , rev );
767
841
}
768
842
}
769
843
770
844
static int collect_cpu_info_amd (int cpu , struct cpu_signature * csig )
771
845
{
772
- struct cpuinfo_x86 * c = & cpu_data (cpu );
773
846
struct ucode_cpu_info * uci = ucode_cpu_info + cpu ;
774
847
struct ucode_patch * p ;
775
848
776
849
csig -> sig = cpuid_eax (0x00000001 );
777
- csig -> rev = c -> microcode ;
850
+ csig -> rev = get_patch_level () ;
778
851
779
852
/*
780
853
* a patch could have been loaded early, set uci->mc so that
@@ -815,7 +888,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
815
888
goto out ;
816
889
}
817
890
818
- if (!__apply_microcode_amd (mc_amd , p -> size )) {
891
+ if (!__apply_microcode_amd (mc_amd , & rev , p -> size )) {
819
892
pr_err ("CPU%d: update failed for patch_level=0x%08x\n" ,
820
893
cpu , mc_amd -> hdr .patch_id );
821
894
return UCODE_ERROR ;
@@ -937,8 +1010,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
937
1010
}
938
1011
939
1012
/* Scan the blob in @data and add microcode patches to the cache. */
940
- static enum ucode_state __load_microcode_amd (u8 family , const u8 * data ,
941
- size_t size )
1013
+ static enum ucode_state __load_microcode_amd (u8 family , const u8 * data , size_t size )
942
1014
{
943
1015
u8 * fw = (u8 * )data ;
944
1016
size_t offset ;
@@ -1013,6 +1085,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
1013
1085
return ret ;
1014
1086
}
1015
1087
1088
/*
 * Locate the microcode container in the initrd and load its patches into the
 * kernel's own cache via _load_microcode_amd(), so they remain available
 * after the initrd is gone. Runs as an early_initcall.
 */
static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	/* Nothing to do if the loader is disabled or this is not AMD family >= 0x10. */
	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	/* UCODE_NEW/UCODE_UPDATED and below are success; anything above is an error. */
	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);
1113
+
1016
1114
/*
1017
1115
* AMD microcode firmware naming convention, up to family 15h they are in
1018
1116
* the legacy file:
0 commit comments