@@ -56,9 +56,6 @@ struct cont_desc {
 
 static u32 ucode_new_rev;
 
-/* One blob per node. */
-static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
-
 /*
  * Microcode patch container file is prepended to the initrd in cpio
  * format. See Documentation/arch/x86/microcode.rst
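
Note: the array removed above consumed MAX_NUMNODES * PATCH_MAX_SIZE bytes of BSS whether or not it was ever used. Its job is taken over by the driver's existing dynamically allocated patch cache; a cache entry looks roughly like this (field layout paraphrased from the shared microcode header, not copied verbatim from this commit):

struct ucode_patch {
	struct list_head plist;		/* linked into the global microcode_cache list */
	void *data;			/* raw microcode patch data */
	unsigned int size;
	u32 patch_id;			/* patch revision number */
	u16 equiv_cpu;			/* equivalence-table id the patch applies to */
};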
@@ -415,20 +412,17 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
  *
  * Returns true if container found (sets @desc), false otherwise.
  */
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
+static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
 {
 	struct cont_desc desc = { 0 };
-	u8 (*patch)[PATCH_MAX_SIZE];
 	struct microcode_amd *mc;
 	u32 rev, dummy, *new_rev;
 	bool ret = false;
 
 #ifdef CONFIG_X86_32
 	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
 #else
 	new_rev = &ucode_new_rev;
-	patch	= &amd_ucode_patch[0];
 #endif
 
 	desc.cpuid_1_eax = cpuid_1_eax;
@@ -452,9 +446,6 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
 	if (!__apply_microcode_amd(mc)) {
 		*new_rev = mc->hdr.patch_id;
 		ret = true;
-
-		if (save_patch)
-			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
 	}
 
 	return ret;
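
With the save_patch plumbing gone, early_apply_microcode() reduces to the shape below. This is reassembled from the context lines of the two hunks above; the container scan in the middle (scan_containers() plus the revision check) is elided, so treat it as an illustration rather than the complete function:

static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
{
	struct cont_desc desc = { 0 };
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	/* Paging is not up yet on 32-bit: work with physical addresses. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
#else
	new_rev = &ucode_new_rev;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	/* ... locate a matching patch in @ucode, set @mc and @rev ... */

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret = true;
	}

	return ret;
}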
@@ -507,50 +498,20 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data
 	*ret = cp;
 }
 
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
 {
 	struct cpio_data cp = { };
 
 	find_blobs_in_containers(cpuid_1_eax, &cp);
 	if (!(cp.data && cp.size))
 		return;
 
-	early_apply_microcode(cpuid_1_eax, cp.data, cp.size, true);
+	early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
 }
 
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void load_ucode_amd_early(unsigned int cpuid_1_eax)
 {
-	struct microcode_amd *mc;
-	struct cpio_data cp;
-	u32 *new_rev, rev, dummy;
-
-	if (IS_ENABLED(CONFIG_X86_32)) {
-		mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
-		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
-	} else {
-		mc = (struct microcode_amd *)amd_ucode_patch;
-		new_rev = &ucode_new_rev;
-	}
-
-	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-	/*
-	 * Check whether a new patch has been saved already. Also, allow application of
-	 * the same revision in order to pick up SMT-thread-specific configuration even
-	 * if the sibling SMT thread already has an up-to-date revision.
-	 */
-	if (*new_rev && rev <= mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			*new_rev = mc->hdr.patch_id;
-			return;
-		}
-	}
-
-	find_blobs_in_containers(cpuid_1_eax, &cp);
-	if (!(cp.data && cp.size))
-		return;
-
-	early_apply_microcode(cpuid_1_eax, cp.data, cp.size, false);
+	return apply_ucode_from_containers(cpuid_1_eax);
 }
 
 static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
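
BSP and AP early loading now funnel through one exported entry point; the APs re-scan the containers instead of shortcutting through the removed static copy. The matching call-site update lives in arch/x86/kernel/cpu/microcode/core.c and is not part of this file's diff; a hypothetical caller would look like this (load_ucode_amd_example is an illustrative name, not a real kernel function):

/* Hypothetical caller sketch: both the BSP and the AP paths in core.c
 * are expected to call the same unified helper after this change. */
static void load_ucode_amd_example(unsigned int cpuid_1_eax)
{
	/* Early loading is only supported on family 0x10 and newer. */
	if (x86_family(cpuid_1_eax) >= 0x10)
		load_ucode_amd_early(cpuid_1_eax);
}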
@@ -578,23 +539,6 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 	return 0;
 }
 
-void reload_ucode_amd(unsigned int cpu)
-{
-	u32 rev, dummy __always_unused;
-	struct microcode_amd *mc;
-
-	mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
-
-	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
-	if (rev < mc->hdr.patch_id) {
-		if (!__apply_microcode_amd(mc)) {
-			ucode_new_rev = mc->hdr.patch_id;
-			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
-		}
-	}
-}
-
 /*
  * a small, trivial cache of per-family ucode patches
  */
@@ -655,6 +599,28 @@ static struct ucode_patch *find_patch(unsigned int cpu)
 	return cache_find_patch(equiv_id);
 }
 
+void reload_ucode_amd(unsigned int cpu)
+{
+	u32 rev, dummy __always_unused;
+	struct microcode_amd *mc;
+	struct ucode_patch *p;
+
+	p = find_patch(cpu);
+	if (!p)
+		return;
+
+	mc = p->data;
+
+	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+	if (rev < mc->hdr.patch_id) {
+		if (!__apply_microcode_amd(mc)) {
+			ucode_new_rev = mc->hdr.patch_id;
+			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
+		}
+	}
+}
+
 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
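
reload_ucode_amd() reappears here, below find_patch(), because the resume path now resolves the patch through the generic cache instead of dereferencing the removed per-node array (note the new NULL check, which the old per-node lookup never had). The cache_find_patch() it ultimately depends on is a plain list walk, roughly (paraphrased from the existing code in this file, not part of this diff):

static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	/* microcode_cache is the driver-global list of saved patches. */
	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;

	return NULL;
}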
@@ -875,9 +841,6 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 			continue;
 
 		ret = UCODE_NEW;
-
-		memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
-		memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
 	}
 
 	return ret;
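
Finally, load_microcode_amd() stops mirroring a newly found patch into the static array. By this point __load_microcode_amd() has already inserted the patch into the cache, so the per-node loop only has to report that newer microcode exists. The resulting loop, with the lines outside this hunk paraphrased from the rest of the function, is approximately:

	for_each_node(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
		c = &cpu_data(cpu);

		p = find_patch(cpu);
		if (!p)
			continue;

		/* Skip nodes whose first CPU already runs this revision or newer. */
		if (c->microcode >= p->patch_id)
			continue;

		/* The cache owns the patch data; just record its availability. */
		ret = UCODE_NEW;
	}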