@@ -66,7 +66,9 @@ struct reg_bits_to_feat_map {
66
66
/*
 * Shorthand (register, field, min-value) triples; each expands into the
 * trailing arguments of kvm_has_feat() at the use sites below.
 */
#define FEAT_BRBE		ID_AA64DFR0_EL1, BRBE, IMP
#define FEAT_TRC_SR		ID_AA64DFR0_EL1, TraceVer, IMP
#define FEAT_PMUv3		ID_AA64DFR0_EL1, PMUVer, IMP
#define FEAT_PMUv3p9		ID_AA64DFR0_EL1, PMUVer, V3P9
#define FEAT_TRBE		ID_AA64DFR0_EL1, TraceBuffer, IMP
#define FEAT_TRBEv1p1		ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1
#define FEAT_DoubleLock		ID_AA64DFR0_EL1, DoubleLock, IMP
#define FEAT_TRF		ID_AA64DFR0_EL1, TraceFilt, IMP
#define FEAT_AA32EL0		ID_AA64PFR0_EL1, EL0, AARCH32
@@ -84,8 +86,10 @@ struct reg_bits_to_feat_map {
84
86
/* More (register, field, min-value) triples for kvm_has_feat() */
#define FEAT_LS64_V		ID_AA64ISAR1_EL1, LS64, LS64_V
#define FEAT_LS64_ACCDATA	ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA
#define FEAT_RAS		ID_AA64PFR0_EL1, RAS, IMP
#define FEAT_RASv2		ID_AA64PFR0_EL1, RAS, V2
#define FEAT_GICv3		ID_AA64PFR0_EL1, GIC, IMP
#define FEAT_LOR		ID_AA64MMFR1_EL1, LO, IMP
#define FEAT_SPEv1p4		ID_AA64DFR0_EL1, PMSVer, V1P4
#define FEAT_SPEv1p5		ID_AA64DFR0_EL1, PMSVer, V1P5
#define FEAT_ATS1A		ID_AA64ISAR2_EL1, ATS1A, IMP
#define FEAT_SPECRES2		ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX
@@ -110,10 +114,23 @@ struct reg_bits_to_feat_map {
110
114
#define FEAT_EVT_TTLBxS		ID_AA64MMFR2_EL1, EVT, TTLBxS
#define FEAT_MTE2		ID_AA64PFR1_EL1, MTE, MTE2
#define FEAT_RME		ID_AA64PFR0_EL1, RME, IMP
/* NOTE(review): bare '1' -- presumably the MPAM field lacks a named minimum value; confirm against the sysreg definitions */
#define FEAT_MPAM		ID_AA64PFR0_EL1, MPAM, 1
#define FEAT_S2FWB		ID_AA64MMFR2_EL1, FWB, IMP
#define FEAT_TME		ID_AA64ISAR0_EL1, TME, IMP
#define FEAT_TWED		ID_AA64MMFR1_EL1, TWED, IMP
#define FEAT_E2H0		ID_AA64MMFR4_EL1, E2H0, IMP
#define FEAT_SRMASK		ID_AA64MMFR4_EL1, SRMASK, IMP
#define FEAT_PoPS		ID_AA64MMFR4_EL1, PoPS, IMP
#define FEAT_PFAR		ID_AA64PFR1_EL1, PFAR, IMP
125
/*
 * FEAT_Debugv8p9 is about the debug architecture revision, not the PMU.
 * Gate it on ID_AA64DFR0_EL1.DebugVer >= V8P9 rather than PMUVer/V3P9:
 * PMUv3 is optional, so an implementation with v8.9 debug but without
 * PMUv3.9 would otherwise be mis-detected (and MDSELR_EL1 traps
 * mishandled).
 */
#define FEAT_Debugv8p9		ID_AA64DFR0_EL1, DebugVer, V8P9
126
#define FEAT_PMUv3_SS		ID_AA64DFR0_EL1, PMSS, IMP
#define FEAT_SEBEP		ID_AA64DFR0_EL1, SEBEP, IMP
#define FEAT_EBEP		ID_AA64DFR1_EL1, EBEP, IMP
#define FEAT_ITE		ID_AA64DFR1_EL1, ITE, IMP
#define FEAT_PMUv3_ICNTR	ID_AA64DFR1_EL1, PMICNTR, IMP
#define FEAT_SPMU		ID_AA64DFR1_EL1, SPMU, IMP
#define FEAT_SPE_nVM		ID_AA64DFR2_EL1, SPE_nVM, IMP
#define FEAT_STEP2		ID_AA64DFR2_EL1, STEP, IMP
117
134
118
135
static bool not_feat_aa64el3 (struct kvm * kvm )
119
136
{
@@ -180,6 +197,32 @@ static bool feat_sme_smps(struct kvm *kvm)
180
197
(read_sysreg_s (SYS_SMIDR_EL1 ) & SMIDR_EL1_SMPS ));
181
198
}
182
199
200
/*
 * Whether the guest can use SPE data-source filtering: requires
 * FEAT_SPEv1p4 plus the FDS bit in the host's PMSIDR_EL1.
 */
static bool feat_spe_fds(struct kvm *kvm)
{
	/*
	 * Revisit this if KVM ever supports SPE -- this really should
	 * look at the guest's view of PMSIDR_EL1.
	 */
	return (kvm_has_feat(kvm, FEAT_SPEv1p4) &&
		(read_sysreg_s(SYS_PMSIDR_EL1) & PMSIDR_EL1_FDS));
}
209
+
210
/*
 * Whether TRBE+MPAM interaction is present: both features implemented
 * for the guest, and the MPAM bit set in the host's TRBIDR_EL1.
 */
static bool feat_trbe_mpam(struct kvm *kvm)
{
	/*
	 * Revisit this if KVM ever supports both MPAM and TRBE --
	 * this really should look at the guest's view of TRBIDR_EL1.
	 */
	return (kvm_has_feat(kvm, FEAT_TRBE) &&
		kvm_has_feat(kvm, FEAT_MPAM) &&
		(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_MPAM));
}
220
+
221
+ static bool feat_ebep_pmuv3_ss (struct kvm * kvm )
222
+ {
223
+ return kvm_has_feat (kvm , FEAT_EBEP ) || kvm_has_feat (kvm , FEAT_PMUv3_SS );
224
+ }
225
+
183
226
static bool compute_hcr_rw (struct kvm * kvm , u64 * bits )
184
227
{
185
228
/* This is purely academic: AArch32 and NV are mutually exclusive */
@@ -589,6 +632,106 @@ static const struct reg_bits_to_feat_map hafgrtr_feat_map[] = {
589
632
FEAT_AMUv1 ),
590
633
};
591
634
635
/*
 * HFGITR2_EL2: instruction fine-grained trap bits and the feature each
 * one depends on.  Bits not backed by an implemented feature are folded
 * into the register's RES0 mask (see compute_res0_bits() users).
 */
static const struct reg_bits_to_feat_map hfgitr2_feat_map[] = {
	NEEDS_FEAT(HFGITR2_EL2_nDCCIVAPS, FEAT_PoPS),
	NEEDS_FEAT(HFGITR2_EL2_TSBCSYNC, FEAT_TRBEv1p1)
};
639
+
640
/*
 * HFGRTR2_EL2: read fine-grained trap bits vs. the feature that makes
 * each bit meaningful.
 */
static const struct reg_bits_to_feat_map hfgrtr2_feat_map[] = {
	NEEDS_FEAT(HFGRTR2_EL2_nPFAR_EL1, FEAT_PFAR),
	NEEDS_FEAT(HFGRTR2_EL2_nERXGSR_EL1, FEAT_RASv2),
	/* All the sysreg ALIAS/MASK trap bits come and go with FEAT_SRMASK */
	NEEDS_FEAT(HFGRTR2_EL2_nACTLRALIAS_EL1 |
		   HFGRTR2_EL2_nACTLRMASK_EL1 |
		   HFGRTR2_EL2_nCPACRALIAS_EL1 |
		   HFGRTR2_EL2_nCPACRMASK_EL1 |
		   HFGRTR2_EL2_nSCTLR2MASK_EL1 |
		   HFGRTR2_EL2_nSCTLRALIAS2_EL1 |
		   HFGRTR2_EL2_nSCTLRALIAS_EL1 |
		   HFGRTR2_EL2_nSCTLRMASK_EL1 |
		   HFGRTR2_EL2_nTCR2ALIAS_EL1 |
		   HFGRTR2_EL2_nTCR2MASK_EL1 |
		   HFGRTR2_EL2_nTCRALIAS_EL1 |
		   HFGRTR2_EL2_nTCRMASK_EL1,
		   FEAT_SRMASK),
	NEEDS_FEAT(HFGRTR2_EL2_nRCWSMASK_EL1, FEAT_THE),
};
658
+
659
/*
 * HFGWTR2_EL2: write fine-grained trap bits.  Mirrors hfgrtr2_feat_map
 * except for nERXGSR_EL1 -- presumably ERXGSR_EL1 is read-only so it has
 * no write-trap bit; confirm against the register description.
 */
static const struct reg_bits_to_feat_map hfgwtr2_feat_map[] = {
	NEEDS_FEAT(HFGWTR2_EL2_nPFAR_EL1, FEAT_PFAR),
	/* All the sysreg ALIAS/MASK trap bits come and go with FEAT_SRMASK */
	NEEDS_FEAT(HFGWTR2_EL2_nACTLRALIAS_EL1 |
		   HFGWTR2_EL2_nACTLRMASK_EL1 |
		   HFGWTR2_EL2_nCPACRALIAS_EL1 |
		   HFGWTR2_EL2_nCPACRMASK_EL1 |
		   HFGWTR2_EL2_nSCTLR2MASK_EL1 |
		   HFGWTR2_EL2_nSCTLRALIAS2_EL1 |
		   HFGWTR2_EL2_nSCTLRALIAS_EL1 |
		   HFGWTR2_EL2_nSCTLRMASK_EL1 |
		   HFGWTR2_EL2_nTCR2ALIAS_EL1 |
		   HFGWTR2_EL2_nTCR2MASK_EL1 |
		   HFGWTR2_EL2_nTCRALIAS_EL1 |
		   HFGWTR2_EL2_nTCRMASK_EL1,
		   FEAT_SRMASK),
	NEEDS_FEAT(HFGWTR2_EL2_nRCWSMASK_EL1, FEAT_THE),
};
676
+
677
/*
 * HDFGRTR2_EL2: debug read fine-grained trap bits vs. required features.
 * Feature checks that depend on more than a single ID field use the
 * helper-function form of NEEDS_FEAT().
 */
static const struct reg_bits_to_feat_map hdfgrtr2_feat_map[] = {
	NEEDS_FEAT(HDFGRTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss),
	NEEDS_FEAT(HDFGRTR2_EL2_nTRCITECR_EL1, FEAT_ITE),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMICFILTR_EL0 |
		   HDFGRTR2_EL2_nPMICNTR_EL0,
		   FEAT_PMUv3_ICNTR),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMUACR_EL1, FEAT_PMUv3p9),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMSSCR_EL1 |
		   HDFGRTR2_EL2_nPMSSDATA,
		   FEAT_PMUv3_SS),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMIAR_EL1, FEAT_SEBEP),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMSDSFR_EL1, feat_spe_fds),
	NEEDS_FEAT(HDFGRTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM),
	NEEDS_FEAT(HDFGRTR2_EL2_nSPMACCESSR_EL1 |
		   HDFGRTR2_EL2_nSPMCNTEN |
		   HDFGRTR2_EL2_nSPMCR_EL0 |
		   HDFGRTR2_EL2_nSPMDEVAFF_EL1 |
		   HDFGRTR2_EL2_nSPMEVCNTRn_EL0 |
		   HDFGRTR2_EL2_nSPMEVTYPERn_EL0 |
		   HDFGRTR2_EL2_nSPMID |
		   HDFGRTR2_EL2_nSPMINTEN |
		   HDFGRTR2_EL2_nSPMOVS |
		   HDFGRTR2_EL2_nSPMSCR_EL1 |
		   HDFGRTR2_EL2_nSPMSELR_EL0,
		   FEAT_SPMU),
	NEEDS_FEAT(HDFGRTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2),
	NEEDS_FEAT(HDFGRTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam),
};
706
+
707
/*
 * HDFGWTR2_EL2: debug write fine-grained trap bits.  Differs from the
 * read map where registers/bits are read-only (no nPMSSDATA, nSPMDEVAFF,
 * nSPMID here -- presumably RO, confirm against the spec) and gains
 * nPMZR_EL0, a write-only register.
 */
static const struct reg_bits_to_feat_map hdfgwtr2_feat_map[] = {
	NEEDS_FEAT(HDFGWTR2_EL2_nMDSELR_EL1, FEAT_Debugv8p9),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMECR_EL1, feat_ebep_pmuv3_ss),
	NEEDS_FEAT(HDFGWTR2_EL2_nTRCITECR_EL1, FEAT_ITE),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMICFILTR_EL0 |
		   HDFGWTR2_EL2_nPMICNTR_EL0,
		   FEAT_PMUv3_ICNTR),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMUACR_EL1 |
		   HDFGWTR2_EL2_nPMZR_EL0,
		   FEAT_PMUv3p9),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMSSCR_EL1, FEAT_PMUv3_SS),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMIAR_EL1, FEAT_SEBEP),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMSDSFR_EL1, feat_spe_fds),
	NEEDS_FEAT(HDFGWTR2_EL2_nPMBMAR_EL1, FEAT_SPE_nVM),
	NEEDS_FEAT(HDFGWTR2_EL2_nSPMACCESSR_EL1 |
		   HDFGWTR2_EL2_nSPMCNTEN |
		   HDFGWTR2_EL2_nSPMCR_EL0 |
		   HDFGWTR2_EL2_nSPMEVCNTRn_EL0 |
		   HDFGWTR2_EL2_nSPMEVTYPERn_EL0 |
		   HDFGWTR2_EL2_nSPMINTEN |
		   HDFGWTR2_EL2_nSPMOVS |
		   HDFGWTR2_EL2_nSPMSCR_EL1 |
		   HDFGWTR2_EL2_nSPMSELR_EL0,
		   FEAT_SPMU),
	NEEDS_FEAT(HDFGWTR2_EL2_nMDSTEPOP_EL1, FEAT_STEP2),
	NEEDS_FEAT(HDFGWTR2_EL2_nTRBMPAM_EL1, feat_trbe_mpam),
};
734
+
592
735
static const struct reg_bits_to_feat_map hcrx_feat_map [] = {
593
736
NEEDS_FEAT (HCRX_EL2_PACMEn , feat_pauth_lr ),
594
737
NEEDS_FEAT (HCRX_EL2_EnFPM , FEAT_FPMR ),
@@ -820,6 +963,27 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
820
963
ARRAY_SIZE (hafgrtr_feat_map ),
821
964
0 , NEVER_FGU );
822
965
break ;
966
+ case HFGRTR2_GROUP :
967
+ val |= compute_res0_bits (kvm , hfgrtr2_feat_map ,
968
+ ARRAY_SIZE (hfgrtr2_feat_map ),
969
+ 0 , NEVER_FGU );
970
+ val |= compute_res0_bits (kvm , hfgwtr2_feat_map ,
971
+ ARRAY_SIZE (hfgwtr2_feat_map ),
972
+ 0 , NEVER_FGU );
973
+ break ;
974
+ case HFGITR2_GROUP :
975
+ val |= compute_res0_bits (kvm , hfgitr2_feat_map ,
976
+ ARRAY_SIZE (hfgitr2_feat_map ),
977
+ 0 , NEVER_FGU );
978
+ break ;
979
+ case HDFGRTR2_GROUP :
980
+ val |= compute_res0_bits (kvm , hdfgrtr2_feat_map ,
981
+ ARRAY_SIZE (hdfgrtr2_feat_map ),
982
+ 0 , NEVER_FGU );
983
+ val |= compute_res0_bits (kvm , hdfgwtr2_feat_map ,
984
+ ARRAY_SIZE (hdfgwtr2_feat_map ),
985
+ 0 , NEVER_FGU );
986
+ break ;
823
987
default :
824
988
BUG ();
825
989
}
@@ -868,6 +1032,36 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
868
1032
* res0 |= hafgrtr_masks .res0 ;
869
1033
* res1 = HAFGRTR_EL2_RES1 ;
870
1034
break ;
1035
+ case HFGRTR2_EL2 :
1036
+ * res0 = compute_res0_bits (kvm , hfgrtr2_feat_map ,
1037
+ ARRAY_SIZE (hfgrtr2_feat_map ), 0 , 0 );
1038
+ * res0 |= hfgrtr2_masks .res0 ;
1039
+ * res1 = HFGRTR2_EL2_RES1 ;
1040
+ break ;
1041
+ case HFGWTR2_EL2 :
1042
+ * res0 = compute_res0_bits (kvm , hfgwtr2_feat_map ,
1043
+ ARRAY_SIZE (hfgwtr2_feat_map ), 0 , 0 );
1044
+ * res0 |= hfgwtr2_masks .res0 ;
1045
+ * res1 = HFGWTR2_EL2_RES1 ;
1046
+ break ;
1047
+ case HFGITR2_EL2 :
1048
+ * res0 = compute_res0_bits (kvm , hfgitr2_feat_map ,
1049
+ ARRAY_SIZE (hfgitr2_feat_map ), 0 , 0 );
1050
+ * res0 |= hfgitr2_masks .res0 ;
1051
+ * res1 = HFGITR2_EL2_RES1 ;
1052
+ break ;
1053
+ case HDFGRTR2_EL2 :
1054
+ * res0 = compute_res0_bits (kvm , hdfgrtr2_feat_map ,
1055
+ ARRAY_SIZE (hdfgrtr2_feat_map ), 0 , 0 );
1056
+ * res0 |= hdfgrtr2_masks .res0 ;
1057
+ * res1 = HDFGRTR2_EL2_RES1 ;
1058
+ break ;
1059
+ case HDFGWTR2_EL2 :
1060
+ * res0 = compute_res0_bits (kvm , hdfgwtr2_feat_map ,
1061
+ ARRAY_SIZE (hdfgwtr2_feat_map ), 0 , 0 );
1062
+ * res0 |= hdfgwtr2_masks .res0 ;
1063
+ * res1 = HDFGWTR2_EL2_RES1 ;
1064
+ break ;
871
1065
case HCRX_EL2 :
872
1066
* res0 = compute_res0_bits (kvm , hcrx_feat_map ,
873
1067
ARRAY_SIZE (hcrx_feat_map ), 0 , 0 );
0 commit comments