@@ -47,6 +47,7 @@ void kvm_init_nested(struct kvm *kvm)
 {
 	kvm->arch.nested_mmus = NULL;
 	kvm->arch.nested_mmus_size = 0;
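+	/* No vCPU has a VNCR page mapped yet */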
+	atomic_set(&kvm->arch.vncr_map_count, 0);
 }

 static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
@@ -756,6 +757,7 @@ void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
 		clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
 		vcpu->arch.vncr_tlb->cpu = -1;
 		host_data_clear_flag(L1_VNCR_MAPPED);
+		atomic_dec(&vcpu->kvm->arch.vncr_map_count);
 	}

 /*
@@ -855,6 +857,196 @@ static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
 	}
 }

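+/*
+ * Scope of a trapped stage-1 TLBI: the type of invalidation, plus the
+ * ASID and VA range (where applicable) that it targets.
+ */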
+struct s1e2_tlbi_scope {
+	enum {
+		TLBI_ALL,
+		TLBI_VA,
+		TLBI_VAA,
+		TLBI_ASID,
+	} type;
+
+	u16 asid;
+	u64 va;
+	u64 size;
+};
+
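+/*
+ * Invalidate any vCPU's cached VNCR translation that overlaps the
+ * scope of a guest stage-1 TLBI. Must be called with mmu_lock held
+ * for write, as asserted below.
+ */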
+static void invalidate_vncr_va(struct kvm *kvm,
+			       struct s1e2_tlbi_scope *scope)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+
+	lockdep_assert_held_write(&kvm->mmu_lock);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
+		u64 va_start, va_end, va_size;
+
+		if (!vt->valid)
+			continue;
+
+		/* VA range covered by the cached translation */
+		va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
+							   vt->wr.level));
+		va_start = vt->gva & ~(va_size - 1);
+		va_end = va_start + va_size;
+
+		switch (scope->type) {
+		case TLBI_ALL:
+			break;
+
+		case TLBI_VA:
+			if (va_end <= scope->va ||
+			    va_start >= (scope->va + scope->size))
+				continue;
+			if (vt->wr.nG && vt->wr.asid != scope->asid)
+				continue;
+			break;
+
+		case TLBI_VAA:
+			if (va_end <= scope->va ||
+			    va_start >= (scope->va + scope->size))
+				continue;
+			break;
+
+		case TLBI_ASID:
+			if (!vt->wr.nG || vt->wr.asid != scope->asid)
+				continue;
+			break;
+		}
+
+		invalidate_vncr(vt);
+	}
+}
+
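+/*
+ * Decode a trapped TLBI instruction into an s1e2_tlbi_scope. When no
+ * valid TTL hint is encoded, assume the worst case and use a 1GiB
+ * range around the target address.
+ */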
+static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
+				  struct s1e2_tlbi_scope *scope)
+{
+	switch (inst) {
+	case OP_TLBI_ALLE2:
+	case OP_TLBI_ALLE2IS:
+	case OP_TLBI_ALLE2OS:
+	case OP_TLBI_VMALLE1:
+	case OP_TLBI_VMALLE1IS:
+	case OP_TLBI_VMALLE1OS:
+	case OP_TLBI_ALLE2NXS:
+	case OP_TLBI_ALLE2ISNXS:
+	case OP_TLBI_ALLE2OSNXS:
+	case OP_TLBI_VMALLE1NXS:
+	case OP_TLBI_VMALLE1ISNXS:
+	case OP_TLBI_VMALLE1OSNXS:
+		scope->type = TLBI_ALL;
+		break;
+	case OP_TLBI_VAE2:
+	case OP_TLBI_VAE2IS:
+	case OP_TLBI_VAE2OS:
+	case OP_TLBI_VAE1:
+	case OP_TLBI_VAE1IS:
+	case OP_TLBI_VAE1OS:
+	case OP_TLBI_VAE2NXS:
+	case OP_TLBI_VAE2ISNXS:
+	case OP_TLBI_VAE2OSNXS:
+	case OP_TLBI_VAE1NXS:
+	case OP_TLBI_VAE1ISNXS:
+	case OP_TLBI_VAE1OSNXS:
+	case OP_TLBI_VALE2:
+	case OP_TLBI_VALE2IS:
+	case OP_TLBI_VALE2OS:
+	case OP_TLBI_VALE1:
+	case OP_TLBI_VALE1IS:
+	case OP_TLBI_VALE1OS:
+	case OP_TLBI_VALE2NXS:
+	case OP_TLBI_VALE2ISNXS:
+	case OP_TLBI_VALE2OSNXS:
+	case OP_TLBI_VALE1NXS:
+	case OP_TLBI_VALE1ISNXS:
+	case OP_TLBI_VALE1OSNXS:
+		scope->type = TLBI_VA;
+		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+		if (!scope->size)
+			scope->size = SZ_1G;
+		scope->va = (val << 12) & ~(scope->size - 1);
+		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
+		break;
+	case OP_TLBI_ASIDE1:
+	case OP_TLBI_ASIDE1IS:
+	case OP_TLBI_ASIDE1OS:
+	case OP_TLBI_ASIDE1NXS:
+	case OP_TLBI_ASIDE1ISNXS:
+	case OP_TLBI_ASIDE1OSNXS:
+		scope->type = TLBI_ASID;
+		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
+		break;
+	case OP_TLBI_VAAE1:
+	case OP_TLBI_VAAE1IS:
+	case OP_TLBI_VAAE1OS:
+	case OP_TLBI_VAAE1NXS:
+	case OP_TLBI_VAAE1ISNXS:
+	case OP_TLBI_VAAE1OSNXS:
+	case OP_TLBI_VAALE1:
+	case OP_TLBI_VAALE1IS:
+	case OP_TLBI_VAALE1OS:
+	case OP_TLBI_VAALE1NXS:
+	case OP_TLBI_VAALE1ISNXS:
+	case OP_TLBI_VAALE1OSNXS:
+		scope->type = TLBI_VAA;
+		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
+		if (!scope->size)
+			scope->size = SZ_1G;
+		scope->va = (val << 12) & ~(scope->size - 1);
+		break;
+	case OP_TLBI_RVAE2:
+	case OP_TLBI_RVAE2IS:
+	case OP_TLBI_RVAE2OS:
+	case OP_TLBI_RVAE1:
+	case OP_TLBI_RVAE1IS:
+	case OP_TLBI_RVAE1OS:
+	case OP_TLBI_RVAE2NXS:
+	case OP_TLBI_RVAE2ISNXS:
+	case OP_TLBI_RVAE2OSNXS:
+	case OP_TLBI_RVAE1NXS:
+	case OP_TLBI_RVAE1ISNXS:
+	case OP_TLBI_RVAE1OSNXS:
+	case OP_TLBI_RVALE2:
+	case OP_TLBI_RVALE2IS:
+	case OP_TLBI_RVALE2OS:
+	case OP_TLBI_RVALE1:
+	case OP_TLBI_RVALE1IS:
+	case OP_TLBI_RVALE1OS:
+	case OP_TLBI_RVALE2NXS:
+	case OP_TLBI_RVALE2ISNXS:
+	case OP_TLBI_RVALE2OSNXS:
+	case OP_TLBI_RVALE1NXS:
+	case OP_TLBI_RVALE1ISNXS:
+	case OP_TLBI_RVALE1OSNXS:
+		scope->type = TLBI_VA;
+		scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
+		break;
+	case OP_TLBI_RVAAE1:
+	case OP_TLBI_RVAAE1IS:
+	case OP_TLBI_RVAAE1OS:
+	case OP_TLBI_RVAAE1NXS:
+	case OP_TLBI_RVAAE1ISNXS:
+	case OP_TLBI_RVAAE1OSNXS:
+	case OP_TLBI_RVAALE1:
+	case OP_TLBI_RVAALE1IS:
+	case OP_TLBI_RVAALE1OS:
+	case OP_TLBI_RVAALE1NXS:
+	case OP_TLBI_RVAALE1ISNXS:
+	case OP_TLBI_RVAALE1OSNXS:
+		scope->type = TLBI_VAA;
+		scope->va = decode_range_tlbi(val, &scope->size, NULL);
+		break;
+	}
+}
+
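+/*
+ * Deal with a trapped stage-1 TLBI on behalf of the guest hypervisor:
+ * compute the scope of the operation and drop any VNCR TLB entry it
+ * covers.
+ */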
+void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
+{
+	struct s1e2_tlbi_scope scope = {};
+
+	compute_s1_tlbi_range(vcpu, inst, val, &scope);
+	invalidate_vncr_va(vcpu->kvm, &scope);
+}
+
 void kvm_nested_s2_wp(struct kvm *kvm)
 {
 	int i;
@@ -1191,6 +1383,7 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
 	if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
 		__set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
 		host_data_set_flag(L1_VNCR_MAPPED);
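+		/* Balanced by the atomic_dec() in kvm_vcpu_put_hw_mmu() */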
+		atomic_inc(&vcpu->kvm->arch.vncr_map_count);
 	}
 }
