@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 		return;
 
 	if (index >= 32) {
-		val.vval = VCPU_VSX_VR(vcpu, index - 32);
+		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
 		val.vsxval[offset] = gpr;
-		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
 	} else {
-		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
 	}
 }
 
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
 	if (index >= 32) {
-		val.vval = VCPU_VSX_VR(vcpu, index - 32);
+		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
 		val.vsxval[0] = gpr;
 		val.vsxval[1] = gpr;
-		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
 	} else {
-		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
-		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+		kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
 	}
 }
 
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 		val.vsx32val[1] = gpr;
 		val.vsx32val[2] = gpr;
 		val.vsx32val[3] = gpr;
-		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
 	} else {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
-		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
-		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+		kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
 	}
 }
 
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 		return;
 
 	if (index >= 32) {
-		val.vval = VCPU_VSX_VR(vcpu, index - 32);
+		kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
 		val.vsx32val[offset] = gpr32;
-		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+		kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
 	} else {
 		dword_offset = offset / 2;
 		word_offset = offset % 2;
-		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+		val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
 		val.vsx32val[word_offset] = gpr32;
-		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+		kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
 	}
 }
 #endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	val.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
 	val.vsxval[offset] = gpr;
-	VCPU_VSX_VR(vcpu, index) = val.vval;
+	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
 }
 
 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	val.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
 	val.vsx32val[offset] = gpr32;
-	VCPU_VSX_VR(vcpu, index) = val.vval;
+	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
 }
 
 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	val.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
 	val.vsx16val[offset] = gpr16;
-	VCPU_VSX_VR(vcpu, index) = val.vval;
+	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
 }
 
 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	val.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &val.vval);
 	val.vsx8val[offset] = gpr8;
-	VCPU_VSX_VR(vcpu, index) = val.vval;
+	kvmppc_set_vsx_vr(vcpu, index, &val.vval);
 }
 #endif /* CONFIG_ALTIVEC */
 
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
 
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		break;
 #ifdef CONFIG_PPC_BOOK3S
 	case KVM_MMIO_REG_QPR:
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 	case KVM_MMIO_REG_FQPR:
-		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+		kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
 		break;
 #endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 		}
 
 		if (rs < 32) {
-			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+			*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+			kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
 			*val = reg.vsxval[vsx_offset];
 		}
 		break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 		if (rs < 32) {
 			dword_offset = vsx_offset / 2;
 			word_offset = vsx_offset % 2;
-			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+			reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
 			*val = reg.vsx32val[word_offset];
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+			kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
 			*val = reg.vsx32val[vsx_offset];
 		}
 		break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
 	if (vmx_offset == -1)
 		return -1;
 
-	reg.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
 	*val = reg.vsxval[vmx_offset];
 
 	return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
 	if (vmx_offset == -1)
 		return -1;
 
-	reg.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
 	*val = reg.vsx32val[vmx_offset];
 
 	return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
 	if (vmx_offset == -1)
 		return -1;
 
-	reg.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
 	*val = reg.vsx16val[vmx_offset];
 
 	return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
 	if (vmx_offset == -1)
 		return -1;
 
-	reg.vval = VCPU_VSX_VR(vcpu, index);
+	kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
 	*val = reg.vsx8val[vmx_offset];
 
 	return result;
@@ -1719,14 +1719,14 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				r = -ENXIO;
 				break;
 			}
-			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+			kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+			val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
 			break;
 		case KVM_REG_PPC_VRSAVE:
 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
@@ -1770,14 +1770,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+			kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
 			break;
 		case KVM_REG_PPC_VSCR:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 				r = -ENXIO;
 				break;
 			}
-			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+			kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
 			break;
 		case KVM_REG_PPC_VRSAVE:
 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {