Skip to content

Commit 52425a3

Browse files
iamjpnmpe
authored and committed
KVM: PPC: Introduce FPR/VR accessor functions
Introduce accessor functions for floating point and vector registers like the ones that exist for GPRs. Use these to replace the existing FPR and VR accessor macros.

This will be important later for Nested APIv2 support which requires additional functionality for accessing and modifying VCPU state.

Signed-off-by: Gautam Menghani <gautam@linux.ibm.com>
Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-3-jniethe5@gmail.com
1 parent 0e85b7d commit 52425a3

File tree

5 files changed

+110
-45
lines changed

5 files changed

+110
-45
lines changed

arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -403,6 +403,61 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
403403
return vcpu->arch.fault_dar;
404404
}
405405

406+
static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
407+
{
408+
return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
409+
}
410+
411+
static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
412+
{
413+
vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
414+
}
415+
416+
static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
417+
{
418+
return vcpu->arch.fp.fpscr;
419+
}
420+
421+
static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
422+
{
423+
vcpu->arch.fp.fpscr = val;
424+
}
425+
426+
427+
static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
428+
{
429+
return vcpu->arch.fp.fpr[i][j];
430+
}
431+
432+
static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
433+
u64 val)
434+
{
435+
vcpu->arch.fp.fpr[i][j] = val;
436+
}
437+
438+
#ifdef CONFIG_ALTIVEC
439+
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
440+
{
441+
*v = vcpu->arch.vr.vr[i];
442+
}
443+
444+
static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
445+
vector128 *val)
446+
{
447+
vcpu->arch.vr.vr[i] = *val;
448+
}
449+
450+
static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
451+
{
452+
return vcpu->arch.vr.vscr.u[3];
453+
}
454+
455+
static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
456+
{
457+
vcpu->arch.vr.vscr.u[3] = val;
458+
}
459+
#endif
460+
406461
/* Expiry time of vcpu DEC relative to host TB */
407462
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
408463
{

arch/powerpc/include/asm/kvm_booke.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
8989
return vcpu->arch.regs.nip;
9090
}
9191

92+
static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
93+
{
94+
vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
95+
}
96+
97+
static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
98+
{
99+
return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
100+
}
101+
92102
#ifdef CONFIG_BOOKE
93103
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
94104
{

arch/powerpc/kvm/book3s.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
636636
break;
637637
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
638638
i = id - KVM_REG_PPC_FPR0;
639-
*val = get_reg_val(id, VCPU_FPR(vcpu, i));
639+
*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
640640
break;
641641
case KVM_REG_PPC_FPSCR:
642-
*val = get_reg_val(id, vcpu->arch.fp.fpscr);
642+
*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
643643
break;
644644
#ifdef CONFIG_VSX
645645
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
646646
if (cpu_has_feature(CPU_FTR_VSX)) {
647647
i = id - KVM_REG_PPC_VSR0;
648-
val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
649-
val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
648+
val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
649+
val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
650650
} else {
651651
r = -ENXIO;
652652
}
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
724724
break;
725725
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
726726
i = id - KVM_REG_PPC_FPR0;
727-
VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
727+
kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
728728
break;
729729
case KVM_REG_PPC_FPSCR:
730730
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
733733
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
734734
if (cpu_has_feature(CPU_FTR_VSX)) {
735735
i = id - KVM_REG_PPC_VSR0;
736-
vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
737-
vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
736+
kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
737+
kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
738738
} else {
739739
r = -ENXIO;
740740
}
@@ -765,7 +765,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
765765
break;
766766
#endif /* CONFIG_KVM_XIVE */
767767
case KVM_REG_PPC_FSCR:
768-
vcpu->arch.fscr = set_reg_val(id, *val);
768+
kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
NOTE(review): this hunk handles KVM_REG_PPC_FSCR (the Facility Status and Control Register, vcpu->arch.fscr), but the replacement calls kvmppc_set_fpscr(), which — per the accessor introduced in kvm_book3s.h in this same commit — writes the floating-point status register vcpu->arch.fp.fpscr instead. A userspace SET_ONE_REG of FSCR would now silently clobber FPSCR and leave FSCR unchanged. Presumably a dedicated kvmppc_set_fscr() accessor was intended here — confirm against the follow-up patches in this series.
769769
break;
770770
case KVM_REG_PPC_TAR:
771771
vcpu->arch.tar = set_reg_val(id, *val);

arch/powerpc/kvm/emulate_loadstore.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
250250
vcpu->arch.mmio_sp64_extend = 1;
251251

252252
emulated = kvmppc_handle_store(vcpu,
253-
VCPU_FPR(vcpu, op.reg), size, 1);
253+
kvmppc_get_fpr(vcpu, op.reg), size, 1);
254254

255255
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
256256
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

arch/powerpc/kvm/powerpc.c

Lines changed: 36 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
934934
return;
935935

936936
if (index >= 32) {
937-
val.vval = VCPU_VSX_VR(vcpu, index - 32);
937+
kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
938938
val.vsxval[offset] = gpr;
939-
VCPU_VSX_VR(vcpu, index - 32) = val.vval;
939+
kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
940940
} else {
941-
VCPU_VSX_FPR(vcpu, index, offset) = gpr;
941+
kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
942942
}
943943
}
944944

@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
949949
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
950950

951951
if (index >= 32) {
952-
val.vval = VCPU_VSX_VR(vcpu, index - 32);
952+
kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
953953
val.vsxval[0] = gpr;
954954
val.vsxval[1] = gpr;
955-
VCPU_VSX_VR(vcpu, index - 32) = val.vval;
955+
kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
956956
} else {
957-
VCPU_VSX_FPR(vcpu, index, 0) = gpr;
958-
VCPU_VSX_FPR(vcpu, index, 1) = gpr;
957+
kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
958+
kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
959959
}
960960
}
961961

@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
970970
val.vsx32val[1] = gpr;
971971
val.vsx32val[2] = gpr;
972972
val.vsx32val[3] = gpr;
973-
VCPU_VSX_VR(vcpu, index - 32) = val.vval;
973+
kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
974974
} else {
975975
val.vsx32val[0] = gpr;
976976
val.vsx32val[1] = gpr;
977-
VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
978-
VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
977+
kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
978+
kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
979979
}
980980
}
981981

@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
991991
return;
992992

993993
if (index >= 32) {
994-
val.vval = VCPU_VSX_VR(vcpu, index - 32);
994+
kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
995995
val.vsx32val[offset] = gpr32;
996-
VCPU_VSX_VR(vcpu, index - 32) = val.vval;
996+
kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
997997
} else {
998998
dword_offset = offset / 2;
999999
word_offset = offset % 2;
1000-
val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
1000+
val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
10011001
val.vsx32val[word_offset] = gpr32;
1002-
VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
1002+
kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
10031003
}
10041004
}
10051005
#endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
10581058
if (offset == -1)
10591059
return;
10601060

1061-
val.vval = VCPU_VSX_VR(vcpu, index);
1061+
kvmppc_get_vsx_vr(vcpu, index, &val.vval);
10621062
val.vsxval[offset] = gpr;
1063-
VCPU_VSX_VR(vcpu, index) = val.vval;
1063+
kvmppc_set_vsx_vr(vcpu, index, &val.vval);
10641064
}
10651065

10661066
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
10741074
if (offset == -1)
10751075
return;
10761076

1077-
val.vval = VCPU_VSX_VR(vcpu, index);
1077+
kvmppc_get_vsx_vr(vcpu, index, &val.vval);
10781078
val.vsx32val[offset] = gpr32;
1079-
VCPU_VSX_VR(vcpu, index) = val.vval;
1079+
kvmppc_set_vsx_vr(vcpu, index, &val.vval);
10801080
}
10811081

10821082
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
10901090
if (offset == -1)
10911091
return;
10921092

1093-
val.vval = VCPU_VSX_VR(vcpu, index);
1093+
kvmppc_get_vsx_vr(vcpu, index, &val.vval);
10941094
val.vsx16val[offset] = gpr16;
1095-
VCPU_VSX_VR(vcpu, index) = val.vval;
1095+
kvmppc_set_vsx_vr(vcpu, index, &val.vval);
10961096
}
10971097

10981098
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
11061106
if (offset == -1)
11071107
return;
11081108

1109-
val.vval = VCPU_VSX_VR(vcpu, index);
1109+
kvmppc_get_vsx_vr(vcpu, index, &val.vval);
11101110
val.vsx8val[offset] = gpr8;
1111-
VCPU_VSX_VR(vcpu, index) = val.vval;
1111+
kvmppc_set_vsx_vr(vcpu, index, &val.vval);
11121112
}
11131113
#endif /* CONFIG_ALTIVEC */
11141114

@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
11941194
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
11951195
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
11961196

1197-
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1197+
kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
11981198
break;
11991199
#ifdef CONFIG_PPC_BOOK3S
12001200
case KVM_MMIO_REG_QPR:
12011201
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
12021202
break;
12031203
case KVM_MMIO_REG_FQPR:
1204-
VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1204+
kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
12051205
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
12061206
break;
12071207
#endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
14191419
}
14201420

14211421
if (rs < 32) {
1422-
*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1422+
*val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
14231423
} else {
1424-
reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1424+
kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
14251425
*val = reg.vsxval[vsx_offset];
14261426
}
14271427
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
14381438
if (rs < 32) {
14391439
dword_offset = vsx_offset / 2;
14401440
word_offset = vsx_offset % 2;
1441-
reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1441+
reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
14421442
*val = reg.vsx32val[word_offset];
14431443
} else {
1444-
reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1444+
kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
14451445
*val = reg.vsx32val[vsx_offset];
14461446
}
14471447
break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
15561556
if (vmx_offset == -1)
15571557
return -1;
15581558

1559-
reg.vval = VCPU_VSX_VR(vcpu, index);
1559+
kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
15601560
*val = reg.vsxval[vmx_offset];
15611561

15621562
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
15741574
if (vmx_offset == -1)
15751575
return -1;
15761576

1577-
reg.vval = VCPU_VSX_VR(vcpu, index);
1577+
kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
15781578
*val = reg.vsx32val[vmx_offset];
15791579

15801580
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
15921592
if (vmx_offset == -1)
15931593
return -1;
15941594

1595-
reg.vval = VCPU_VSX_VR(vcpu, index);
1595+
kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
15961596
*val = reg.vsx16val[vmx_offset];
15971597

15981598
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
16101610
if (vmx_offset == -1)
16111611
return -1;
16121612

1613-
reg.vval = VCPU_VSX_VR(vcpu, index);
1613+
kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
16141614
*val = reg.vsx8val[vmx_offset];
16151615

16161616
return result;
@@ -1719,14 +1719,14 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17191719
r = -ENXIO;
17201720
break;
17211721
}
1722-
val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1722+
kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
17231723
break;
17241724
case KVM_REG_PPC_VSCR:
17251725
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17261726
r = -ENXIO;
17271727
break;
17281728
}
1729-
val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1729+
val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
17301730
break;
17311731
case KVM_REG_PPC_VRSAVE:
17321732
val = get_reg_val(reg->id, vcpu->arch.vrsave);
@@ -1770,14 +1770,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
17701770
r = -ENXIO;
17711771
break;
17721772
}
1773-
vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1773+
kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
17741774
break;
17751775
case KVM_REG_PPC_VSCR:
17761776
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
17771777
r = -ENXIO;
17781778
break;
17791779
}
1780-
vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1780+
kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
17811781
break;
17821782
case KVM_REG_PPC_VRSAVE:
17831783
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {

0 commit comments

Comments
 (0)