
Commit 6de2e83

iamjpnmpe authored and committed
KVM: PPC: Book3S HV: Introduce low level MSR accessor
kvmppc_get_msr() and kvmppc_set_msr_fast() serve as accessors for the MSR. However, because the MSR is kept in the shared regs, they include a conditional check for kvmppc_shared_big_endian() and an endian conversion.

Within the Book3S HV specific code there are direct reads and writes of shregs::msr. In preparation for Nested APIv2 these accesses need to be replaced with accessor functions so it is possible to extend their behavior. However, using the kvmppc_get_msr() and kvmppc_set_msr_fast() functions is undesirable because it would introduce a conditional branch and endian conversion that are not currently present.

kvmppc_set_msr_hv() already exists; it is used for the kvmppc_ops::set_msr callback. Introduce a low level accessor __kvmppc_{s,g}et_msr_hv() that simply gets and sets shregs::msr. This will be extended for Nested APIv2 support.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-8-jniethe5@gmail.com
1 parent ebc88ea commit 6de2e83
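
For contrast, this is roughly what the generic shared-regs getter expands to (a sketch based on the SHARED_WRAPPER accessor macros in arch/powerpc/include/asm/kvm_ppc.h; treat the exact expansion as an approximation). It shows the conditional branch and endian conversion the commit message wants to keep off the HV paths:

/* Approximate expansion of the generic MSR getter: every access pays
 * a kvmppc_shared_big_endian() branch plus a byte swap. */
static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be64_to_cpu(vcpu->arch.shared->msr);
        else
                return le64_to_cpu(vcpu->arch.shared->msr);
}

The new __kvmppc_{s,g}et_msr_hv() helpers added in book3s_hv.h below are plain loads and stores of shregs::msr, so this conversion cost stays out of the HV-only code.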

4 files changed, +33 −21 lines changed

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 3 additions & 2 deletions
@@ -28,6 +28,7 @@
 #include <asm/pte-walk.h>
 
 #include "book3s.h"
+#include "book3s_hv.h"
 #include "trace_hv.h"
 
 //#define DEBUG_RESIZE_HPT 1
@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
         unsigned long v, orig_v, gr;
         __be64 *hptep;
         long int index;
-        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+        int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
 
         if (kvm_is_radix(vcpu->kvm))
                 return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
         /* Get PP bits and key for permission check */
         pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
-        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+        key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
         key &= slb_v;
 
         /* Calculate permissions */

arch/powerpc/kvm/book3s_hv.c

Lines changed: 17 additions & 17 deletions
@@ -1374,7 +1374,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
  */
 static void kvmppc_cede(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.shregs.msr |= MSR_EE;
+        __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
         vcpu->arch.ceded = 1;
         smp_mb();
         if (vcpu->arch.prodded) {
@@ -1589,7 +1589,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
          * That can happen due to a bug, or due to a machine check
          * occurring at just the wrong time.
          */
-        if (vcpu->arch.shregs.msr & MSR_HV) {
+        if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
                 printk(KERN_EMERG "KVM trap in HV mode!\n");
                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1640,7 +1640,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                  * so that it knows that the machine check occurred.
                  */
                 if (!vcpu->kvm->arch.fwnmi_enabled) {
-                        ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+                        ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
                                         (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                         kvmppc_core_queue_machine_check(vcpu, flags);
                         r = RESUME_GUEST;
@@ -1670,7 +1670,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                  * as a result of a hypervisor emulation interrupt
                  * (e40) getting turned into a 700 by BML RTAS.
                  */
-                flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+                flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
                         (kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
                 kvmppc_core_queue_program(vcpu, flags);
                 r = RESUME_GUEST;
@@ -1680,7 +1680,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
         {
                 int i;
 
-                if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+                if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                         /*
                          * Guest userspace executed sc 1. This can only be
                          * reached by the P9 path because the old path
@@ -1758,7 +1758,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                         break;
                 }
 
-                if (!(vcpu->arch.shregs.msr & MSR_DR))
+                if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
                         vsid = vcpu->kvm->arch.vrma_slb_v;
                 else
                         vsid = vcpu->arch.fault_gpa;
@@ -1782,7 +1782,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 long err;
 
                 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
-                vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+                vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
                         DSISR_SRR1_MATCH_64S;
                 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
                         /*
@@ -1791,7 +1791,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                  * hash fault handling below is v3 only (it uses ASDR
                  * via fault_gpa).
                  */
-                if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+                if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
                         vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
                 r = RESUME_PAGE_FAULT;
                 break;
@@ -1805,7 +1805,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                         break;
                 }
 
-                if (!(vcpu->arch.shregs.msr & MSR_IR))
+                if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
                         vsid = vcpu->kvm->arch.vrma_slb_v;
                 else
                         vsid = vcpu->arch.fault_gpa;
@@ -1895,7 +1895,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                 kvmppc_dump_regs(vcpu);
                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
-                        vcpu->arch.shregs.msr);
+                        __kvmppc_get_msr_hv(vcpu));
                 run->hw.hardware_exit_reason = vcpu->arch.trap;
                 r = RESUME_HOST;
                 break;
@@ -1919,11 +1919,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
          * That can happen due to a bug, or due to a machine check
          * occurring at just the wrong time.
          */
-        if (vcpu->arch.shregs.msr & MSR_HV) {
+        if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
                 pr_emerg("KVM trap in HV mode while nested!\n");
                 pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
                          vcpu->arch.trap, kvmppc_get_pc(vcpu),
-                         vcpu->arch.shregs.msr);
+                         __kvmppc_get_msr_hv(vcpu));
                 kvmppc_dump_regs(vcpu);
                 return RESUME_HOST;
         }
@@ -1980,7 +1980,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
                 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
                         DSISR_SRR1_MATCH_64S;
-                if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+                if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
                         vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
                 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                 r = kvmhv_nested_page_fault(vcpu);
@@ -2940,7 +2940,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
         spin_lock_init(&vcpu->arch.vpa_update_lock);
         spin_lock_init(&vcpu->arch.tbacct_lock);
         vcpu->arch.busy_preempt = TB_NIL;
-        vcpu->arch.shregs.msr = MSR_ME;
+        __kvmppc_set_msr_hv(vcpu, MSR_ME);
         vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
         /*
@@ -4188,7 +4188,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
         __this_cpu_write(cpu_in_guest, NULL);
 
         if (trap == BOOK3S_INTERRUPT_SYSCALL &&
-            !(vcpu->arch.shregs.msr & MSR_PR)) {
+            !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                 unsigned long req = kvmppc_get_gpr(vcpu, 3);
 
                 /*
@@ -4667,7 +4667,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
         if (!nested) {
                 kvmppc_core_prepare_to_enter(vcpu);
-                if (vcpu->arch.shregs.msr & MSR_EE) {
+                if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
                         if (xive_interrupt_pending(vcpu))
                                 kvmppc_inject_interrupt_hv(vcpu,
                                                 BOOK3S_INTERRUPT_EXTERNAL, 0);
@@ -4880,7 +4880,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
         if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
                 accumulate_time(vcpu, &vcpu->arch.hcall);
 
-                if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+                if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
                         /*
                          * These should have been caught reflected
                          * into the guest by now. Final sanity check:

arch/powerpc/kvm/book3s_hv.h

Lines changed: 10 additions & 0 deletions
@@ -51,6 +51,16 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
 #define end_timing(vcpu) do {} while (0)
 #endif
 
+static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
+{
+        vcpu->arch.shregs.msr = val;
+}
+
+static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.shregs.msr;
+}
+
 #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size)                         \
 static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
 {                                                                             \
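
With the helpers in place, the HV call sites become mechanical rewrites. A minimal usage sketch of the read-modify-write pattern (the wrapper function below is hypothetical; its body matches the kvmppc_cede() change above):

/* Hypothetical helper showing the read-modify-write idiom with the new
 * low-level accessors: a direct load and store of shregs::msr, with no
 * endianness branch or byte swap. */
static inline void example_msr_set_ee(struct kvm_vcpu *vcpu)
{
        __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
}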

arch/powerpc/kvm/book3s_hv_builtin.c

Lines changed: 3 additions & 2 deletions
@@ -32,6 +32,7 @@
 
 #include "book3s_xics.h"
 #include "book3s_xive.h"
+#include "book3s_hv.h"
 
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -514,7 +515,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
          */
         if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                 msr &= ~MSR_TS_MASK;
-        vcpu->arch.shregs.msr = msr;
+        __kvmppc_set_msr_hv(vcpu, msr);
         kvmppc_end_cede(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -552,7 +553,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
         kvmppc_set_srr0(vcpu, pc);
         kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
         kvmppc_set_pc(vcpu, new_pc);
-        vcpu->arch.shregs.msr = new_msr;
+        __kvmppc_set_msr_hv(vcpu, new_msr);
 }
 
 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
