@@ -1374,7 +1374,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
  */
 static void kvmppc_cede(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.shregs.msr |= MSR_EE;
+	__kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
 	vcpu->arch.ceded = 1;
 	smp_mb();
 	if (vcpu->arch.prodded) {
@@ -1589,7 +1589,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 	 * That can happen due to a bug, or due to a machine check
 	 * occurring at just the wrong time.
 	 */
-	if (vcpu->arch.shregs.msr & MSR_HV) {
+	if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
 		printk(KERN_EMERG "KVM trap in HV mode!\n");
 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1640,7 +1640,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * so that it knows that the machine check occurred.
 		 */
 		if (!vcpu->kvm->arch.fwnmi_enabled) {
-			ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+			ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
 				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 			kvmppc_core_queue_machine_check(vcpu, flags);
 			r = RESUME_GUEST;
@@ -1670,7 +1670,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * as a result of a hypervisor emulation interrupt
 		 * (e40) getting turned into a 700 by BML RTAS.
 		 */
-		flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+		flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
 			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
@@ -1680,7 +1680,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 	{
 		int i;

-		if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+		if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
 			/*
 			 * Guest userspace executed sc 1. This can only be
 			 * reached by the P9 path because the old path
@@ -1758,7 +1758,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			break;
 		}

-		if (!(vcpu->arch.shregs.msr & MSR_DR))
+		if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
 			vsid = vcpu->kvm->arch.vrma_slb_v;
 		else
 			vsid = vcpu->arch.fault_gpa;
@@ -1782,7 +1782,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		long err;

 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
-		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+		vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
 			DSISR_SRR1_MATCH_64S;
 		if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
 			/*
@@ -1791,7 +1791,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			 * hash fault handling below is v3 only (it uses ASDR
 			 * via fault_gpa).
 			 */
-			if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+			if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
 				vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
 			r = RESUME_PAGE_FAULT;
 			break;
@@ -1805,7 +1805,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			break;
 		}

-		if (!(vcpu->arch.shregs.msr & MSR_IR))
+		if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
 			vsid = vcpu->kvm->arch.vrma_slb_v;
 		else
 			vsid = vcpu->arch.fault_gpa;
@@ -1895,7 +1895,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		kvmppc_dump_regs(vcpu);
 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
-			vcpu->arch.shregs.msr);
+			__kvmppc_get_msr_hv(vcpu));
 		run->hw.hardware_exit_reason = vcpu->arch.trap;
 		r = RESUME_HOST;
 		break;
@@ -1919,11 +1919,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 	 * That can happen due to a bug, or due to a machine check
 	 * occurring at just the wrong time.
 	 */
-	if (vcpu->arch.shregs.msr & MSR_HV) {
+	if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
 		pr_emerg("KVM trap in HV mode while nested!\n");
 		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
 			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
-			 vcpu->arch.shregs.msr);
+			 __kvmppc_get_msr_hv(vcpu));
 		kvmppc_dump_regs(vcpu);
 		return RESUME_HOST;
 	}
@@ -1980,7 +1980,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
 		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
 			DSISR_SRR1_MATCH_64S;
-		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+		if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvmhv_nested_page_fault(vcpu);
@@ -2940,7 +2940,7 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
-	vcpu->arch.shregs.msr = MSR_ME;
+	__kvmppc_set_msr_hv(vcpu, MSR_ME);
 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;

 	/*
@@ -4188,7 +4188,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	__this_cpu_write(cpu_in_guest, NULL);

 	if (trap == BOOK3S_INTERRUPT_SYSCALL &&
-	    !(vcpu->arch.shregs.msr & MSR_PR)) {
+	    !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
 		unsigned long req = kvmppc_get_gpr(vcpu, 3);

 		/*
@@ -4667,7 +4667,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,

 	if (!nested) {
 		kvmppc_core_prepare_to_enter(vcpu);
-		if (vcpu->arch.shregs.msr & MSR_EE) {
+		if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
 			if (xive_interrupt_pending(vcpu))
 				kvmppc_inject_interrupt_hv(vcpu,
 						BOOK3S_INTERRUPT_EXTERNAL, 0);
@@ -4880,7 +4880,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 	if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
 		accumulate_time(vcpu, &vcpu->arch.hcall);

-		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+		if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
 			/*
 			 * These should have been caught reflected
 			 * into the guest by now. Final sanity check:
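
The conversion above is mechanical: every open-coded read or write of vcpu->arch.shregs.msr is routed through the new low-level MSR accessors. As a rough sketch of what those accessors amount to, assuming they simply wrap the shared-register field exactly as the replaced code did (the in-tree definitions may carry extra handling, e.g. for nested guests):

static __always_inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
{
	/* Read the guest MSR image from the shared register area. */
	return vcpu->arch.shregs.msr;
}

static __always_inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
{
	/* Update the guest MSR image in the shared register area. */
	vcpu->arch.shregs.msr = val;
}

With the accessors in place, a later change can redirect MSR state to a different backing store (for example a guest-state buffer for nested guests) without touching every call site again.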