Skip to content

Commit 3fee483

Browse files
committed
KVM: x86: remove shadow_memtype_mask
The IGNORE_GUEST_PAT quirk is inapplicable, and thus always-disabled, if shadow_memtype_mask is zero. As long as vmx_get_mt_mask is not called for the shadow paging case, there is no need to consult shadow_memtype_mask and it can be removed altogether.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent c9c1e20 commit 3fee483

File tree

5 files changed

+7
-34
lines changed

5 files changed

+7
-34
lines changed

arch/x86/kvm/mmu/mmu.c

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4663,21 +4663,6 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
46634663
}
46644664
#endif
46654665

4666-
bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm)
4667-
{
4668-
/*
4669-
* When EPT is enabled (shadow_memtype_mask is non-zero), and the VM
4670-
* has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is to
4671-
* honor the memtype from the guest's PAT so that guest accesses to
4672-
* memory that is DMA'd aren't cached against the guest's wishes. As a
4673-
* result, KVM _may_ ignore guest PAT, whereas without non-coherent DMA,
4674-
* KVM _always_ ignores guest PAT, when EPT is enabled and when quirk
4675-
* KVM_X86_QUIRK_IGNORE_GUEST_PAT is enabled or the CPU lacks the
4676-
* ability to safely honor guest PAT.
4677-
*/
4678-
return kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT);
4679-
}
4680-
46814666
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
46824667
{
46834668
#ifdef CONFIG_X86_64

arch/x86/kvm/mmu/spte.c

Lines changed: 2 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ u64 __read_mostly shadow_mmio_value;
3737
u64 __read_mostly shadow_mmio_mask;
3838
u64 __read_mostly shadow_mmio_access_mask;
3939
u64 __read_mostly shadow_present_mask;
40-
u64 __read_mostly shadow_memtype_mask;
4140
u64 __read_mostly shadow_me_value;
4241
u64 __read_mostly shadow_me_mask;
4342
u64 __read_mostly shadow_acc_track_mask;
@@ -203,9 +202,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
203202
if (level > PG_LEVEL_4K)
204203
spte |= PT_PAGE_SIZE_MASK;
205204

206-
if (shadow_memtype_mask)
207-
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
208-
kvm_is_mmio_pfn(pfn));
205+
spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
209206
if (host_writable)
210207
spte |= shadow_host_writable_mask;
211208
else
@@ -460,13 +457,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
460457
/* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
461458
shadow_present_mask =
462459
(has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
463-
/*
464-
* EPT overrides the host MTRRs, and so KVM must program the desired
465-
* memtype directly into the SPTEs. Note, this mask is just the mask
466-
* of all bits that factor into the memtype, the actual memtype must be
467-
* dynamically calculated, e.g. to ensure host MMIO is mapped UC.
468-
*/
469-
shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
460+
470461
shadow_acc_track_mask = VMX_EPT_RWX_MASK;
471462
shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
472463
shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
@@ -518,12 +509,6 @@ void kvm_mmu_reset_all_pte_masks(void)
518509
shadow_x_mask = 0;
519510
shadow_present_mask = PT_PRESENT_MASK;
520511

521-
/*
522-
* For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
523-
* memtype in the SPTEs, i.e. relies on host MTRRs to provide the
524-
* correct memtype (WB is the "weakest" memtype).
525-
*/
526-
shadow_memtype_mask = 0;
527512
shadow_acc_track_mask = 0;
528513
shadow_me_mask = 0;
529514
shadow_me_value = 0;

arch/x86/kvm/mmu/spte.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,6 @@ extern u64 __read_mostly shadow_mmio_value;
187187
extern u64 __read_mostly shadow_mmio_mask;
188188
extern u64 __read_mostly shadow_mmio_access_mask;
189189
extern u64 __read_mostly shadow_present_mask;
190-
extern u64 __read_mostly shadow_memtype_mask;
191190
extern u64 __read_mostly shadow_me_value;
192191
extern u64 __read_mostly shadow_me_mask;
193192

arch/x86/kvm/vmx/vmx.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8434,6 +8434,8 @@ __init int vmx_hardware_setup(void)
84348434
if (enable_ept)
84358435
kvm_mmu_set_ept_masks(enable_ept_ad_bits,
84368436
cpu_has_vmx_ept_execute_only());
8437+
else
8438+
vt_x86_ops.get_mt_mask = NULL;
84378439

84388440
/*
84398441
* Setup shadow_me_value/shadow_me_mask to include MKTME KeyID

arch/x86/kvm/x86.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13567,8 +13567,10 @@ static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
1356713567
* due to toggling the "ignore PAT" bit. Zap all SPTEs when the first
1356813568
* (or last) non-coherent device is (un)registered to so that new SPTEs
1356913569
* with the correct "ignore guest PAT" setting are created.
13570+
*
13571+
* If KVM always honors guest PAT, however, there is nothing to do.
1357013572
*/
13571-
if (kvm_mmu_may_ignore_guest_pat(kvm))
13573+
if (kvm_check_has_quirk(kvm, KVM_X86_QUIRK_IGNORE_GUEST_PAT))
1357213574
kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
1357313575
}
1357413576

0 commit comments

Comments (0)