
Commit fbb4ada

yanzhao56 authored and bonzini committed
KVM: x86: Make cpu_dirty_log_size a per-VM value
Make cpu_dirty_log_size (the CPU's dirty log buffer size) a per-VM value and set the per-VM cpu_dirty_log_size only for normal VMs when PML is enabled. Do not set it for TDs.

Until now, cpu_dirty_log_size was a system-wide value that was used for all VMs and was set to the PML buffer size when PML was enabled in VMX. However, PML is not currently supported for TDs, though PML remains available for normal VMs as long as the feature is supported by hardware and enabled in VMX.

Making cpu_dirty_log_size a per-VM value allows it to be the PML buffer size for normal VMs and 0 for TDs. This allows functions like kvm_arch_sync_dirty_log() and kvm_mmu_update_cpu_dirty_logging() to determine whether PML is supported, in order to kick vCPUs or request them to update their CPU dirty logging status (turn PML on/off in the VMCS).

This fixes an issue first reported in [1], where QEMU attaches an emulated VGA device to a TD; note that KVM_MEM_LOG_DIRTY_PAGES still works if the corresponding memslot has no KVM_MEM_GUEST_MEMFD flag. KVM then invokes kvm_mmu_update_cpu_dirty_logging() and from there vmx_update_cpu_dirty_logging(), which incorrectly accesses a kvm_vmx struct for a TDX VM.

Reported-by: ANAND NARSHINHA PATIL <Anand.N.Patil@ibm.com>
Reported-by: Pedro Principeza <pedro.principeza@canonical.com>
Reported-by: Farrah Chen <farrah.chen@intel.com>
Closes: canonical/tdx#202
Link: canonical/tdx#202 [1]
Suggested-by: Kai Huang <kai.huang@intel.com>
Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
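For orientation, here is a minimal sketch of the check pattern this change establishes, condensed from the x86.c hunk below. Everything except the loop body comes from the diff; the kvm_vcpu_kick() call is only an assumption of what "kick vCPUs" means in practice and is marked as such in the comments.

/*
 * Sketch only: PML-dependent paths now gate on the per-VM
 * kvm->arch.cpu_dirty_log_size instead of the old global
 * kvm_x86_ops.cpu_dirty_log_size.  A zero value means CPU dirty logging
 * is unsupported or disabled for this VM; per the commit message, the
 * field is never set for TDs.
 */
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        if (!kvm->arch.cpu_dirty_log_size)
                return;

        /* Assumed loop body: kick each vCPU so its PML buffer gets flushed. */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vcpu_kick(vcpu);
}

Because the field stays 0 for a TD, kvm_mmu_update_cpu_dirty_logging() returns early and vmx_update_cpu_dirty_logging() is never reached, avoiding the invalid kvm_vmx access described above.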
1 parent fd32525 commit fbb4ada

File tree

6 files changed: +16 -15 lines changed


arch/x86/include/asm/kvm_host.h

Lines changed: 7 additions & 5 deletions

@@ -1562,6 +1562,13 @@ struct kvm_arch {
 	struct kvm_mmu_memory_cache split_desc_cache;
 
 	gfn_t gfn_direct_bits;
+
+	/*
+	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
+	 * value indicates CPU dirty logging is unsupported or disabled in
+	 * current VM.
+	 */
+	int cpu_dirty_log_size;
 };
 
 struct kvm_vm_stat {
@@ -1815,11 +1822,6 @@ struct kvm_x86_ops {
 					       struct x86_exception *exception);
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-	/*
-	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
-	 * value indicates CPU dirty logging is unsupported or disabled.
-	 */
-	int cpu_dirty_log_size;
 	void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
 
 	const struct kvm_x86_nested_ops *nested_ops;

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 2 deletions

@@ -1305,15 +1305,15 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	 * enabled but it chooses between clearing the Dirty bit and Writeable
 	 * bit based on the context.
 	 */
-	if (kvm_x86_ops.cpu_dirty_log_size)
+	if (kvm->arch.cpu_dirty_log_size)
 		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
 	else
 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
 int kvm_cpu_dirty_log_size(struct kvm *kvm)
 {
-	return kvm_x86_ops.cpu_dirty_log_size;
+	return kvm->arch.cpu_dirty_log_size;
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 1 addition & 1 deletion

@@ -198,7 +198,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
 	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
 	 * are reserved for PAE paging (32-bit KVM).
 	 */
-	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
+	return kvm->arch.cpu_dirty_log_size && sp->role.guest_mode;
 }
 
 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)

arch/x86/kvm/vmx/main.c

Lines changed: 0 additions & 1 deletion

@@ -322,7 +322,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.cpu_dirty_log_size = PML_LOG_NR_ENTRIES,
 	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
 	.nested_ops = &vmx_nested_ops,

arch/x86/kvm/vmx/vmx.c

Lines changed: 3 additions & 3 deletions

@@ -7649,6 +7649,9 @@ int vmx_vm_init(struct kvm *kvm)
 			break;
 		}
 	}
+
+	if (enable_pml)
+		kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
 	return 0;
 }
 
@@ -8502,9 +8505,6 @@ __init int vmx_hardware_setup(void)
 	if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
 		enable_pml = 0;
 
-	if (!enable_pml)
-		vt_x86_ops.cpu_dirty_log_size = 0;
-
 	if (!cpu_has_vmx_preemption_timer())
 		enable_preemption_timer = false;
arch/x86/kvm/x86.c

Lines changed: 3 additions & 3 deletions

@@ -6480,7 +6480,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
 
-	if (!kvm_x86_ops.cpu_dirty_log_size)
+	if (!kvm->arch.cpu_dirty_log_size)
 		return;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
@@ -13078,7 +13078,7 @@ static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
 {
 	int nr_slots;
 
-	if (!kvm_x86_ops.cpu_dirty_log_size)
+	if (!kvm->arch.cpu_dirty_log_size)
 		return;
 
 	nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
@@ -13150,7 +13150,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	if (READ_ONCE(eager_page_split))
 		kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
 
-	if (kvm_x86_ops.cpu_dirty_log_size) {
+	if (kvm->arch.cpu_dirty_log_size) {
 		kvm_mmu_slot_leaf_clear_dirty(kvm, new);
 		kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
 	} else {
