Commit 09b3d3c

yamahata authored and bonzini committed
KVM: TDX: Add placeholders for TDX VM/vCPU structures
Add TDX's own VM and vCPU structures as placeholders to manage and run TDX guests. Also add helper functions to check whether a VM/vCPU is a TDX or a normal VMX one, and helpers to convert between TDX VM/vCPU and KVM VM/vCPU.

TDX protects guest VMs from a malicious host. Unlike VMX guests, TDX guests are crypto-protected; KVM cannot access TDX guests' memory or vCPU state directly. Instead, TDX requires KVM to use a set of TDX architecture-defined firmware APIs (a.k.a. TDX module SEAMCALLs) to manage and run TDX guests.

In fact, the way TDX guests are managed and run is quite different from normal VMX guests. Because of that, the current structures used to manage VMX guests ('struct kvm_vmx' and 'struct vcpu_vmx') are not suitable for TDX guests. E.g., the majority of the members of 'struct vcpu_vmx' don't apply to TDX guests.

Introduce TDX's own VM and vCPU structures ('struct kvm_tdx' and 'struct vcpu_tdx' respectively) for KVM to manage and run TDX guests. Instead of building TDX's VM and vCPU structures on top of VMX's, build them directly on 'struct kvm'. As a result, TDX and VMX guests have different VM sizes and vCPU sizes/alignments.

Currently, kvm_arch_alloc_vm() uses 'kvm_x86_ops::vm_size' to allocate enough space for the VM structure when creating a guest. With TDX guests, ideally KVM should allocate the VM structure based on the VM type so that the precise size can be allocated for VMX and TDX guests, but this would require a more extensive code change.

For now, simply choose the maximum of the sizes of 'struct kvm_tdx' and 'struct kvm_vmx' for VM structure allocation for both VMX and TDX guests. This wastes a small amount of memory for each VM whose structure is the smaller of the two, which is acceptable.

For simplicity, use the same approach for vCPU allocation too; otherwise KVM would need to maintain a separate 'kvm_vcpu_cache' for each VM type.

Note, updating 'vt_x86_ops::vm_size' needs to be done before calling kvm_ops_update(), which copies vt_x86_ops to kvm_x86_ops. However, this happens before TDX module initialization, so it is theoretically possible that 'kvm_x86_ops::vm_size' is set to the size of 'struct kvm_tdx' (when it is larger) but TDX then fails to initialize at a later time. Again, the worst case is wasting a couple of bytes of memory per VM. KVM could instead update 'kvm_x86_ops::vm_size' at a later time depending on TDX's status, but that would require the base KVM module to export either kvm_x86_ops or kvm_ops_update().

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
- Make to_kvm_tdx() and to_tdx() private to tdx.c (Francesco, Tony)
- Add pragma poison for to_vmx() (Paolo)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 45154fb commit 09b3d3c
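
As context for the vm_size discussion above: the VM structure allocation this commit sizes conservatively happens in the existing x86 kvm_arch_alloc_vm() hook. The following is a paraphrased sketch of that hook (not part of this commit; see arch/x86/include/asm/kvm_host.h for the authoritative version), showing why a single maximum vm_size is sufficient: the allocation only has to be at least as large as whichever wrapper structure the VM type actually uses.

/*
 * Paraphrased sketch of the existing x86 arch hook, not part of this
 * commit: KVM allocates the VM structure from kvm_x86_ops::vm_size.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        /*
         * After this commit, vm_size is max(sizeof(struct kvm_vmx),
         * sizeof(struct kvm_tdx)), so either wrapper structure fits;
         * the smaller one merely leaves a few bytes unused.
         */
        return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}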

File tree

3 files changed: +100 −4 lines

arch/x86/kvm/vmx/main.c

Lines changed: 50 additions & 3 deletions
@@ -8,6 +8,39 @@
 #include "posted_intr.h"
 #include "tdx.h"
 
+static __init int vt_hardware_setup(void)
+{
+        int ret;
+
+        ret = vmx_hardware_setup();
+        if (ret)
+                return ret;
+
+        /*
+         * Update vt_x86_ops::vm_size here so it is ready before
+         * kvm_ops_update() is called in kvm_x86_vendor_init().
+         *
+         * Note, the actual bringing up of TDX must be done after
+         * kvm_ops_update() because enabling TDX requires enabling
+         * hardware virtualization first, i.e., all online CPUs must
+         * be in post-VMXON state.  This means the @vm_size here
+         * may be updated to TDX's size but TDX may fail to enable
+         * at later time.
+         *
+         * The VMX/VT code could update kvm_x86_ops::vm_size again
+         * after bringing up TDX, but this would require exporting
+         * either kvm_x86_ops or kvm_ops_update() from the base KVM
+         * module, which looks overkill.  Anyway, the worst case here
+         * is KVM may allocate couple of more bytes than needed for
+         * each VM.
+         */
+        if (enable_tdx)
+                vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size,
+                                           sizeof(struct kvm_tdx));
+
+        return 0;
+}
+
 #define VMX_REQUIRED_APICV_INHIBITS     \
         (BIT(APICV_INHIBIT_REASON_DISABLED) |   \
          BIT(APICV_INHIBIT_REASON_ABSENT) |     \
@@ -163,7 +196,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
-        .hardware_setup = vmx_hardware_setup,
+        .hardware_setup = vt_hardware_setup,
         .handle_intel_pt_intr = NULL,
 
         .runtime_ops = &vt_x86_ops,
@@ -180,6 +213,7 @@ module_exit(vt_exit);
 
 static int __init vt_init(void)
 {
+        unsigned vcpu_size, vcpu_align;
         int r;
 
         r = vmx_init();
@@ -191,12 +225,25 @@ static int __init vt_init(void)
         if (r)
                 goto err_tdx_bringup;
 
+        /*
+         * TDX and VMX have different vCPU structures.  Calculate the
+         * maximum size/align so that kvm_init() can use the larger
+         * values to create the kmem_vcpu_cache.
+         */
+        vcpu_size = sizeof(struct vcpu_vmx);
+        vcpu_align = __alignof__(struct vcpu_vmx);
+        if (enable_tdx) {
+                vcpu_size = max_t(unsigned, vcpu_size,
+                                  sizeof(struct vcpu_tdx));
+                vcpu_align = max_t(unsigned, vcpu_align,
+                                   __alignof__(struct vcpu_tdx));
+        }
+
         /*
          * Common KVM initialization _must_ come last, after this, /dev/kvm is
          * exposed to userspace!
          */
-        r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
-                     THIS_MODULE);
+        r = kvm_init(vcpu_size, vcpu_align, THIS_MODULE);
         if (r)
                 goto err_kvm_init;
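
The vcpu_size/vcpu_align pair computed above is consumed by common KVM code when it creates the shared vCPU slab cache. A paraphrased sketch of the relevant part of kvm_init() in virt/kvm/kvm_main.c (not part of this diff; the real function does much more) shows why a single maximum size and alignment must cover both vCPU structures:

/*
 * Paraphrased sketch, not part of this diff: the real kvm_init() carries
 * additional setup and error handling around this cache creation.
 */
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
{
        /*
         * One slab cache backs every vCPU, so it must be sized and
         * aligned for the larger of struct vcpu_vmx and struct vcpu_tdx.
         */
        kvm_vcpu_cache = kmem_cache_create_usercopy("kvm_vcpu", vcpu_size,
                                                    vcpu_align, SLAB_ACCOUNT,
                                                    offsetof(struct kvm_vcpu, arch),
                                                    sizeof_field(struct kvm_vcpu, arch),
                                                    NULL);
        if (!kvm_vcpu_cache)
                return -ENOMEM;

        /* ... remaining common initialization ... */
        return 0;
}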

arch/x86/kvm/vmx/tdx.c

Lines changed: 13 additions & 1 deletion
@@ -5,16 +5,28 @@
 #include "capabilities.h"
 #include "tdx.h"
 
+#pragma GCC poison to_vmx
+
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-static bool enable_tdx __ro_after_init;
+bool enable_tdx __ro_after_init;
 module_param_named(tdx, enable_tdx, bool, 0444);
 
 static enum cpuhp_state tdx_cpuhp_state;
 
 static const struct tdx_sys_info *tdx_sysinfo;
 
+static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
+{
+        return container_of(kvm, struct kvm_tdx, kvm);
+}
+
+static __always_inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
+{
+        return container_of(vcpu, struct vcpu_tdx, vcpu);
+}
+
 static int tdx_online_cpu(unsigned int cpu)
 {
         unsigned long flags;
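
to_kvm_tdx() and to_tdx() work because struct kvm and struct kvm_vcpu are embedded as the first members of the TDX wrappers declared in tdx.h, so container_of() reduces to subtracting a constant member offset (zero here). A hypothetical caller inside tdx.c (illustration only; tdx_example() is not part of this commit) would use them like this:

/* Hypothetical illustration, not part of this commit: recovering the TDX
 * wrappers from the generic KVM objects a callback is handed. */
static void tdx_example(struct kvm_vcpu *vcpu)
{
        struct vcpu_tdx *tdx = to_tdx(vcpu);
        struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);

        /* container_of() only adjusts the pointer, so these always hold: */
        WARN_ON(&tdx->vcpu != vcpu);
        WARN_ON(&kvm_tdx->kvm != vcpu->kvm);
}

The #pragma GCC poison to_vmx added above makes any accidental use of the VMX converter in tdx.c a compile-time error.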

arch/x86/kvm/vmx/tdx.h

Lines changed: 37 additions & 0 deletions
@@ -5,9 +5,46 @@
 #ifdef CONFIG_KVM_INTEL_TDX
 int tdx_bringup(void);
 void tdx_cleanup(void);
+
+extern bool enable_tdx;
+
+struct kvm_tdx {
+        struct kvm kvm;
+        /* TDX specific members follow. */
+};
+
+struct vcpu_tdx {
+        struct kvm_vcpu vcpu;
+        /* TDX specific members follow. */
+};
+
+static inline bool is_td(struct kvm *kvm)
+{
+        return kvm->arch.vm_type == KVM_X86_TDX_VM;
+}
+
+static inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
+{
+        return is_td(vcpu->kvm);
+}
+
 #else
 static inline int tdx_bringup(void) { return 0; }
 static inline void tdx_cleanup(void) {}
+
+#define enable_tdx      0
+
+struct kvm_tdx {
+        struct kvm kvm;
+};
+
+struct vcpu_tdx {
+        struct kvm_vcpu vcpu;
+};
+
+static inline bool is_td(struct kvm *kvm) { return false; }
+static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }
+
 #endif
 
 #endif
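
The is_td()/is_td_vcpu() helpers, together with the !CONFIG_KVM_INTEL_TDX stubs that compile to constant false, are intended to let the shared VT callbacks in main.c dispatch between the VMX and TDX paths without #ifdefs. A hypothetical dispatcher (illustration only; vt_example_vcpu_reset() is not introduced by this commit) might look like:

/* Hypothetical dispatcher, not part of this commit: with TDX disabled,
 * is_td_vcpu() is constant false and this folds to the VMX call. */
static void vt_example_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
        if (is_td_vcpu(vcpu)) {
                /* TDX vCPU state is managed by the TDX module via SEAMCALLs. */
                return;
        }

        vmx_vcpu_reset(vcpu, init_event);
}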
