|
8 | 8 | #include "x86_ops.h"
|
9 | 9 | #include "lapic.h"
|
10 | 10 | #include "tdx.h"
|
| 11 | +#include "vmx.h" |
11 | 12 | #include "mmu/spte.h"
|
12 | 13 |
|
13 | 14 | #pragma GCC poison to_vmx
|
@@ -524,6 +525,51 @@ void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int pgd_level)
|
524 | 525 | td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
|
525 | 526 | }
|
526 | 527 |
|
/*
 * Ensure shared and private EPTs to be flushed on all vCPUs.
 * tdh_mem_track() is the only caller that increases TD epoch. An increase in
 * the TD epoch (e.g., to value "N + 1") is successful only if no vCPUs are
 * running in guest mode with the value "N - 1".
 *
 * A successful execution of tdh_mem_track() ensures that vCPUs can only run in
 * guest mode with TD epoch value "N" if no TD exit occurs after the TD epoch
 * being increased to "N + 1".
 *
 * Kicking off all vCPUs after that further results in no vCPUs can run in guest
 * mode with TD epoch value "N", which unblocks the next tdh_mem_track() (e.g.
 * to increase TD epoch to "N + 2").
 *
 * TDX module will flush EPT on the next TD enter and make vCPUs to run in
 * guest mode with TD epoch value "N + 1".
 *
 * kvm_make_all_cpus_request() guarantees all vCPUs are out of guest mode by
 * waiting empty IPI handler ack_kick().
 *
 * No action is required to the vCPUs being kicked off since the kicking off
 * occurs certainly after TD epoch increment and before the next
 * tdh_mem_track().
 */
static void __always_unused tdx_track(struct kvm *kvm)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 err;

	/* If TD isn't finalized, it's before any vcpu running. */
	if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
		return;

	/*
	 * The caller must hold mmu_lock for write: the epoch bump must be
	 * ordered against the SPTE zap that prompted this track.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * TDH.MEM.TRACK can contend with other SEAMCALLs taking the same
	 * TDR/TDCS resources; BUSY is transient, so simply retry.  Any other
	 * error is unexpected and terminal for the TD.
	 */
	do {
		err = tdh_mem_track(&kvm_tdx->td);
	} while (unlikely((err & TDX_SEAMCALL_STATUS_MASK) == TDX_OPERAND_BUSY));

	if (KVM_BUG_ON(err, kvm))
		pr_tdx_error(TDH_MEM_TRACK, err);

	/*
	 * Kick all vCPUs out of guest mode so none can still be running with
	 * the pre-increment epoch; see the function comment for why no
	 * per-vCPU action is needed in the request handler.
	 */
	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
}
| 572 | + |
527 | 573 | static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
|
528 | 574 | {
|
529 | 575 | const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
|
@@ -1079,6 +1125,41 @@ static int tdx_td_init(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
|
1079 | 1125 | return ret;
|
1080 | 1126 | }
|
1081 | 1127 |
|
void tdx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	/*
	 * Invoked either the first time a vCPU runs or when the root of the
	 * shared EPT is invalidated.  KVM only needs to flush the shared EPT
	 * here; TLB invalidation for the private EPT is handled by the TDX
	 * module in tdh_vp_enter().
	 *
	 * A single-context invalidation of the shared EPT would be enough,
	 * but it would have to be keyed by the private EPTP, not the shared
	 * one, since the shared EPT uses the private EPTP as its ASID for TLB
	 * invalidation.  Rather than reading the private EPTP back, keep this
	 * simple and perform a global invalidation.
	 */
	ept_sync_global();
}
| 1146 | + |
void tdx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	/*
	 * tdx_track() has already been called from
	 * tdx_sept_remove_private_spte(), guaranteeing the private EPT is
	 * flushed on the next TD enter — no second tdx_track() is needed even
	 * when this callback results from zapping private EPT.
	 *
	 * There is no context here to tell which EPT (shared or private) the
	 * zap touched, so just issue invept() covering both.  The private-EPT
	 * portion is redundant but harmless, and it keeps this path simple.
	 */
	ept_sync_global();
}
| 1162 | + |
1082 | 1163 | int tdx_vm_ioctl(struct kvm *kvm, void __user *argp)
|
1083 | 1164 | {
|
1084 | 1165 | struct kvm_tdx_cmd tdx_cmd;
|
|
0 commit comments