 #include <asm/mshyperv.h>
 #include <asm/hypervisor.h>
 #include <asm/mtrr.h>
+#include <asm/coco.h>
+#include <asm/io_apic.h>
+#include <asm/sev.h>
+#include <asm/realmode.h>
+#include <asm/e820/api.h>
+#include <asm/desc.h>
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 #define GHCB_USAGE_HYPERV_CALL 1
 
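+/*
+ * The AP startup hypercall input must be readable by the hypervisor, so
+ * that page lives in __bss_decrypted (shared) memory; the AP's initial
+ * stack stays in normal encrypted guest memory.
+ */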
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+
 union hv_ghcb {
         struct ghcb ghcb;
         struct {
@@ -56,6 +65,8 @@ union hv_ghcb {
         } hypercall;
 } __packed __aligned(HV_HYP_PAGE_SIZE);
 
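+/*
+ * Per-CPU pointer to the VMSA page in use for each vCPU, so that a
+ * stale VMSA page can be freed when the AP is started again.
+ */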
+static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
+
 static u16 hv_ghcb_version __ro_after_init;
 
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
@@ -357,6 +368,133 @@ static bool hv_is_private_mmio(u64 addr)
         return false;
 }
 
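+/*
+ * Build a VMSA segment register from the live GDT: the VMSA attrib field
+ * packs the descriptor's access byte (bits 0-7) and flags nibble (bits
+ * 8-11), which the macro below reads from offset 5 of the 8-byte GDT
+ * entry and repacks.
+ */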
+#define hv_populate_vmcb_seg(seg, gdtr_base)                            \
+do {                                                                    \
+        if (seg.selector) {                                             \
+                seg.base = 0;                                           \
+                seg.limit = HV_AP_SEGMENT_LIMIT;                        \
+                seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);    \
+                seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+        }                                                               \
+} while (0)
+
+static int snp_set_vmsa(void *va, bool vmsa)
+{
+        u64 attrs;
+
+        /*
+         * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+         * using the RMPADJUST instruction. However, for the instruction to
+         * succeed it must target the permissions of a lesser privileged
+         * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+         * instruction in the AMD64 APM Volume 3).
+         */
+        attrs = 1;
+        if (vmsa)
+                attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+        return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+{
+        int err;
+
+        err = snp_set_vmsa(vmsa, false);
+        if (err)
+                pr_err("clear VMSA page failed (%u), leaking page\n", err);
+        else
+                free_page((unsigned long)vmsa);
+}
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+        struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+                __get_free_page(GFP_KERNEL | __GFP_ZERO);
+        struct sev_es_save_area *cur_vmsa;
+        struct desc_ptr gdtr;
+        u64 ret, retry = 5;
+        struct hv_enable_vp_vtl *start_vp_input;
+        unsigned long flags;
+
+        if (!vmsa)
+                return -ENOMEM;
+
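+        /*
+         * Seed the new VMSA from the boot CPU's live state: the GDT
+         * base/limit and the current segment selectors, repacked via
+         * hv_populate_vmcb_seg() above.
+         */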
+        native_store_gdt(&gdtr);
+
+        vmsa->gdtr.base = gdtr.address;
+        vmsa->gdtr.limit = gdtr.size;
+
+        asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+        hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+        asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+        hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+        asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+        hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+        asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+        hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+        vmsa->efer = native_read_msr(MSR_EFER);
+
+        asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+        asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+        asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
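+        /*
+         * XCR0 = 1 enables only legacy x87 state. The AP enters at
+         * secondary_startup_64_no_verify (which skips verify_cpu, whose
+         * CPUID use would raise #VC this early), with its stack pointer
+         * at the top of the dedicated AP startup stack page.
+         */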
+        vmsa->xcr0 = 1;
+        vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
+        vmsa->rip = (u64)secondary_startup_64_no_verify;
+        vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+        /*
+         * Set the SNP-specific fields for this VMSA:
+         *   VMPL level
+         *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+         */
+        vmsa->vmpl = 0;
+        vmsa->sev_features = sev_status >> 2;
+
+        ret = snp_set_vmsa(vmsa, true);
+        if (ret) {
+                pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+                free_page((u64)vmsa);
+                return ret;
+        }
+
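+        /*
+         * Keep interrupts off while the shared ap_start_input_arg page is
+         * being filled in and handed to the hypervisor, so nothing else on
+         * this CPU can clobber it mid-hypercall.
+         */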
+        local_irq_save(flags);
+        start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+        memset(start_vp_input, 0, sizeof(*start_vp_input));
+        start_vp_input->partition_id = -1;
+        start_vp_input->vp_index = cpu;
+        start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
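+        /*
+         * The VP context here is the prepared VMSA's guest physical
+         * address with its low bit set, overlaying the usual
+         * struct hv_initial_vp_context.
+         */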
+        *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
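+        /*
+         * Issue HvCallStartVirtualProcessor, retrying up to 5 times if
+         * Hyper-V reports HV_STATUS_TIME_OUT.
+         */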
+        do {
+                ret = hv_do_hypercall(HVCALL_START_VP,
+                                      start_vp_input, NULL);
+        } while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+        local_irq_restore(flags);
+
+        if (!hv_result_success(ret)) {
+                pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+                snp_cleanup_vmsa(vmsa);
+                vmsa = NULL;
+        }
+
+        cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
+        /* Free up any previous VMSA page */
+        if (cur_vmsa)
+                snp_cleanup_vmsa(cur_vmsa);
+
+        /* Record the current VMSA page */
+        per_cpu(hv_sev_vmsa, cpu) = vmsa;
+
+        return ret;
+}
+
 void __init hv_vtom_init(void)
 {
         /*