KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests
authorTom Lendacky <thomas.lendacky@amd.com>
Thu, 10 Dec 2020 17:10:08 +0000 (11:10 -0600)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 15 Dec 2020 10:20:59 +0000 (05:20 -0500)
The run sequence is different for an SEV-ES guest compared to a legacy or
even an SEV guest. The guest vCPU register state of an SEV-ES guest will
be restored on VMRUN and saved on VMEXIT. There is no need to restore the
guest registers directly and through VMLOAD before VMRUN and no need to
save the guest registers directly and through VMSAVE on VMEXIT.

Update the svm_vcpu_run() function to skip register state saving and
restoring and provide an alternative function for running an SEV-ES guest
in vmenter.S.

Additionally, certain host state is restored across an SEV-ES VMRUN. As
a result, certain register states are not required to be restored upon
VMEXIT (e.g. FS, GS, etc.), so only restore them if the guest is not an
SEV-ES guest.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <fb1c66d32f2194e171b95fc1a8affd6d326e10c1.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/svm/vmenter.S
arch/x86/kvm/x86.c

index 3a2e48a8d05c0f8ac0b85d9dde7421789ffc260d..941e5251e13feb5e1f72d1abea90f4772e9207f4 100644 (file)
@@ -3700,16 +3700,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
        guest_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
 
-       __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+       if (sev_es_guest(svm->vcpu.kvm)) {
+               __svm_sev_es_vcpu_run(svm->vmcb_pa);
+       } else {
+               __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 
 #ifdef CONFIG_X86_64
-       native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+               native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
-       loadsegment(fs, svm->host.fs);
+               loadsegment(fs, svm->host.fs);
 #ifndef CONFIG_X86_32_LAZY_GS
-       loadsegment(gs, svm->host.gs);
+               loadsegment(gs, svm->host.gs);
 #endif
 #endif
+       }
 
        /*
         * VMEXIT disables interrupts (host state), but tracing and lockdep
@@ -3807,14 +3811,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       reload_tss(vcpu);
+       if (!sev_es_guest(svm->vcpu.kvm))
+               reload_tss(vcpu);
 
        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
-       vcpu->arch.cr2 = svm->vmcb->save.cr2;
-       vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-       vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-       vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+       if (!sev_es_guest(svm->vcpu.kvm)) {
+               vcpu->arch.cr2 = svm->vmcb->save.cr2;
+               vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+               vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+               vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+       }
 
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(&svm->vcpu);
index df9fe11a632c246fec94d706c9c8b4b50d9de665..a5067f776ce0c4e64a8822a57dc35998c0dbb9ed 100644 (file)
@@ -591,4 +591,9 @@ void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
 void sev_es_vcpu_put(struct vcpu_svm *svm);
 
+/* vmenter.S */
+
+void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
+void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+
 #endif
index 1ec1ac40e3280a7f4eed4202354cb69e2d760c77..6feb8c08f45abd9f56ab5e09c8348ad95102befc 100644 (file)
@@ -168,3 +168,53 @@ SYM_FUNC_START(__svm_vcpu_run)
        pop %_ASM_BP
        ret
 SYM_FUNC_END(__svm_vcpu_run)
+
+/**
+ * __svm_sev_es_vcpu_run - Run an SEV-ES vCPU via a transition to SVM guest mode
+ * @vmcb_pa:   physical address of the guest VMCB (guest state is loaded/saved by hardware)
+ */
+SYM_FUNC_START(__svm_sev_es_vcpu_run)
+       push %_ASM_BP
+#ifdef CONFIG_X86_64
+       push %r15
+       push %r14
+       push %r13
+       push %r12
+#else
+       push %edi
+       push %esi
+#endif
+       push %_ASM_BX
+
+       /* Enter guest mode */
+       mov %_ASM_ARG1, %_ASM_AX
+       sti
+
+1:     vmrun %_ASM_AX
+       jmp 3f
+2:     cmpb $0, kvm_rebooting
+       jne 3f
+       ud2
+       _ASM_EXTABLE(1b, 2b)
+
+3:     cli
+
+#ifdef CONFIG_RETPOLINE
+       /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
+       pop %_ASM_BX
+
+#ifdef CONFIG_X86_64
+       pop %r12
+       pop %r13
+       pop %r14
+       pop %r15
+#else
+       pop %esi
+       pop %edi
+#endif
+       pop %_ASM_BP
+       ret
+SYM_FUNC_END(__svm_sev_es_vcpu_run)
index 3e58612babfe0d9808b925c6645ce516083cfbe6..648c677b12e94abf8bbd6a641caf080d8506ba79 100644 (file)
@@ -880,6 +880,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 {
+       if (vcpu->arch.guest_state_protected)
+               return;
+
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -900,6 +903,9 @@ EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
+       if (vcpu->arch.guest_state_protected)
+               return;
+
        if (static_cpu_has(X86_FEATURE_PKU) &&
            (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
             (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {