KVM: x86: Split core of hypercall emulation to helper function
author: Sean Christopherson <seanjc@google.com>
Mon, 22 Jan 2024 23:54:02 +0000 (15:54 -0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Fri, 12 Apr 2024 08:42:23 +0000 (04:42 -0400)
By necessity, TDX will use a different register ABI for hypercalls.
Break out the core functionality so that it may be reused for TDX.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Message-Id: <5134caa55ac3dec33fb2addb5545b52b3b52db02.1705965635.git.isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index 3d56b5bb10e9e8cd7167bff86482972f92bf43ec..01c69840647e45bcc0a26fcb3cbf4eb2205cbb3a 100644 (file)
@@ -2142,6 +2142,10 @@ static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
        kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
 }
 
+unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+                                     unsigned long a0, unsigned long a1,
+                                     unsigned long a2, unsigned long a3,
+                                     int op_64_bit, int cpl);
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
index d584f5739402aa281ca4a9ab471b3d9039628065..2d2619d3eee47349d6aa34cc5a16f7ccefe3adee 100644 (file)
@@ -10080,26 +10080,15 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
        return kvm_skip_emulated_instruction(vcpu);
 }
 
-int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+                                     unsigned long a0, unsigned long a1,
+                                     unsigned long a2, unsigned long a3,
+                                     int op_64_bit, int cpl)
 {
-       unsigned long nr, a0, a1, a2, a3, ret;
-       int op_64_bit;
-
-       if (kvm_xen_hypercall_enabled(vcpu->kvm))
-               return kvm_xen_hypercall(vcpu);
-
-       if (kvm_hv_hypercall_enabled(vcpu))
-               return kvm_hv_hypercall(vcpu);
-
-       nr = kvm_rax_read(vcpu);
-       a0 = kvm_rbx_read(vcpu);
-       a1 = kvm_rcx_read(vcpu);
-       a2 = kvm_rdx_read(vcpu);
-       a3 = kvm_rsi_read(vcpu);
+       unsigned long ret;
 
        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
-       op_64_bit = is_64_bit_hypercall(vcpu);
        if (!op_64_bit) {
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
@@ -10108,7 +10097,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                a3 &= 0xFFFFFFFF;
        }
 
-       if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
+       if (cpl) {
                ret = -KVM_EPERM;
                goto out;
        }
@@ -10169,18 +10158,49 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
                WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
                vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+               /* stat is incremented on completion. */
                return 0;
        }
        default:
                ret = -KVM_ENOSYS;
                break;
        }
+
 out:
+       ++vcpu->stat.hypercalls;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+{
+       unsigned long nr, a0, a1, a2, a3, ret;
+       int op_64_bit;
+       int cpl;
+
+       if (kvm_xen_hypercall_enabled(vcpu->kvm))
+               return kvm_xen_hypercall(vcpu);
+
+       if (kvm_hv_hypercall_enabled(vcpu))
+               return kvm_hv_hypercall(vcpu);
+
+       nr = kvm_rax_read(vcpu);
+       a0 = kvm_rbx_read(vcpu);
+       a1 = kvm_rcx_read(vcpu);
+       a2 = kvm_rdx_read(vcpu);
+       a3 = kvm_rsi_read(vcpu);
+       op_64_bit = is_64_bit_hypercall(vcpu);
+       cpl = static_call(kvm_x86_get_cpl)(vcpu);
+
+       ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
+       if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
+               /* MAP_GPA tosses the request to the user space. */
+               return 0;
+
        if (!op_64_bit)
                ret = (u32)ret;
        kvm_rax_write(vcpu, ret);
 
-       ++vcpu->stat.hypercalls;
        return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);