KVM: Reduce stack usage in kvm_pv_mmu_op()
author Dave Hansen <dave@linux.vnet.ibm.com>
Thu, 6 Aug 2009 17:39:52 +0000 (14:39 -0300)
committer Greg Kroah-Hartman <gregkh@suse.de>
Wed, 9 Sep 2009 03:17:12 +0000 (20:17 -0700)
(cherry picked from commit 6ad18fba05228fb1d47cdbc0339fe8b3fca1ca26)

kvm_pv_mmu_op() sits in a hot path, so we can't fall back on
kmalloc(): the allocation cost might impact performance.  Instead,
we move the 512-byte buffer it needs off the stack and into the
kvm_vcpu_arch structure.  That structure is long-lived and heavily
used, so the extra space is not really a waste.

We also have to move the buffer structure's definition to the
arch-specific x86 kvm header.
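
For readers outside the kernel tree, here is a minimal standalone
sketch of the pattern this patch applies: a large scratch buffer is
moved off the stack of a hot function and into a long-lived
per-object context, avoiding both a big stack frame and allocator
calls on the hot path.  The names (scratch_buf, ctx,
process_request) are illustrative, not from the patch:

/* Standalone sketch, not kernel code. */
#include <stdio.h>
#include <string.h>

struct scratch_buf {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512];
};

struct ctx {
	/* lives as long as the object, so the 512 bytes are paid once */
	struct scratch_buf scratch;
};

/* Before: "struct scratch_buf buffer;" here would cost ~524 bytes of stack. */
static int process_request(struct ctx *c, const char *src, unsigned bytes)
{
	struct scratch_buf *buffer = &c->scratch;

	buffer->ptr = buffer->buf;
	buffer->len = bytes < sizeof(buffer->buf) ? bytes : sizeof(buffer->buf);
	buffer->processed = 0;

	memcpy(buffer->buf, src, buffer->len);
	while (buffer->len) {
		/* consume one unit, mirroring kvm_pv_mmu_op_one() */
		buffer->ptr = (char *)buffer->ptr + 1;
		buffer->len--;
		buffer->processed++;
	}
	return buffer->processed;
}

int main(void)
{
	static struct ctx c;	/* per-object, like one kvm_vcpu_arch per vCPU */

	printf("processed %d bytes\n", process_request(&c, "hello", 5));
	return 0;
}

The trade-off is the one the commit message describes: a few hundred
bytes of per-vCPU memory paid once, in exchange for a small stack
frame and no kmalloc() on every call.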

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
 arch/x86/kvm/mmu.c         |   23 ++++++++---------------
 include/asm-x86/kvm_host.h |   10 ++++++++++
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 51ff937e551d67626c41a5e2abfb72b892e24468..19b0d691886ef96f5bb82f18781156dd56720866 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
-struct kvm_pv_mmu_op_buffer {
-       void *ptr;
-       unsigned len;
-       unsigned processed;
-       char buf[512] __aligned(sizeof(long));
-};
-
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
@@ -2294,18 +2287,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret)
 {
        int r;
-       struct kvm_pv_mmu_op_buffer buffer;
+       struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
 
-       buffer.ptr = buffer.buf;
-       buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
-       buffer.processed = 0;
+       buffer->ptr = buffer->buf;
+       buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+       buffer->processed = 0;
 
-       r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+       r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
        if (r)
                goto out;
 
-       while (buffer.len) {
-               r = kvm_pv_mmu_op_one(vcpu, &buffer);
+       while (buffer->len) {
+               r = kvm_pv_mmu_op_one(vcpu, buffer);
                if (r < 0)
                        goto out;
                if (r == 0)
@@ -2314,7 +2307,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 
        r = 1;
 out:
-       *ret = buffer.processed;
+       *ret = buffer->processed;
        return r;
 }
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c2e34c27590066e4cf3359b78c1805d6c02805fc..cf7c887165c4e8eddf44a11d760551ecb2231dee 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -195,6 +195,13 @@ struct kvm_mmu_page {
        };
 };
 
+struct kvm_pv_mmu_op_buffer {
+       void *ptr;
+       unsigned len;
+       unsigned processed;
+       char buf[512] __aligned(sizeof(long));
+};
+
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
  * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
@@ -237,6 +244,9 @@ struct kvm_vcpu_arch {
        bool tpr_access_reporting;
 
        struct kvm_mmu mmu;
+       /* only needed in kvm_pv_mmu_op() path, but it's hot so
+        * put it here to avoid allocation */
+       struct kvm_pv_mmu_op_buffer mmu_op_buffer;
 
        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;