author	Avi Kivity <avi@redhat.com>	2010-05-04 12:58:32 +0300
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-07-05 11:11:18 -0700
commit	4a277f9cb862ad44d3a83bfcc0cf42727de99ac3 (patch)
tree	87966cb19b527b6f8ce69e2d9a87e7d24162f2da /arch/x86
parent	66307ba1647f9666baebd342520bf83cb7832468 (diff)
KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
On svm, kvm_read_pdptr() may require reading guest memory, which can sleep.

Push the spinlock into mmu_alloc_roots(), and only take it after we've read
the pdptr.

Tested-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
(cherry picked from commit 8facbbff071ff2b19268d3732e31badc60471e21)
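[Editor's note] The hazard being fixed is sleeping while a spinlock is held: spinlock waiters busy-wait and the holder must not schedule, so a guest-memory read under mmu_lock can deadlock or trigger a "scheduling while atomic" splat. Below is a minimal userspace sketch of the before/after ordering, not KVM code: read_pdptr_slow(), alloc_root_buggy(), and alloc_root_fixed() are hypothetical stand-ins, and usleep() stands in for a guest-memory access that may fault and sleep.

/* Userspace analogue of the pattern in this patch (hypothetical names). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t mmu_lock;
static unsigned long shadow_root;

/* Stand-in for a read that can sleep (guest memory access on svm). */
static unsigned long read_pdptr_slow(void)
{
	usleep(1000);	/* pretend this faults in guest memory */
	return 0x1000;
}

/* Buggy ordering: the potentially-sleeping read runs under the lock. */
static void alloc_root_buggy(void)
{
	pthread_spin_lock(&mmu_lock);
	shadow_root = read_pdptr_slow();	/* sleeps with lock held */
	pthread_spin_unlock(&mmu_lock);
}

/* Fixed ordering, as in the patch: read first, lock only for the update. */
static void alloc_root_fixed(void)
{
	unsigned long pdptr = read_pdptr_slow();	/* lock not held */

	pthread_spin_lock(&mmu_lock);
	shadow_root = pdptr;	/* short, non-blocking critical section */
	pthread_spin_unlock(&mmu_lock);
}

int main(void)
{
	pthread_spin_init(&mmu_lock, PTHREAD_PROCESS_PRIVATE);
	alloc_root_buggy();	/* harmless single-threaded, fatal in-kernel */
	alloc_root_fixed();
	printf("root = %#lx\n", shadow_root);
	pthread_spin_destroy(&mmu_lock);
	return 0;
}

Compile with gcc -pthread; the fixed variant keeps the critical section short and non-blocking, which is what pushing the lock acquisition past the pdptr read achieves in mmu_alloc_roots().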
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	7
1 file changed, 7 insertions, 0 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fd2b3af410a8..a2656266f999 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2091,11 +2091,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			direct = 1;
 		if (mmu_check_root(vcpu, root_gfn))
 			return 1;
+		spin_lock(&vcpu->kvm->mmu_lock);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu.root_hpa = root;
 		return 0;
 	}
@@ -2117,11 +2119,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = 0;
 		if (mmu_check_root(vcpu, root_gfn))
 			return 1;
+		spin_lock(&vcpu->kvm->mmu_lock);
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
+		spin_unlock(&vcpu->kvm->mmu_lock);
+
 		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
@@ -2485,7 +2490,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 		goto out;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	r = mmu_alloc_roots(vcpu);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	mmu_sync_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (r)
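[Editor's note] The companion hunk in kvm_mmu_load() is required by the hunks above: mmu_lock is now dropped before calling mmu_alloc_roots() and re-taken afterwards for mmu_sync_roots(), because mmu_alloc_roots() acquires the lock internally and kernel spinlocks are not recursive; holding it across the call would self-deadlock.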