author    Marc Zyngier <marc.zyngier@arm.com>  2017-03-16 18:20:50 +0000
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-04-12 12:38:33 +0200
commit    d4ad442b9982fba9eab0f9003c8cd185a1afeff6 (patch)
tree      c1581ac4984a6f3df11c70b1d54df03d9d59a5bc /arch/arm/kvm
parent    8e88806117e4868bc459a3042e55f8bf06c0b9e0 (diff)
arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region
commit 72f310481a08db821b614e7b5d00febcc9064b36 upstream.

We don't hold the mmap_sem while searching for VMAs (via find_vma) in
kvm_arch_prepare_memory_region, which can end up in unexpected failures.

Fixes: commit 8eef91239e57 ("arm/arm64: KVM: map MMIO regions at creation time")
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Eric Auger <eric.auger@rehat.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
[ Handle dirty page logging failure case ]
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
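For reference, the sketch below is not the patched function itself; the function name and parameters are hypothetical. It condenses the locking pattern the patch introduces: find_vma() is only safe under a read acquisition of current->mm->mmap_sem, and every early exit from the protected region has to drop the lock through a common label.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical, condensed illustration of the pattern applied by this
 * patch: take mmap_sem for reading around the VMA walk, and route all
 * failure paths through a label that releases it.
 */
static int prepare_region_sketch(unsigned long hva, unsigned long reg_end,
				 bool log_dirty_pages)
{
	int ret = 0;

	down_read(&current->mm->mmap_sem);	/* protects find_vma() below */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);

		if (!vma || vma->vm_start >= reg_end)
			break;

		/* IO region dirty page logging not allowed (as in the patch) */
		if (log_dirty_pages && (vma->vm_flags & VM_PFNMAP)) {
			ret = -EINVAL;
			goto out;
		}

		hva = vma->vm_end;
	} while (hva < reg_end);
out:
	up_read(&current->mm->mmap_sem);	/* released on every exit path */
	return ret;
}

In the actual diff below, the same structure appears as a down_read() before the VMA loop, an out: label that replaces the early returns, and an up_read() before the final return.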
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--  arch/arm/kvm/mmu.c  11
1 file changed, 8 insertions, 3 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5366a736151e..f91ee2f27b41 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1761,6 +1761,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(&current->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1804,8 +1805,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@@ -1817,7 +1820,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@@ -1825,6 +1828,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(&current->mm->mmap_sem);
return ret;
}