path: root/arch/s390/kvm/kvm-s390.c
author	Carsten Otte <cotte@de.ibm.com>	2011-07-24 10:48:21 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-07-24 10:48:21 +0200
commit	598841ca9919d008b520114d8a4378c4ce4e40a1 (patch)
tree	3f823474e70af4305c395fb220a138b5bc4e9f90	/arch/s390/kvm/kvm-s390.c
parent	e5992f2e6c3829cd43dbc4438ee13dcd6506f7f3 (diff)
[S390] use gmap address spaces for kvm guest images
This patch switches kvm from using (Qemu's) user address space to Martin's gmap address space. This way QEMU does not have to use a linker script in order to fit large guests at low addresses in its address space.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--	arch/s390/kvm/kvm-s390.c	32
1 file changed, 21 insertions(+), 11 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 123ebea72282..3ebb4ba83d9d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -190,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
+ kvm->arch.gmap = gmap_alloc(current->mm);
+ if (!kvm->arch.gmap)
+ goto out_nogmap;
+
return 0;
+out_nogmap:
+ debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
out_err:
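
The new allocation slots into the existing goto-based unwind: gmap_alloc() is the last resource acquired in kvm_arch_init_vm(), so its failure label unregisters the debug facility and then falls through to the older labels. A self-contained sketch of that idiom with purely hypothetical names (demo_init and its kzalloc-backed members are made up, not the actual kvm-s390 code):

#include <linux/slab.h>

struct demo {
	void *sca_like;
	void *dbf_like;
	void *gmap_like;
};

/* Acquire resources in order; on failure jump to a label that frees
 * only what was already acquired, in reverse order of acquisition. */
static int demo_init(struct demo *d)
{
	int rc = -ENOMEM;

	d->sca_like = kzalloc(64, GFP_KERNEL);
	if (!d->sca_like)
		goto out_err;
	d->dbf_like = kzalloc(64, GFP_KERNEL);
	if (!d->dbf_like)
		goto out_nosca;
	d->gmap_like = kzalloc(64, GFP_KERNEL);
	if (!d->gmap_like)
		goto out_nodbf;
	return 0;
out_nodbf:
	kfree(d->dbf_like);
out_nosca:
	kfree(d->sca_like);
out_err:
	return rc;
}
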
@@ -235,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_vcpus(kvm);
free_page((unsigned long)(kvm->arch.sca));
debug_unregister(kvm->arch.dbf);
+ gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.gmap = vcpu->kvm->arch.gmap;
return 0;
}
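
Taken together, these hunks give the gmap a simple lifetime: one gmap per VM, created from the QEMU process mm in kvm_arch_init_vm(), shared by every vcpu (kvm_arch_vcpu_init() only copies the pointer), and released in kvm_arch_destroy_vm(). A minimal sketch of that lifecycle, assuming the gmap primitives introduced by the parent commit (gmap_alloc(), gmap_map_segment(), gmap_free()); the helper name and the single mapping at guest address 0 are illustrative only:

#include <linux/sched.h>	/* current */
#include <asm/pgtable.h>	/* struct gmap, gmap_alloc(), gmap_map_segment(), gmap_free() */

/* Hypothetical helper: build a guest address space over the calling
 * process' mm and map one 1 MB-aligned slice of its memory at guest
 * physical address 0, mirroring the memslot path further down. */
static struct gmap *example_guest_as(unsigned long uaddr, unsigned long len)
{
	struct gmap *gmap;

	gmap = gmap_alloc(current->mm);		/* one gmap per VM */
	if (!gmap)
		return NULL;

	/* from = host user address, to = guest physical address, len in bytes */
	if (gmap_map_segment(gmap, uaddr, 0, len)) {
		gmap_free(gmap);		/* tear the address space down again */
		return NULL;
	}
	return gmap;
}
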
@@ -285,7 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
+ atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
vcpu->arch.sie_block->ecb = 6;
vcpu->arch.sie_block->eca = 0xC1002001U;
@@ -454,6 +462,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
+ gmap_enable(vcpu->arch.gmap);
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -462,6 +471,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
+ gmap_disable(vcpu->arch.gmap);
local_irq_disable();
kvm_guest_exit();
local_irq_enable();
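
Each trip through SIE is now bracketed by gmap_enable()/gmap_disable(), so that while the guest runs the CPU knows which guest address space to resolve faults against, and ordinary host-only handling resumes on exit. Reduced to its essentials, the pattern in __vcpu_run() looks roughly like the sketch below; the interrupt and kvm_guest_enter/exit handling from the hunks above is omitted, and sie64a() is kvm-s390's existing low-level SIE entry routine:

#include <linux/kvm_host.h>

static int example_enter_guest(struct kvm_vcpu *vcpu)
{
	int rc;

	gmap_enable(vcpu->arch.gmap);	/* guest address space active for this CPU */
	rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	gmap_disable(vcpu->arch.gmap);	/* back to plain host address handling */

	return rc;
}
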
@@ -479,13 +489,6 @@ rerun_vcpu:
if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
kvm_s390_vcpu_set_mem(vcpu);
- /* verify, that memory has been registered */
- if (!vcpu->arch.sie_block->gmslm) {
- vcpu_put(vcpu);
- VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
- return -EINVAL;
- }
-
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -681,10 +684,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr)
return -EINVAL;
- if (mem->userspace_addr & (PAGE_SIZE - 1))
+ if (mem->userspace_addr & 0xffffful)
return -EINVAL;
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if (mem->memory_size & 0xffffful)
return -EINVAL;
if (!user_alloc)
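
The mask changes from PAGE_SIZE - 1 to 0xfffff because gmap_map_segment() wires guest memory up at s390 segment granularity, i.e. 1 MB, so both the userspace address and the size of a memory slot must now be 1 MB aligned rather than merely page aligned. A hypothetical helper spelling that requirement out (the GMAP_SEGMENT_* names are made up for this example):

#include <linux/types.h>
#include <linux/kvm.h>		/* struct kvm_userspace_memory_region */

#define GMAP_SEGMENT_SIZE	0x100000UL		/* 1 MB s390 segment */
#define GMAP_SEGMENT_MASK	(GMAP_SEGMENT_SIZE - 1)	/* == 0xfffff */

/* A memory slot can be handed to gmap_map_segment() only if its host
 * address and its size both sit on a 1 MB segment boundary. */
static inline bool mem_is_segment_aligned(const struct kvm_userspace_memory_region *mem)
{
	return !(mem->userspace_addr & GMAP_SEGMENT_MASK) &&
	       !(mem->memory_size & GMAP_SEGMENT_MASK);
}
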
@@ -698,15 +701,22 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- int i;
+ int i, rc;
struct kvm_vcpu *vcpu;
+
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ if (rc)
+ return;
+
/* request update of sie control block for all available vcpus */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
+ return;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
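
On the userspace side, the values consumed here come straight from the KVM_SET_USER_MEMORY_REGION ioctl: userspace_addr, guest_phys_addr and memory_size are handed to gmap_map_segment() unchanged, which is what lets QEMU keep guest memory anywhere in its own address space instead of linking itself specially to leave room at low addresses. A rough userspace sketch of registering guest RAM against a VM file descriptor (vm_fd is assumed to come from KVM_CREATE_VM; slot 0, guest physical address 0 and a 1 MB-multiple size match the checks visible in the prepare hunk above):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register "size" bytes (assumed to be a 1 MB multiple) of guest RAM at
 * guest physical address 0. The host buffer is allocated 1 MB aligned so
 * it passes the segment checks in kvm_arch_prepare_memory_region(). */
static int register_guest_ram(int vm_fd, size_t size)
{
	struct kvm_userspace_memory_region region;
	void *host_mem;

	if (posix_memalign(&host_mem, 1UL << 20, size))	/* 1 MB aligned backing */
		return -1;

	memset(&region, 0, sizeof(region));
	region.slot = 0;			/* s390 expects a single slot here */
	region.guest_phys_addr = 0;		/* must be 0, per the check above */
	region.memory_size = size;
	region.userspace_addr = (uint64_t)(unsigned long)host_mem;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
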