Diffstat (limited to 'arch/s390/kvm/kvm-s390.c')
-rw-r--r--  arch/s390/kvm/kvm-s390.c  |  330
 1 file changed, 234 insertions(+), 96 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 846589281b04..835d60bedb54 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -246,7 +246,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
- r = KVM_MAX_VCPUS;
+ r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
+ : KVM_S390_BSCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
@@ -257,6 +258,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_VECTOR_REGISTERS:
r = MACHINE_HAS_VX;
break;
+ case KVM_CAP_S390_RI:
+ r = test_facility(64);
+ break;
default:
r = 0;
}
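
Not part of the commit: a minimal userspace sketch of how the new limits are observed. KVM_CAP_MAX_VCPUS now reports the number of SCA slots (64 basic / 248 extended, per the KVM_S390_*_CPU_SLOTS definitions added elsewhere in this series) instead of the static KVM_MAX_VCPUS, and KVM_CAP_S390_RI can be probed the same way, assuming a uapi header new enough to carry it.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);

		/* 64 without ESCA support, the ESCA slot count with it */
		int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
		/* non-zero iff facility 64 is available on the host */
		int has_ri    = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_RI);

		printf("max vcpus: %d, runtime instrumentation: %d\n", max_vcpus, has_ri);
		return 0;
	}
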
@@ -283,6 +287,8 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
}
/* Section: vm related */
+static void sca_del_vcpu(struct kvm_vcpu *vcpu);
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -355,6 +361,20 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
r ? "(not available)" : "(success)");
break;
+ case KVM_CAP_S390_RI:
+ r = -EINVAL;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus)) {
+ r = -EBUSY;
+ } else if (test_facility(64)) {
+ set_kvm_facility(kvm->arch.model.fac->mask, 64);
+ set_kvm_facility(kvm->arch.model.fac->list, 64);
+ r = 0;
+ }
+ mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
+ r ? "(not available)" : "(success)");
+ break;
case KVM_CAP_S390_USER_STSI:
VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
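
Not part of the commit: a sketch of how userspace would switch the new capability on via the standard KVM_ENABLE_CAP VM ioctl. As implemented above, this only succeeds before the first vCPU is created and only when facility 64 is present.

	static int enable_runtime_instrumentation(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_RI };

		/* fails with EBUSY once vCPUs exist, EINVAL without facility 64 */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
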
@@ -375,8 +395,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *att
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
- kvm->arch.gmap->asce_end);
- if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+ kvm->arch.mem_limit);
+ if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
ret = -EFAULT;
break;
default:
@@ -428,9 +448,17 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
if (get_user(new_limit, (u64 __user *)attr->addr))
return -EFAULT;
- if (new_limit > kvm->arch.gmap->asce_end)
+ if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
+ new_limit > kvm->arch.mem_limit)
return -E2BIG;
+ if (!new_limit)
+ return -EINVAL;
+
+ /* gmap_alloc takes last usable address */
+ if (new_limit != KVM_S390_NO_MEM_LIMIT)
+ new_limit -= 1;
+
ret = -EBUSY;
mutex_lock(&kvm->lock);
if (atomic_read(&kvm->online_vcpus) == 0) {
@@ -447,7 +475,9 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
}
mutex_unlock(&kvm->lock);
- VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
+ VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
+ VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+ (void *) kvm->arch.gmap->asce);
break;
}
default:
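
Not part of the commit: a userspace sketch of driving the memory-limit attribute that now backs kvm->arch.mem_limit, using the existing KVM_S390_VM_MEM_CTRL device attribute. The limit is passed in bytes; reading it back now returns the stored mem_limit rather than the raw gmap asce_end.

	static int set_guest_mem_limit(int vm_fd, __u64 limit_bytes)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MEM_CTRL,
			.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
			.addr  = (__u64)&limit_bytes,
		};

		/* must be called before vCPU creation; 0 is rejected with -EINVAL */
		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
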
@@ -1024,7 +1054,7 @@ static int kvm_s390_apxa_installed(void)
u8 config[128];
int cc;
- if (test_facility(2) && test_facility(12)) {
+ if (test_facility(12)) {
cc = kvm_s390_query_ap_config(config);
if (cc)
@@ -1075,6 +1105,15 @@ static int kvm_s390_crypto_init(struct kvm *kvm)
return 0;
}
+static void sca_dispose(struct kvm *kvm)
+{
+ if (kvm->arch.use_esca)
+ free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
+ else
+ free_page((unsigned long)(kvm->arch.sca));
+ kvm->arch.sca = NULL;
+}
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int i, rc;
@@ -1098,14 +1137,17 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
rc = -ENOMEM;
- kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+ kvm->arch.use_esca = 0; /* start with basic SCA */
+ rwlock_init(&kvm->arch.sca_lock);
+ kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
if (!kvm->arch.sca)
goto out_err;
spin_lock(&kvm_lock);
sca_offset += 16;
- if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+ if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
sca_offset = 0;
- kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
+ kvm->arch.sca = (struct bsca_block *)
+ ((char *) kvm->arch.sca + sca_offset);
spin_unlock(&kvm_lock);
sprintf(debug_name, "kvm-%u", current->pid);
@@ -1157,8 +1199,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type & KVM_VM_S390_UCONTROL) {
kvm->arch.gmap = NULL;
+ kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
} else {
- kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
+ if (sclp.hamax == U64_MAX)
+ kvm->arch.mem_limit = TASK_MAX_SIZE;
+ else
+ kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
+ sclp.hamax + 1);
+ kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
if (!kvm->arch.gmap)
goto out_err;
kvm->arch.gmap->private = kvm;
@@ -1170,14 +1218,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.epoch = 0;
spin_lock_init(&kvm->arch.start_stop_lock);
- KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
+ KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
out_err:
kfree(kvm->arch.crypto.crycb);
free_page((unsigned long)kvm->arch.model.fac);
debug_unregister(kvm->arch.dbf);
- free_page((unsigned long)(kvm->arch.sca));
+ sca_dispose(kvm);
KVM_EVENT(3, "creation of vm failed: %d", rc);
return rc;
}
@@ -1188,14 +1236,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
kvm_s390_clear_local_irqs(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
- if (!kvm_is_ucontrol(vcpu->kvm)) {
- clear_bit(63 - vcpu->vcpu_id,
- (unsigned long *) &vcpu->kvm->arch.sca->mcn);
- if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
- (__u64) vcpu->arch.sie_block)
- vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
- }
- smp_mb();
+ if (!kvm_is_ucontrol(vcpu->kvm))
+ sca_del_vcpu(vcpu);
if (kvm_is_ucontrol(vcpu->kvm))
gmap_free(vcpu->arch.gmap);
@@ -1228,14 +1270,14 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);
free_page((unsigned long)kvm->arch.model.fac);
- free_page((unsigned long)(kvm->arch.sca));
+ sca_dispose(kvm);
debug_unregister(kvm->arch.dbf);
kfree(kvm->arch.crypto.crycb);
if (!kvm_is_ucontrol(kvm))
gmap_free(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
- KVM_EVENT(3, "vm 0x%p destroyed", kvm);
+ KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
@@ -1249,6 +1291,117 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
return 0;
}
+static void sca_del_vcpu(struct kvm_vcpu *vcpu)
+{
+ read_lock(&vcpu->kvm->arch.sca_lock);
+ if (vcpu->kvm->arch.use_esca) {
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+
+ clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
+ sca->cpu[vcpu->vcpu_id].sda = 0;
+ } else {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+ clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
+ sca->cpu[vcpu->vcpu_id].sda = 0;
+ }
+ read_unlock(&vcpu->kvm->arch.sca_lock);
+}
+
+static void sca_add_vcpu(struct kvm_vcpu *vcpu)
+{
+ read_lock(&vcpu->kvm->arch.sca_lock);
+ if (vcpu->kvm->arch.use_esca) {
+ struct esca_block *sca = vcpu->kvm->arch.sca;
+
+ sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
+ vcpu->arch.sie_block->ecb2 |= 0x04U;
+ set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
+ } else {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+ sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
+ }
+ read_unlock(&vcpu->kvm->arch.sca_lock);
+}
+
+/* Basic SCA to Extended SCA data copy routines */
+static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
+{
+ d->sda = s->sda;
+ d->sigp_ctrl.c = s->sigp_ctrl.c;
+ d->sigp_ctrl.scn = s->sigp_ctrl.scn;
+}
+
+static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
+{
+ int i;
+
+ d->ipte_control = s->ipte_control;
+ d->mcn[0] = s->mcn;
+ for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
+ sca_copy_entry(&d->cpu[i], &s->cpu[i]);
+}
+
+static int sca_switch_to_extended(struct kvm *kvm)
+{
+ struct bsca_block *old_sca = kvm->arch.sca;
+ struct esca_block *new_sca;
+ struct kvm_vcpu *vcpu;
+ unsigned int vcpu_idx;
+ u32 scaol, scaoh;
+
+ new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
+ if (!new_sca)
+ return -ENOMEM;
+
+ scaoh = (u32)((u64)(new_sca) >> 32);
+ scaol = (u32)(u64)(new_sca) & ~0x3fU;
+
+ kvm_s390_vcpu_block_all(kvm);
+ write_lock(&kvm->arch.sca_lock);
+
+ sca_copy_b_to_e(new_sca, old_sca);
+
+ kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
+ vcpu->arch.sie_block->scaoh = scaoh;
+ vcpu->arch.sie_block->scaol = scaol;
+ vcpu->arch.sie_block->ecb2 |= 0x04U;
+ }
+ kvm->arch.sca = new_sca;
+ kvm->arch.use_esca = 1;
+
+ write_unlock(&kvm->arch.sca_lock);
+ kvm_s390_vcpu_unblock_all(kvm);
+
+ free_page((unsigned long)old_sca);
+
+ VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+ old_sca, kvm->arch.sca);
+ return 0;
+}
+
+static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
+{
+ int rc;
+
+ if (id < KVM_S390_BSCA_CPU_SLOTS)
+ return true;
+ if (!sclp.has_esca)
+ return false;
+
+ mutex_lock(&kvm->lock);
+ rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
+ mutex_unlock(&kvm->lock);
+
+ return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
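
Not part of the commit: a sketch of what the *_inv bit helpers above expand to, assuming the s390 implementation of set_bit_inv()/clear_bit_inv() flips the bit number with BITS_PER_LONG - 1 (MSB-0 numbering). For the single-word basic SCA this matches the old "63 - vcpu_id" open coding; for the four-word ESCA mcn[] array it extends naturally across words.

	/* illustrative only, not the kernel's implementation */
	static inline void sketch_set_bit_inv(unsigned long nr, unsigned long *ptr)
	{
		/* word nr / 64, most-significant bit first within each word */
		ptr[nr / 64] |= 1UL << (63 - (nr % 64));
	}

	/* sketch_set_bit_inv(0, mcn)  sets the MSB of mcn[0] (vcpu 0)  */
	/* sketch_set_bit_inv(64, mcn) sets the MSB of mcn[1] (vcpu 64) */
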
@@ -1259,6 +1412,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
+ if (test_kvm_facility(vcpu->kvm, 64))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
if (test_kvm_facility(vcpu->kvm, 129))
vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
@@ -1369,8 +1524,11 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
- if (!kvm_is_ucontrol(vcpu->kvm))
+ if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
+ sca_add_vcpu(vcpu);
+ }
+
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
@@ -1439,10 +1597,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= 0x10000000U;
+ if (test_kvm_facility(vcpu->kvm, 64))
+ vcpu->arch.sie_block->ecb3 |= 0x01;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
vcpu->arch.sie_block->ecd |= 0x20000000;
}
+ vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
if (vcpu->kvm->arch.use_cmma) {
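
Not part of the commit: a sketch of the userspace-visible side of the RICCB wiring above, assuming the companion sync-regs patch in this series adds a 64-byte riccb field and the KVM_SYNC_RICCB flag to the shared run structure. When facility 64 is enabled for the guest, the RICCB contents are mirrored into kvm_run on every exit.

	#include <string.h>
	#include <linux/kvm.h>

	static void save_riccb(struct kvm_run *run, unsigned char riccb_copy[64])
	{
		/* assumed field/flag names from the sync-regs part of this series */
		if (run->kvm_valid_regs & KVM_SYNC_RICCB)
			memcpy(riccb_copy, run->s.regs.riccb, sizeof(run->s.regs.riccb));
	}
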
@@ -1465,7 +1626,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
struct sie_page *sie_page;
int rc = -EINVAL;
- if (id >= KVM_MAX_VCPUS)
+ if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
goto out;
rc = -ENOMEM;
@@ -1482,20 +1643,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
vcpu->arch.sie_block->icpua = id;
- if (!kvm_is_ucontrol(kvm)) {
- if (!kvm->arch.sca) {
- WARN_ON_ONCE(1);
- goto out_free_cpu;
- }
- if (!kvm->arch.sca->cpu[id].sda)
- kvm->arch.sca->cpu[id].sda =
- (__u64) vcpu->arch.sie_block;
- vcpu->arch.sie_block->scaoh =
- (__u32)(((__u64)kvm->arch.sca) >> 32);
- vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
- set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
- }
-
spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
vcpu->arch.local_int.wq = &vcpu->wq;
@@ -1509,15 +1656,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
*/
vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
GFP_KERNEL);
- if (!vcpu->arch.guest_fpregs.fprs) {
- rc = -ENOMEM;
+ if (!vcpu->arch.guest_fpregs.fprs)
goto out_free_sie_block;
- }
rc = kvm_vcpu_init(vcpu, kvm, id);
if (rc)
goto out_free_sie_block;
- VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+ VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
@@ -2013,7 +2158,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
*/
kvm_check_async_pf_completion(vcpu);
- memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
+ vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
+ vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
if (need_resched())
schedule();
@@ -2071,8 +2217,6 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
- int rc = -1;
-
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
@@ -2080,40 +2224,36 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
if (guestdbg_enabled(vcpu))
kvm_s390_restore_guest_per_regs(vcpu);
- if (exit_reason >= 0) {
- rc = 0;
+ vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
+ vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
+
+ if (vcpu->arch.sie_block->icptcode > 0) {
+ int rc = kvm_handle_sie_intercept(vcpu);
+
+ if (rc != -EOPNOTSUPP)
+ return rc;
+ vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
+ vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+ vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
+ vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
+ return -EREMOTE;
+ } else if (exit_reason != -EFAULT) {
+ vcpu->stat.exit_null++;
+ return 0;
} else if (kvm_is_ucontrol(vcpu->kvm)) {
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
vcpu->run->s390_ucontrol.trans_exc_code =
current->thread.gmap_addr;
vcpu->run->s390_ucontrol.pgm_code = 0x10;
- rc = -EREMOTE;
-
+ return -EREMOTE;
} else if (current->thread.gmap_pfault) {
trace_kvm_s390_major_guest_pfault(vcpu);
current->thread.gmap_pfault = 0;
- if (kvm_arch_setup_async_pf(vcpu)) {
- rc = 0;
- } else {
- gpa_t gpa = current->thread.gmap_addr;
- rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
- }
- }
-
- if (rc == -1)
- rc = vcpu_post_run_fault_in_sie(vcpu);
-
- memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
-
- if (rc == 0) {
- if (kvm_is_ucontrol(vcpu->kvm))
- /* Don't exit for host interrupts. */
- rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
- else
- rc = kvm_handle_sie_intercept(vcpu);
+ if (kvm_arch_setup_async_pf(vcpu))
+ return 0;
+ return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
}
-
- return rc;
+ return vcpu_post_run_fault_in_sie(vcpu);
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
@@ -2233,18 +2373,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
rc = 0;
}
- if (rc == -EOPNOTSUPP) {
- /* intercept cannot be handled in-kernel, prepare kvm-run */
- kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
- kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
- kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
- kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
- rc = 0;
- }
-
if (rc == -EREMOTE) {
- /* intercept was handled, but userspace support is needed
- * kvm_run has been prepared by the handler */
+ /* userspace support is needed, kvm_run has been prepared */
rc = 0;
}
@@ -2270,37 +2400,37 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
u64 clkcomp;
int rc;
+ px = kvm_s390_get_prefix(vcpu);
if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
if (write_guest_abs(vcpu, 163, &archmode, 1))
return -EFAULT;
- gpa = SAVE_AREA_BASE;
+ gpa = 0;
} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
if (write_guest_real(vcpu, 163, &archmode, 1))
return -EFAULT;
- gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
- }
- rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+ gpa = px;
+ } else
+ gpa -= __LC_FPREGS_SAVE_AREA;
+ rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
vcpu->arch.guest_fpregs.fprs, 128);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
+ rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
&vcpu->arch.sie_block->gpsw, 16);
- px = kvm_s390_get_prefix(vcpu);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
+ rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
&px, 4);
- rc |= write_guest_abs(vcpu,
- gpa + offsetof(struct save_area, fp_ctrl_reg),
+ rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
&vcpu->arch.guest_fpregs.fpc, 4);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
+ rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
&vcpu->arch.sie_block->todpr, 4);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
&vcpu->arch.sie_block->cputm, 8);
clkcomp = vcpu->arch.sie_block->ckc >> 8;
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
&clkcomp, 8);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
&vcpu->run->s.regs.acrs, 64);
- rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
+ rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
&vcpu->arch.sie_block->gcr, 128);
return rc ? -EFAULT : 0;
}
@@ -2736,6 +2866,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (mem->memory_size & 0xffffful)
return -EINVAL;
+ if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
+ return -EINVAL;
+
return 0;
}
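
Not part of the commit: the userspace-visible consequence of the new check is that a memory slot crossing the configured limit is refused at registration time. A minimal sketch, with host_mem and ram_size as placeholder inputs:

	static int add_ram_slot(int vm_fd, void *host_mem, __u64 ram_size)
	{
		struct kvm_userspace_memory_region region = {
			.slot            = 0,
			.guest_phys_addr = 0,
			.memory_size     = ram_size,	/* must end below kvm->arch.mem_limit */
			.userspace_addr  = (__u64)(unsigned long)host_mem,
		};

		/* now also fails with -EINVAL when the slot crosses the mem limit */
		return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
	}
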
@@ -2767,6 +2900,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
static int __init kvm_s390_init(void)
{
+ if (!sclp.has_sief2) {
+ pr_info("SIE not available\n");
+ return -ENODEV;
+ }
+
return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}