From 3606189fa3da6afcad0cbbc9b91e94f1f158da5a Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 27 Oct 2014 14:40:49 +0100
Subject: KVM: emulator: fix error code for __linearize

The error code for #GP and #SS is zero when the segment is used to
access an operand or an instruction.  It is only non-zero when a
segment register is being loaded; for limit checks this means cases
such as:

* for #GP, when RIP is beyond the limit on a far call (before the
  first instruction is executed).  We do not implement this check,
  but it would be in em_jmp_far/em_call_far.

* for #SS, if the new stack overflows during an inter-privilege-level
  call to a non-conforming code segment.  We do not implement stack
  switching at all.

So use an error code of zero.

Reviewed-by: Nadav Amit
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/emulate.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 749f9fa38254..8aa66068712f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -711,9 +711,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, sel);
+		return emulate_ss(ctxt, 0);
 	else
-		return emulate_gp(ctxt, sel);
+		return emulate_gp(ctxt, 0);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
-- cgit v1.2.3
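For context (not part of the patch): the fault-injection helpers that
receive this error code simply record the pending exception in the
emulation context.  In arch/x86/kvm/emulate.c of this era they looked
roughly like the following (simplified sketch):

	static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
				     u32 error, bool valid)
	{
		/* record the pending fault; the caller of the emulator
		 * injects it when X86EMUL_PROPAGATE_FAULT is returned */
		ctxt->exception.vector = vec;
		ctxt->exception.error_code = error;
		ctxt->exception.error_code_valid = valid;
		return X86EMUL_PROPAGATE_FAULT;
	}

	static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
	{
		return emulate_exception(ctxt, GP_VECTOR, err, true);
	}

	static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
	{
		return emulate_exception(ctxt, SS_VECTOR, err, true);
	}

So the patch only changes which error code is recorded, not how the
fault is delivered.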
From fd56e1546a5f734290cbedd2b81c518850736511 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Mon, 27 Oct 2014 14:40:39 +0100
Subject: KVM: emulator: fix execution close to the segment limit

Emulation of code that is 14 bytes to the segment limit or closer
(e.g. RIP = 0xFFFFFFF2 after reset) is broken because we try to read as
many as 15 bytes from the beginning of the instruction, and __linearize
fails when the passed (address, size) pair reaches out of the segment.

To fix this, let __linearize return the maximum accessible size
(clamped to 2^32-1) for usage in __do_insn_fetch_bytes, and avoid
the limit check by passing zero for the desired size.

For expand-down segments, __linearize is performing a redundant check:
(u32)(addr.ea + size - 1) <= lim can only happen if addr.ea is close
to 4GB; in this case, addr.ea + size - 1 will also fail the check
against the upper bound of the segment (which is provided by the D/B
bit).  After eliminating the redundant check, it is simple to compute
the *max_size for expand-down segments too.

Now that the limit check is done in __do_insn_fetch_bytes, we want
to inject a general protection fault there if size < op_size (like
__linearize would have done), instead of just aborting.

This fixes booting Tiano Core from emulated flash with EPT disabled.

Cc: stable@vger.kernel.org
Fixes: 719d5a9b2487e0562f178f61e323c3dc18a8b200
Reported-by: Borislav Petkov
Tested-by: Borislav Petkov
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/emulate.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8aa66068712f..52a96270b560 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -641,7 +641,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
-		     unsigned size, bool write, bool fetch,
+		     unsigned *max_size, unsigned size,
+		     bool write, bool fetch,
 		     ulong *linear)
 {
 	struct desc_struct desc;
@@ -652,10 +653,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -673,20 +679,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
@@ -721,7 +732,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
 
@@ -746,17 +758,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible.  We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself.  Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
@@ -766,7 +788,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
+
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
-- cgit v1.2.3
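To see why the clamping handles the reset case, here is a hypothetical,
self-contained user-space model of the arithmetic (fetch_size() and its
parameters are illustrative only, not kernel code; the expand-up
segment case is assumed):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* Model of the size clamping in __do_insn_fetch_bytes: a fetch
	 * may not cross the 15-byte instruction limit, the segment
	 * limit, or the page boundary. */
	static unsigned fetch_size(uint32_t ea, unsigned cur_size,
				   uint32_t seg_limit, uint64_t linear)
	{
		/* bytes left up to and including the limit; 64-bit
		 * arithmetic avoids overflow when seg_limit is 0xffffffff */
		uint64_t max_size = (uint64_t)seg_limit + 1 - ea;
		/* the kernel writes this as 15UL ^ cur_size; for
		 * cur_size in [0, 15], 15 ^ cur_size == 15 - cur_size */
		unsigned size = 15u - cur_size;

		if (size > max_size)
			size = max_size;
		if (size > PAGE_SIZE - (linear & (PAGE_SIZE - 1)))
			size = PAGE_SIZE - (linear & (PAGE_SIZE - 1));
		return size;
	}

	int main(void)
	{
		/* RIP = 0xFFFFFFF2 with a 4GB flat CS: only 14 bytes
		 * remain, so the old code, which always asked
		 * __linearize for 15, failed here; this prints 14 */
		printf("%u\n", fetch_size(0xFFFFFFF2u, 0, 0xFFFFFFFFu,
					  0xFFFFFFF2u));
		return 0;
	}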
From 7e46dddd6f6cd5dbf3c7bd04a7e75d19475ac9f2 Mon Sep 17 00:00:00 2001
From: Nadav Amit
Date: Tue, 28 Oct 2014 00:03:43 +0200
Subject: KVM: x86: Fix far-jump to non-canonical check

Commit d1442d85cc30 ("KVM: x86: Handle errors when RIP is set during far
jumps") introduced a bug that caused the fix to be incomplete.  Due to
incorrect evaluation, a far jump to a segment with the L bit cleared
(i.e., a 32-bit segment) and an RIP with any of the high bits set
(i.e., RIP[63:32] != 0) may not trigger #GP.  This poses a security
problem.

In addition, the condition for two warnings was incorrect.

Fixes: d1442d85cc30ea75f7d399474ca738e0bc96f715
Reported-by: Dan Carpenter
Signed-off-by: Nadav Amit
[Add #ifdef CONFIG_X86_64 to avoid complaints of undefined behavior. - Paolo]
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/emulate.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 52a96270b560..5edf088ca51e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
 	case 4:
 		ctxt->_eip = (u32)dst;
 		break;
+#ifdef CONFIG_X86_64
 	case 8:
 		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst & ~(u32)-1)))
+		    (!cs_l && (dst >> 32) != 0))
 			return emulate_gp(ctxt, 0);
 		ctxt->_eip = dst;
 		break;
+#endif
 	default:
 		WARN(1, "unsupported eip assignment size\n");
 	}
@@ -2035,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
 		return rc;
@@ -2132,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 		return rc;
 	rc = assign_eip_far(ctxt, eip, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
 	}
 	return rc;
-- cgit v1.2.3
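For background: on 48-bit implementations a virtual address is
canonical when bits 63:47 are all copies of bit 47.  A sketch of the
check, equivalent in effect to the kernel's is_noncanonical_address()
but written without the implementation-defined signed shifts that
prompted the #ifdef above:

	#include <stdbool.h>
	#include <stdint.h>

	/* noncanonical unless the top 17 bits (63:47) are all zeroes
	 * or all ones, i.e. sign-extended from bit 47 */
	static bool is_noncanonical(uint64_t addr)
	{
		uint64_t top = addr >> 47;
		return top != 0 && top != 0x1ffff;
	}

With cs_l set, assign_eip_far rejects noncanonical targets; with cs_l
clear it must instead reject any target with RIP[63:32] != 0, which is
the condition the original commit got wrong.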
From 282da870f4b5868efadcd4dc2a5a738d6fdf65d4 Mon Sep 17 00:00:00 2001
From: Jan Kiszka
Date: Wed, 8 Oct 2014 18:05:39 +0200
Subject: KVM: nVMX: Disable preemption while reading from shadow VMCS

In order to access the shadow VMCS, we need to load it.  At this point,
vmx->loaded_vmcs->vmcs and the actually loaded one start to differ.  If
we now get preempted by Linux, vmx_vcpu_put and, on return,
vmx_vcpu_load will work against the wrong vmcs.  That can cause
copy_shadow_to_vmcs12 to corrupt the vmcs12 state.

Fix the issue by disabling preemption during the copy operation.
copy_vmcs12_to_shadow is safe from this issue as it is executed by
vmx_vcpu_run when preemption is already disabled before vmentry.

This bug is exposed by running Jailhouse within KVM on CPUs with shadow
VMCS support.  Jailhouse never expects an interrupt pending vmexit, but
the bug can cause it if, after copy_shadow_to_vmcs12 is preempted, the
active VMCS happens to have the virtual interrupt pending flag set in
the CPU-based execution controls.

Signed-off-by: Jan Kiszka
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a8b76c4c95e2..a0f78dbaabee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
+	preempt_disable();
+
 	vmcs_load(shadow_vmcs);
 
 	for (i = 0; i < num_fields; i++) {
@@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 	vmcs_clear(shadow_vmcs);
 	vmcs_load(vmx->loaded_vmcs->vmcs);
+
+	preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
-- cgit v1.2.3

From a73896cb5bbdce672945745db8224352a689f580 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Sun, 2 Nov 2014 07:54:30 +0100
Subject: KVM: vmx: defer load of APIC access page address during reset

Most call paths to vmx_vcpu_reset do not hold the SRCU lock.  Defer
loading the APIC access page to the next vmentry.

This avoids the following lockdep splat:

[ INFO: suspicious RCU usage. ]
3.18.0-rc2-test2+ #70 Not tainted
-------------------------------
include/linux/kvm_host.h:474 suspicious rcu_dereference_check() usage!

other info that might help us debug this:

rcu_scheduler_active = 1, debug_locks = 0
1 lock held by qemu-system-x86/2371:
 #0:  (&vcpu->mutex){+.+...}, at: [] vcpu_load+0x20/0xd0 [kvm]

stack backtrace:
CPU: 4 PID: 2371 Comm: qemu-system-x86 Not tainted 3.18.0-rc2-test2+ #70
Hardware name: Dell Inc. OptiPlex 9010/0M9KCM, BIOS A12 01/10/2013
 0000000000000001 ffff880209983ca8 ffffffff816f514f 0000000000000000
 ffff8802099b8990 ffff880209983cd8 ffffffff810bd687 00000000000fee00
 ffff880208a2c000 ffff880208a10000 ffff88020ef50040 ffff880209983d08
Call Trace:
 [] dump_stack+0x4e/0x71
 [] lockdep_rcu_suspicious+0xe7/0x120
 [] gfn_to_memslot+0xd5/0xe0 [kvm]
 [] __gfn_to_pfn+0x33/0x60 [kvm]
 [] gfn_to_page+0x25/0x90 [kvm]
 [] kvm_vcpu_reload_apic_access_page+0x3c/0x80 [kvm]
 [] vmx_vcpu_reset+0x20c/0x460 [kvm_intel]
 [] kvm_vcpu_reset+0x15e/0x1b0 [kvm]
 [] kvm_arch_vcpu_setup+0x2c/0x50 [kvm]
 [] kvm_vm_ioctl+0x1d0/0x780 [kvm]
 [] ? __lock_is_held+0x54/0x80
 [] do_vfs_ioctl+0x300/0x520
 [] ? __fget+0x5/0x250
 [] ? __fget_light+0x2a/0xe0
 [] SyS_ioctl+0x81/0xa0
 [] system_call_fastpath+0x16/0x1b

Reported-by: Takashi Iwai
Reported-by: Alexei Starovoitov
Reviewed-by: Wanpeng Li
Tested-by: Wanpeng Li
Fixes: 38b9917350cb2946e368ba684cfc33d1672f104e
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a0f78dbaabee..3e556c68351b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4579,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	kvm_vcpu_reload_apic_access_page(vcpu);
+	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
-- cgit v1.2.3
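For reference (not part of this patch): a request deferred with
kvm_make_request is consumed on the next entry to the guest, in
vcpu_enter_guest() in arch/x86/kvm/x86.c, where the SRCU lock is held
and the page lookup is safe.  Roughly (simplified excerpt, other
KVM_REQ_* handlers elided):

	if (vcpu->requests) {
		/* ... other request handling ... */
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
	}

This is the standard KVM pattern for work that must run in vCPU context
with the right locks held: callers set a request bit from any context,
and the vmentry path drains the bits at a safe point.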