author    Andi Kleen <andi@firstfloor.org>       2009-07-01 16:18:45 +0200
committer Greg Kroah-Hartman <gregkh@suse.de>    2009-07-02 16:49:32 -0700
commit    c45181c66af214e4187664e46460e87d3aefa97a (patch)
tree      3d26df450f7552e1e0c99a55db48869bc95b0836 /arch/x86
parent    376c10a25a8b73b90e67d2fa870bfda288fa067d (diff)
KVM: Add VT-x machine check support v4
(replaces commit a0861c02a981c943573478ea13b29b1fb958ee5b upstream in a
cleaner way for the 2.6.30 kernel tree)

VT-x needs an explicit MC vector intercept to handle machine checks in
the hypervisor. It also has a special option to catch machine checks
that happen during VT entry.

Do these interceptions and forward them to the Linux machine check
handler. Make it always look like user space is interrupted because the
machine check handler treats kernel/user space differently.

Thanks to Huang Ying and Jiang Yunhong for help and testing.

Cc: ying.huang@intel.com

v2: Handle machine checks still in interrupt off context to avoid
problems on preemptible kernels.
v3: Handle old style 32bit and make fully standalone

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Huang Ying <ying.huang@intel.com>
Acked-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
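[Editor's note] For readers tracing the logic rather than the diff below: the
patch routes two VMCS conditions to the host machine check handler, an exit
reason of 41 (EXIT_REASON_MCE_DURING_VMENTRY, a #MC raised while entering the
guest) and an exception exit whose interruption-information field carries
vector 18 (MC_VECTOR). A minimal standalone sketch of that decode follows; the
kernel's constants are redefined locally so it compiles outside the tree, and
the valid-bit test is an extra guard in this sketch, not something the patch's
fast path does.

#include <stdint.h>
#include <stdio.h>

/* Field layouts per the Intel SDM; values mirror the kernel's
 * definitions but are redefined here so the sketch is standalone. */
#define INTR_INFO_VECTOR_MASK          0xffu      /* bits 7:0 hold the vector */
#define INTR_INFO_VALID_MASK           (1u << 31) /* bit 31: info field valid */
#define MC_VECTOR                      18         /* #MC, machine check */
#define EXIT_REASON_MCE_DURING_VMENTRY 41

/* Returns 1 if this VM exit should be routed to the machine check handler. */
static int exit_is_machine_check(uint32_t exit_reason, uint32_t intr_info)
{
	/* Case 1: the CPU aborted VM entry because a #MC arrived mid-entry. */
	if (exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
		return 1;
	/* Case 2: an intercepted exception exit whose vector is #MC. */
	return (intr_info & INTR_INFO_VALID_MASK) &&
	       (intr_info & INTR_INFO_VECTOR_MASK) == MC_VECTOR;
}

int main(void)
{
	printf("%d\n", exit_is_machine_check(0, INTR_INFO_VALID_MASK | MC_VECTOR)); /* 1 */
	printf("%d\n", exit_is_machine_check(EXIT_REASON_MCE_DURING_VMENTRY, 0));   /* 1 */
	printf("%d\n", exit_is_machine_check(0, INTR_INFO_VALID_MASK | 14));        /* 0 */
	return 0;
}

In the patch itself this check runs in vmx_vcpu_run() while interrupts are
still off, so do_machine_check() sees the event on the same CPU it occurred on.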
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h      |  2
-rw-r--r--  arch/x86/include/asm/mce.h           |  2
-rw-r--r--  arch/x86/include/asm/vmx.h           |  1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_32.c  |  1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c  |  1
-rw-r--r--  arch/x86/kvm/vmx.c                   | 49
6 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f0faf58044ff..a93d1cc413e1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -371,6 +371,8 @@ struct kvm_vcpu_arch {
unsigned long dr6;
unsigned long dr7;
unsigned long eff_db[KVM_NR_DB_REGS];
+
+ u32 exit_reason;
};
struct kvm_mem_alias {
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4f8c199584e7..5c7037830450 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -153,5 +153,7 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
extern void (*mce_threshold_vector)(void);
+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 498f944010b9..11be5ad2e0e9 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -247,6 +247,7 @@ enum vmcs_field {
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EPT_VIOLATION 48
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index 3552119b091d..07b523c5288c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -29,6 +29,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
+EXPORT_SYMBOL_GPL(machine_check_vector);
/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 09dd1d414fc3..289cc4815028 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -420,6 +420,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
out2:
atomic_dec(&mce_entry);
}
+EXPORT_SYMBOL_GPL(do_machine_check);
#ifdef CONFIG_X86_MCE_INTEL
/***
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bb481330716f..e4c78f96c8cf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -32,6 +32,7 @@
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
+#include <asm/mce.h>
#define __ex(x) __kvm_handle_fault_on_reboot(x)
@@ -97,6 +98,8 @@ struct vcpu_vmx {
int soft_vnmi_blocked;
ktime_t entry_time;
s64 vnmi_blocked_time;
+
+ u32 exit_reason;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -478,7 +481,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
if (!vcpu->fpu_active)
eb |= 1u << NM_VECTOR;
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
@@ -2585,6 +2588,35 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
return 0;
}
+/*
+ * Trigger machine check on the host. We assume all the MSRs are already set up
+ * by the CPU and that we still run on the same CPU as the MCE occurred on.
+ * We pass a fake environment to the machine check handler because we want
+ * the guest to be always treated like user space, no matter what context
+ * it used internally.
+ */
+static void kvm_machine_check(void)
+{
+#ifdef CONFIG_X86_MCE
+ struct pt_regs regs = {
+ .cs = 3, /* Fake ring 3 no matter what the guest ran on */
+ .flags = X86_EFLAGS_IF,
+ };
+
+#ifdef CONFIG_X86_64
+ do_machine_check(&regs, 0);
+#else
+ machine_check_vector(&regs, 0);
+#endif
+#endif
+}
+
+static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ /* already handled by vcpu_run */
+ return 1;
+}
+
static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2596,6 +2628,10 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vect_info = vmx->idt_vectoring_info;
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+ if (ex_no == MC_VECTOR)
+ return handle_machine_check(vcpu, kvm_run);
+
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!is_page_fault(intr_info))
printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
@@ -2648,7 +2684,6 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 1;
}
- ex_no = intr_info & INTR_INFO_VECTOR_MASK;
switch (ex_no) {
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
@@ -3150,6 +3185,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_WBINVD] = handle_wbinvd,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
+ [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
};
static const int kvm_vmx_max_exit_handlers =
@@ -3161,8 +3197,8 @@ static const int kvm_vmx_max_exit_handlers =
*/
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
@@ -3512,6 +3548,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+
+ /* Handle machine checks before interrupts are enabled */
+ if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) ||
+ (intr_info & INTR_INFO_VECTOR_MASK) == MC_VECTOR)
+ kvm_machine_check();
+
/* We need to handle NMIs before interrupts are enabled */
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
(intr_info & INTR_INFO_VALID_MASK)) {