From 7e57cba06074da84d7c24d8c3f44040d2d8c88ac Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 8 Jan 2010 02:58:03 +0100
Subject: KVM: PPC: Use PACA backed shadow vcpu

We're being horribly racy right now. All the entry and exit code hijacks
random fields from the PACA that could easily be used by different code in
case we get interrupted, for example by a #MC or even page fault.

After discussing this with Ben, we figured it's best to reserve some more
space in the PACA and just shove some vcpu state off to there.

That way we can drastically improve the readability of the code, make it
less racy and less complex.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 74b7369770d0..f192017d799d 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
@@ -69,6 +70,7 @@ struct kvmppc_sid_map {
 
 struct kvmppc_vcpu_book3s {
 	struct kvm_vcpu vcpu;
+	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct kvmppc_slb slb[64];
 	struct {
--
cgit v1.2.3

From 021ec9c69f8b7b20f46296cc76cc4cb341b25191 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 8 Jan 2010 02:58:06 +0100
Subject: KVM: PPC: Call SLB patching code in interrupt safe manner

Currently we're racy when doing the transition from IR=1 to IR=0, from
the module memory entry code to the real mode SLB switching code.

To work around that I took a look at the RTAS entry code, which is faced
with a similar problem, and did the same thing: a small helper in linear
mapped memory that does mtmsr with IR=0 and then RFIs into the actual
handler.

Thanks to that trick we can safely take page faults in the entry code
and only need to be really wary of what we do from the SLB switching
part onwards.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f192017d799d..c91be0ff0232 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
--
cgit v1.2.3
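[The mechanism behind kvmppc_rmcall, sketched in C for illustration. The
kernel's actual helper is assembly, and the function name below is made
up; only the SRR0/SRR1 + RFI trick is taken from the commit above.
Instead of mtmsr followed by a branch -- two steps an interrupt could
split -- the target address and target MSR are loaded into SRR0/SRR1 and
a single rfid switches PC and MSR atomically:

    #include <asm/reg.h>

    /*
     * Illustrative sketch only, assuming a 64-bit Book3S kernel context.
     * rfid swaps PC and MSR in one instruction, so there is no window
     * where we run with relocation half-configured. Control does not
     * return here.
     */
    static void rmcall_sketch(unsigned long handler, unsigned long msr)
    {
            mtspr(SPRN_SRR0, handler); /* real-mode address to continue at */
            mtspr(SPRN_SRR1, msr);     /* MSR to resume with, IR=DR=0 */
            asm volatile("rfid");      /* atomic PC+MSR switch */
    }
]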
From 25a8a02d26a71c28e26417a3520c653c2d40af6b Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 8 Jan 2010 02:58:07 +0100
Subject: KVM: PPC: Emulate trap SRR1 flags properly

Book3S needs some flags in SRR1 to know details about an interrupt. One
such example is the trap instruction. It tells the guest kernel that a
program interrupt is due to a trap using a bit in SRR1.

This patch implements the above behavior, making WARN_ON in the guest
behave like WARN_ON on bare metal: a warning rather than an oops.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c91be0ff0232..79ab8faf18e7 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -91,6 +91,7 @@ struct kvmppc_vcpu_book3s {
 	u64 vsid_next;
 	u64 vsid_max;
 	int context_id;
+	ulong prog_flags; /* flags to inject when giving a 700 trap */
 };
 
 #define CONTEXT_HOST 0
--
cgit v1.2.3

From 4b5c9b7f9bdd76a3c860731db08bfc6758e96e29 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Sun, 10 Jan 2010 03:27:47 +0100
Subject: KVM: PPC: Make large pages work

An SLB entry contains two pieces of information related to size:

  1) PTE size
  2) SLB size

The L bit defines the PTE to be "large" (usually meaning 16MB), while
SLB_VSID_B_1T defines that the SLB should span 1TB instead of the
default 256MB.

Apparently I messed things up and just put those two in one box, shook
it heavily and came up with the current code, which handles large pages
incorrectly because it also treats large page SLB entries as "1TB"
segment entries.

This patch splits those two features apart, making Linux guests boot
even when they have more than 256MB.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 79ab8faf18e7..c7db69f1e779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -34,7 +34,8 @@ struct kvmppc_slb {
 	bool Ks;
 	bool Kp;
 	bool nx;
-	bool large;
+	bool large;	/* PTEs are 16MB */
+	bool tb;	/* 1TB segment */
 	bool class;
 };
--
cgit v1.2.3

From d5e528136cda31a32ff7d1eaa8d06220eb443781 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 15 Jan 2010 14:49:10 +0100
Subject: KVM: PPC: Add helper functions to call real mode loaders

Linux contains quite a few bits of code to load FPU, Altivec and VSX
lazily for a task. It calls those bits in real mode, coming from an
interrupt handler.

For KVM we'd better reuse those, so let's wrap a bit of trampoline magic
around them; then we can call them from normal module code.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index c7db69f1e779..e1b441cce416 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -124,6 +124,9 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
 extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_load_up_fpu(void);
+extern void kvmppc_load_up_altivec(void);
+extern void kvmppc_load_up_vsx(void);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
--
cgit v1.2.3
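[As a usage sketch, a fault handler might dispatch to one of these
trampolined loaders based on the MSR facility bit the guest tried to
use. The dispatcher below is hypothetical and assumes Altivec/VSX
support is compiled in; only the three kvmppc_load_up_* externs come
from the patch above:

    #include <asm/reg.h>
    #include <asm/kvm_book3s.h>

    /*
     * Hypothetical dispatcher: each helper trampolines into the
     * kernel's real-mode load_up_{fpu,altivec,vsx} lazy-loading code.
     */
    static void load_up_facility_sketch(unsigned long msr_bit)
    {
            switch (msr_bit) {
            case MSR_FP:
                    kvmppc_load_up_fpu();
                    break;
            case MSR_VEC:
                    kvmppc_load_up_altivec();
                    break;
            case MSR_VSX:
                    kvmppc_load_up_vsx();
                    break;
            }
    }
]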
From 1c0006d8d131585095c4a27dbfcfb3970807a35e Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 15 Jan 2010 14:49:12 +0100
Subject: KVM: PPC: Fix initial GPR settings

Commit 7d01b4c3ed2bb33ceaf2d270cb4831a67a76b51b introduced PACA backed
vcpu values. With that change, when a userspace app set GPRs before the
vcpu was actually first loaded, the values got discarded. This is
because vcpu_load loads them from the vcpu backing store that we use
whenever we're not owning the PACA.

That behavior is not really a major problem, because we don't need it
for qemu. Other users (like kvmctl) do have problems with it though, so
let's better do it right.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
 arch/powerpc/include/asm/kvm_book3s.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/powerpc/include/asm/kvm_book3s.h')

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e1b441cce416..db7db0a96967 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
-#include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s_64_asm.h>
 
 struct kvmppc_slb {
--
cgit v1.2.3
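[What "doing it right" means here, as an illustrative sketch: a
userspace write must reach both the PACA-backed shadow copy and the
vcpu backing store, otherwise the next vcpu_load clobbers it with stale
data. The function name, the num < 14 split and the field layout below
are assumptions based on this series' shadow vcpu, not the kernel's
exact code:

    /* Illustrative accessor; to_book3s() is the helper defined in
     * kvm_book3s.h above. */
    static inline void kvmppc_set_gpr_sketch(struct kvm_vcpu *vcpu, int num,
                                             unsigned long val)
    {
            if (num < 14)  /* low GPRs are mirrored in the PACA shadow */
                    to_book3s(vcpu)->shadow_vcpu.gpr[num] = val;
            vcpu->arch.gpr[num] = val; /* backing store read by vcpu_load */
    }
]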