author		Ben Hutchings <ben@decadent.org.uk>	2015-10-15 01:20:29 +0100
committer	Ben Hutchings <ben@decadent.org.uk>	2015-11-17 15:54:40 +0000
commit		005f90fa5c6bf0b7d0f08df1b0b712e7e8d92b6f (patch)
tree		ab43c51abb200a1a347fd3fc6cfca2977c07983a /arch
parent		0149138c4142da287d23f9d5c6038f7fb5e30ac2 (diff)
Revert "KVM: MMU: fix validation of mmio page fault"
This reverts commit 41e3025eacd6daafc40c3e7850fbcabc8b847805, which was
commit 6f691251c0350ac52a007c54bf3ef62e9d8cdc5e upstream.

The fix is only needed after commit f8f559422b6c ("KVM: MMU: fast
invalidate all mmio sptes"), included in Linux 3.11.

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
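For context, the check that this revert restores only has to tell zapped (or
half-zapped) sptes apart from live ones. On 32-bit PAE hosts an spte is written
as two 32-bit halves, so an MMIO spte that is being zapped concurrently can have
its low word already cleared while the high word still carries the MMIO mask
bits. Below is a minimal standalone sketch of that idea, using the names from
the diff that follows but an illustrative placeholder value for
shadow_mmio_mask (the real mask is set elsewhere in KVM):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the kernel's split view of a 64-bit spte on 32-bit PAE hosts. */
	union split_spte {
		struct {
			uint32_t spte_low;
			uint32_t spte_high;
		};
		uint64_t spte;
	};

	/* Placeholder mask for illustration only; not the value KVM uses. */
	static const uint64_t shadow_mmio_mask = 0xffull << 49;

	/* Same test as __check_direct_spte_mmio_pf in the diff below. */
	static int zapped_or_being_zapped(uint64_t spte)
	{
		union split_spte sspte = { .spte = spte };
		uint32_t high_mmio_mask = shadow_mmio_mask >> 32;

		if (spte == 0ull)		/* fully zapped */
			return 1;

		/* Low word already cleared, high word still shows the mmio bits. */
		return sspte.spte_low == 0 &&
		       (sspte.spte_high & high_mmio_mask) == high_mmio_mask;
	}

	int main(void)
	{
		uint64_t mmio_spte = shadow_mmio_mask | 0x1234;	/* hypothetical mmio spte */
		union split_spte half = { .spte = mmio_spte };

		half.spte_low = 0;	/* zapping clears the low word first */

		printf("fully zapped:   %d\n", zapped_or_being_zapped(0));
		printf("being zapped:   %d\n", zapped_or_being_zapped(half.spte));
		printf("live, non-mmio: %d\n", zapped_or_being_zapped(0x1000000263ull));
		return 0;
	}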
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	| 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cac7b2b5595f..4a949c7f6423 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -326,6 +326,12 @@ static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ /* It is valid if the spte is zapped. */
+ return spte == 0ull;
+}
#else
union split_spte {
struct {
@@ -430,6 +436,23 @@ retry:
return spte.spte;
}
+
+static bool __check_direct_spte_mmio_pf(u64 spte)
+{
+ union split_spte sspte = (union split_spte)spte;
+ u32 high_mmio_mask = shadow_mmio_mask >> 32;
+
+ /* It is valid if the spte is zapped. */
+ if (spte == 0ull)
+ return true;
+
+ /* It is valid if the spte is being zapped. */
+ if (sspte.spte_low == 0ull &&
+ (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+ return true;
+
+ return false;
+}
#endif
static bool spte_has_volatile_bits(u64 spte)
@@ -2872,6 +2895,21 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return vcpu_match_mmio_gva(vcpu, addr);
}
+
+/*
+ * On direct hosts, the last spte only allows two states
+ * for mmio page fault:
+ * - It is the mmio spte
+ * - It is zapped or it is being zapped.
+ *
+ * This function completely checks the spte when the last spte
+ * is not the mmio spte.
+ */
+static bool check_direct_spte_mmio_pf(u64 spte)
+{
+ return __check_direct_spte_mmio_pf(spte);
+}
+
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -2913,6 +2951,13 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
}
/*
+ * It's ok if the gva is remapped by other cpus on a shadow guest;
+ * it's a BUG if the gfn is not a mmio page.
+ */
+ if (direct && !check_direct_spte_mmio_pf(spte))
+ return -1;
+
+ /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/