author		Avi Kivity <avi@qumranet.com>	2007-01-05 16:36:50 -0800
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-05 23:55:26 -0800
commit		86a5ba025d0a0b251817d0efbeaf7037d4175d21
tree		35dbc71edaa0d242ba4c0ca429c41cff67df38d0
parent		139bdb2d9e410d448281057a37b53770324ccac8
[PATCH] KVM: MMU: Page table write flood protection
In fork() (or when we protect a page that is no longer a page table), we
can experience floods of writes to a page, which have to be emulated.  This
is expensive.  So, if we detect such a flood, zap the page so subsequent
writes can proceed natively.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
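The detection heuristic is small enough to model on its own. The sketch below is a hypothetical userspace rendition of the counter the hunks below add to struct kvm_vcpu; struct vcpu_write_tracker and pt_write_flooded() are invented names for illustration, while last_pt_write_gfn, last_pt_write_count, and the threshold of three consecutive writes come from the patch itself.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Hypothetical stand-in for the two fields the patch adds to struct kvm_vcpu. */
struct vcpu_write_tracker {
	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
};

/*
 * Returns 1 when the same guest frame has been written three or more
 * times in a row -- the "flood" condition the patch tests before
 * choosing to zap the shadow page rather than emulate yet another write.
 */
static int pt_write_flooded(struct vcpu_write_tracker *t, gfn_t gfn)
{
	if (gfn == t->last_pt_write_gfn) {
		++t->last_pt_write_count;
		return t->last_pt_write_count >= 3;
	}
	t->last_pt_write_gfn = gfn;
	t->last_pt_write_count = 1;
	return 0;
}

int main(void)
{
	struct vcpu_write_tracker t = { 0, 0 };
	gfn_t gfn = 0x1234;
	int i;

	/* Three consecutive writes to the same gfn trip the detector. */
	for (i = 0; i < 4; i++)
		printf("write %d: flooded=%d\n", i + 1, pt_write_flooded(&t, gfn));

	/* A write to any other gfn resets the counter. */
	printf("other gfn: flooded=%d\n", pt_write_flooded(&t, gfn + 1));
	return 0;
}

Resetting the counter on a write to a different frame keeps the detector cheap: two integers per vcpu, with no history beyond the last frame touched.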
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/kvm/kvm.h	3
-rw-r--r--	drivers/kvm/mmu.c	16
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6e4daf404146..201b2735ca91 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -238,6 +238,9 @@ struct kvm_vcpu {
struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
struct kvm_mmu mmu;

+ gfn_t last_pt_write_gfn;
+ int last_pt_write_count;
+
struct kvm_guest_debug guest_debug;

char fx_buf[FX_BUF_SIZE];
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 8cf3688f7e70..0e44aca9eee7 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -969,8 +969,17 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
unsigned page_offset;
unsigned misaligned;
int level;
+ int flooded = 0;
pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+ if (gfn == vcpu->last_pt_write_gfn) {
+ ++vcpu->last_pt_write_count;
+ if (vcpu->last_pt_write_count >= 3)
+ flooded = 1;
+ } else {
+ vcpu->last_pt_write_gfn = gfn;
+ vcpu->last_pt_write_count = 1;
+ }
index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
bucket = &vcpu->kvm->mmu_page_hash[index];
hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
@@ -978,11 +987,16 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
continue;
pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
- if (misaligned) {
+ if (misaligned || flooded) {
/*
* Misaligned accesses are too much trouble to fix
* up; also, they usually indicate a page is not used
* as a page table.
+ *
+ * If we're seeing too many writes to a page,
+ * it may no longer be a page table, or we may be
+ * forking, in which case it is better to unmap the
+ * page.
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, page->role.word);