author    Andrew Honig <ahonig@google.com>   2013-03-29 09:35:21 -0700
committer Gleb Natapov <gleb@redhat.com>     2013-04-07 13:05:35 +0300
commit    8f964525a121f2ff2df948dac908dcc65be21b5b (patch)
tree      1986d7677a1cae8f639c91812da2d8c6ed5bba26 /include
parent    09a6e1f4ad32243989b30485f78985c0923284cd (diff)
KVM: Allow cross page reads and writes from cached translations.
This patch adds support for kvm_gfn_to_hva_cache_init functions for reads and writes that will cross a page. If the range falls within the same memslot, then this will be a fast operation. If the range is split between two memslots, then the slower kvm_read_guest and kvm_write_guest are used.

Tested: Tested against kvm_clock unit tests.

Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
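As a hedged illustration of how a caller might use the extended interface (the two prototypes are the ones shown in the diff below; the example_shared_rec structure and example_publish() helper are made up for this sketch and are not part of the patch):

    #include <linux/kvm_host.h>

    /* Illustrative guest-shared record; the layout is invented for the example. */
    struct example_shared_rec {
            u64 seq;
            u64 payload[3];
    };

    static int example_publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                               gpa_t gpa, struct example_shared_rec *rec)
    {
            int r;

            /*
             * Pass the full length at init time so the cache can tell up
             * front whether the range crosses a page boundary.
             */
            r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*rec));
            if (r)
                    return r;

            /* Fast cached write when the whole range stays in one memslot. */
            return kvm_write_guest_cached(kvm, ghc, rec, sizeof(*rec));
    }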
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kvm_host.h   2
-rw-r--r--  include/linux/kvm_types.h  1
2 files changed, 2 insertions, 1 deletion
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cad77fe09d77..c13958251927 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- gpa_t gpa);
+ gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index fa7cc7244cbd..b0bcce0ddc95 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
unsigned long hva;
+ unsigned long len;
struct kvm_memory_slot *memslot;
};
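For context, a minimal sketch of the kind of check the new len field makes possible; this is an assumption for illustration, not the code from virt/kvm/kvm_main.c, and the ghc_crosses_page() helper name is hypothetical:

    #include <linux/kvm_host.h>
    #include <linux/mm.h>

    /*
     * Hypothetical helper (not part of this patch): a gpa/len range that
     * spills past the end of its first page may also span two memslots,
     * in which case the commit message says the slower kvm_read_guest()/
     * kvm_write_guest() paths are used instead of the cached hva.
     */
    static inline bool ghc_crosses_page(gpa_t gpa, unsigned long len)
    {
            return offset_in_page(gpa) + len > PAGE_SIZE;
    }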