author	Bandan Das <bsd@redhat.com>	2016-07-12 18:18:49 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-07-14 09:03:14 +0200
commit	ffb128c89b77b44da18ccf51844a8e750e2c427a (patch)
tree	2343268f9b507a99abf34bf20d99b1158488b67e	/arch/x86/include/asm/kvm_host.h
parent	812f30b234539ccb0139f92dfdbec1e8158cf535 (diff)
kvm: mmu: don't set the present bit unconditionally
To support execute only mappings on behalf of L1 hypervisors, we need
to teach set_spte() to honor all three of L1's XWR bits.  As a start,
add a new variable "shadow_present_mask" that will be set for non-EPT
shadow paging and clear for EPT.

Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7a628fb6a2c2..d0845b289adb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1031,7 +1031,7 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
- u64 dirty_mask, u64 nx_mask, u64 x_mask);
+ u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
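
The hunk above only changes the kvm_mmu_set_mask_ptes() prototype; callers must be
updated to pass the new p_mask argument. The following is an illustrative sketch, not
part of this diff: the exact mask names (PT_PRESENT_MASK, VMX_EPT_READABLE_MASK,
cpu_has_vmx_ept_execute_only(), enable_ept_ad_bits) are assumptions based on the KVM
tree of this era and may differ from the actual follow-up changes in x86.c and vmx.c.

	/*
	 * Illustrative only: legacy shadow paging keeps an architectural
	 * present bit (bit 0), so it passes that bit as p_mask.
	 */
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			      PT_DIRTY_MASK, PT64_NX_MASK, 0ull,
			      PT_PRESENT_MASK);

	/*
	 * Illustrative only: EPT has no present bit.  When execute-only
	 * mappings are supported, p_mask is left clear so set_spte() no
	 * longer forces the readable bit on every shadow PTE.
	 */
	kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
			      enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
			      enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
			      0ull, VMX_EPT_EXECUTABLE_MASK,
			      cpu_has_vmx_ept_execute_only() ?
					0ull : VMX_EPT_READABLE_MASK);

With p_mask plumbed through this way, set_spte() can OR in shadow_present_mask instead
of setting the present/readable bit unconditionally, which is what allows EPT
execute-only (X-only, no R/W) mappings on behalf of L1.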