author | Nicolai Stange <nstange@suse.de> | 2018-07-18 19:07:38 +0200 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-08-15 18:12:57 +0200 |
commit | ae217320c17df01283add6f8a549c3fc70580423 (patch) | |
tree | 1c5fdc965a81e6ab6c4e7ad12e4ca6591487d5ac /arch/x86/kvm/vmx.c | |
parent | a20c88c2a346dd90ce244f73647cd6bd8e1cacb3 (diff) | |
x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
commit 288d152c23dcf3c09da46c5c481903ca10ebfef7 upstream
The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache.
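For orientation, here is a rough C-level sketch of what that slow path does; the kernel performs these loops in inline assembly (see the diff below), and the helper name and buffer parameter here are purely illustrative:

```c
#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative C rendering of the slow flush path. "buf" stands in for
 * vmx_l1d_flush_pages and "size" for the length of the flush buffer;
 * the real code in vmx_l1d_flush() does this in inline assembly.
 */
static void l1d_flush_sketch(const volatile uint8_t *buf, size_t size)
{
	volatile uint8_t sink = 0;
	size_t i;

	/* First touch one byte per 4 KiB page so every page is in the TLB. */
	for (i = 0; i < size; i += 4096)
		sink = buf[i];

	/* Then read one byte per 64-byte line to displace the L1D contents. */
	for (i = 0; i < size; i += 64)
		sink = buf[i];

	(void)sink;
}
```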
However, these pages are never cleared and, in theory, their data could be
leaked.
More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
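To see why merging matters, a short back-of-the-envelope sketch; the L1D_CACHE_ORDER value of 4 and the 32 KiB L1D size are assumptions used only for illustration:

```c
#include <stdio.h>

/* Assumed for illustration: L1D_CACHE_ORDER is 4 in the kernels this
 * patch targets, and 32 KiB is a typical L1D size. */
#define L1D_CACHE_ORDER	4
#define PAGE_SIZE	4096
#define L1D_SIZE	(32 * 1024)

int main(void)
{
	unsigned int pages = 1u << L1D_CACHE_ORDER;

	/* 16 pages x 4 KiB = 64 KiB of distinct physical lines, enough to
	 * displace a 32 KiB physically tagged L1D. */
	printf("flush buffer spans %u KiB, L1D is %u KiB\n",
	       pages * PAGE_SIZE / 1024, L1D_SIZE / 1024);

	/* If KSM merged all the pages onto one physical page, reads would
	 * touch only 4 KiB of distinct physical lines and could no longer
	 * evict the whole L1D. */
	printf("after a full KSM merge only %u KiB of physical lines remain\n",
	       PAGE_SIZE / 1024);
	return 0;
}
```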
Fix this by initializing each of the vmx_l1d_flush_pages with a different
pattern.
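Distinct patterns help because KSM only merges pages whose contents are byte-for-byte identical. A minimal userspace sketch of the same idea (the buffer size and the MADV_MERGEABLE opt-in are illustrative; the kernel fix simply memsets each page with i + 1, as the diff below shows):

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define NPAGES		16
#define PAGE_SIZE	4096

int main(void)
{
	unsigned char *buf;
	unsigned int i;

	buf = mmap(NULL, NPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Opt the region in to KSM scanning, roughly as a nested
	 * hypervisor's guest memory might be. */
	if (madvise(buf, NPAGES * PAGE_SIZE, MADV_MERGEABLE))
		perror("madvise");

	/*
	 * KSM only merges pages with identical contents, so giving every
	 * page a distinct byte value keeps them on distinct host physical
	 * pages -- the same idea as the per-page memset added by this patch.
	 */
	for (i = 0; i < NPAGES; i++)
		memset(buf + i * PAGE_SIZE, i + 1, PAGE_SIZE);

	printf("filled %u mergeable pages with distinct patterns\n", NPAGES);
	munmap(buf, NPAGES * PAGE_SIZE);
	return 0;
}
```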
Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.
Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 17 |
1 file changed, 14 insertions, 3 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d077869f9228..56c410841298 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -214,6 +214,7 @@ static void *vmx_l1d_flush_pages;
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	struct page *page;
+	unsigned int i;
 
 	if (!enable_ept) {
 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -246,6 +247,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 		if (!page)
 			return -ENOMEM;
 		vmx_l1d_flush_pages = page_address(page);
+
+		/*
+		 * Initialize each page with a different pattern in
+		 * order to protect against KSM in the nested
+		 * virtualization case.
+		 */
+		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+			       PAGE_SIZE);
+		}
 	}
 
 	l1tf_vmx_mitigation = l1tf;
@@ -9176,7 +9187,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* First ensure the pages are in the TLB */
 		"xorl	%%eax, %%eax\n"
 		".Lpopulate_tlb:\n\t"
-		"movzbl	(%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl	$4096, %%eax\n\t"
 		"cmpl	%%eax, %[size]\n\t"
 		"jne	.Lpopulate_tlb\n\t"
@@ -9185,12 +9196,12 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* Now fill the cache */
 		"xorl	%%eax, %%eax\n"
 		".Lfill_cache:\n"
-		"movzbl	(%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl	$64, %%eax\n\t"
 		"cmpl	%%eax, %[size]\n\t"
 		"jne	.Lfill_cache\n\t"
 		"lfence\n"
-		:: [empty_zp] "r" (vmx_l1d_flush_pages),
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
 		    [size] "r" (size)
 		: "eax", "ebx", "ecx", "edx");
 }