author     Andy Lutomirski <luto@kernel.org>  2015-07-15 10:29:37 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-08-16 20:52:15 -0700
commit     d8246ca4e3ce08c9ed98ebe292f36ee2bc5f54ab (patch)
tree       f3b7f0b2fc80a69d692740326d4d2aa954402c9d /arch
parent     1dd191d72fdfb46988d8c97c8b19fc6d83d44d3e (diff)
x86/nmi/64: Reorder nested NMI checks
commit a27507ca2d796cfa8d907de31ad730359c8a6d06 upstream.

Check the repeat_nmi .. end_repeat_nmi special case first. The next
patch will rework the RSP check and, as a side effect, the RSP check
will no longer detect repeat_nmi .. end_repeat_nmi, so we'll need this
ordering of the checks.

Note: this is more subtle than it appears. The check for repeat_nmi ..
end_repeat_nmi jumps straight out of the NMI code instead of adjusting
the "iret" frame to force a repeat. This is necessary, because the code
between repeat_nmi and end_repeat_nmi sets "NMI executing" and then
writes to the "iret" frame itself. If a nested NMI comes in and
modifies the "iret" frame while repeat_nmi is also modifying it, we'll
end up with garbage.

The old code got this right, as does the new code, but the new code is
a bit more explicit. If we were to move the check right after the "NMI
executing" check, then we'd get it wrong and have random crashes.

( Because the "NMI executing" check would jump to the code that would
  modify the "iret" frame without checking if the interrupted NMI was
  currently modifying it. )

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
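[Editor's note: to make the reordered logic concrete, here is a minimal
C sketch of the checks described above. It is illustrative only: the
function name, enum, and parameters are hypothetical, and the real
logic is the x86-64 assembly in arch/x86/kernel/entry_64.S shown in the
diff below.]

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative model of the reordered nested-NMI checks; names other
 * than repeat_nmi/end_repeat_nmi are hypothetical, not kernel symbols.
 */
enum nmi_disposition { NMI_RESUME_OUTER, NMI_NESTED, NMI_NOT_NESTED };

static enum nmi_disposition
classify_nmi(uintptr_t rip, uintptr_t repeat_nmi, uintptr_t end_repeat_nmi,
	     bool nmi_executing)
{
	/*
	 * Check repeat_nmi .. end_repeat_nmi first: the outer NMI is
	 * writing the "iret" frame, so we must not modify it; just
	 * resume the outer NMI instead.
	 */
	if (rip >= repeat_nmi && rip < end_repeat_nmi)
		return NMI_RESUME_OUTER;

	/*
	 * Only then does "NMI executing" mean it is safe to point the
	 * "iret" frame at repeat_nmi to force another iteration.
	 */
	if (nmi_executing)
		return NMI_NESTED;

	return NMI_NOT_NESTED;
}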
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/entry_64.S  34
1 file changed, 18 insertions, 16 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0cc6af62f34d..3824867c8993 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1535,7 +1535,24 @@ ENTRY(nmi)
/*
* Determine whether we're a nested NMI.
*
- * First check "NMI executing". If it's set, then we're nested.
+ * If we interrupted kernel code between repeat_nmi and
+ * end_repeat_nmi, then we are a nested NMI. We must not
+ * modify the "iret" frame because it's being written by
+ * the outer NMI. That's okay; the outer NMI handler is
+ * about to call do_nmi anyway, so we can just
+ * resume the outer NMI.
+ */
+
+ movq $repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja 1f
+ movq $end_repeat_nmi, %rdx
+ cmpq 8(%rsp), %rdx
+ ja nested_nmi_out
+1:
+
+ /*
+ * Now check "NMI executing". If it's set, then we're nested.
* This will not detect if we interrupted an outer NMI just
* before IRET.
*/
@@ -1562,21 +1579,6 @@ ENTRY(nmi)

nested_nmi:
/*
- * If we interrupted an NMI that is between repeat_nmi and
- * end_repeat_nmi, then we must not modify the "iret" frame
- * because it's being written by the outer NMI. That's okay;
- * the outer NMI handler is about to call do_nmi anyway,
- * so we can just resume the outer NMI.
- */
- movq $repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja 1f
- movq $end_repeat_nmi, %rdx
- cmpq 8(%rsp), %rdx
- ja nested_nmi_out
-
-1:
- /*
* Modify the "iret" frame to point to repeat_nmi, forcing another
* iteration of NMI handling.
*/