author		Marc Zyngier <marc.zyngier@arm.com>	2018-07-20 10:56:22 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-07-22 14:27:41 +0200
commit		be331630903b1c6355178d116d3bfe52f20a3ac7 (patch)
tree		237362db733d5bd97149d6f34654db4cab907ad4 /arch
parent		d1b5c1958391b921bce2ffd95ddfcd3f1e950c99 (diff)
arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
commit 8e2906245f1e3b0d027169d9f2e55ce0548cb96e upstream.

In order for the kernel to protect itself, let's call the SSBD mitigation
implemented by the higher exception level (either hypervisor or firmware)
on each transition between userspace and kernel.

We must take the PSCI conduit into account in order to target the
right exception level, hence the introduction of a runtime patching
callback.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Grall <julien.grall@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
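For context, the NOP that gets patched into an HVC or SMC instruction below is the
assembly counterpart of an SMCCC 1.1 fast call. A minimal C sketch of the same
conduit-dependent call (assuming the arm_smccc_1_1_hvc()/arm_smccc_1_1_smc()
helpers from <linux/arm-smccc.h> and the psci_ops.conduit field; the function
name here is illustrative and not part of this patch):

/* Sketch only: C-level equivalent of the apply_ssbd macro added below. */
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void ssbd_smccc_call(unsigned long state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	default:
		break;		/* no known conduit: nothing to call */
	}
}

A C call like this is too heavy for the exception entry/exit paths, which is why
the patch instead plants a single NOP and lets arm64_update_smccc_conduit()
rewrite it once at boot, after the PSCI conduit is known.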
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/kernel/cpu_errata.c	24
-rw-r--r--	arch/arm64/kernel/entry.S	22
2 files changed, 46 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 2de62aa91303..d4cbdbe1d1eb 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -187,6 +187,30 @@ static int enable_smccc_arch_workaround_1(void *data)
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+#ifdef CONFIG_ARM64_SSBD
+void __init arm64_update_smccc_conduit(struct alt_instr *alt,
+				       __le32 *origptr, __le32 *updptr,
+				       int nr_inst)
+{
+	u32 insn;
+
+	BUG_ON(nr_inst != 1);
+
+	switch (psci_ops.conduit) {
+	case PSCI_CONDUIT_HVC:
+		insn = aarch64_insn_get_hvc_value();
+		break;
+	case PSCI_CONDUIT_SMC:
+		insn = aarch64_insn_get_smc_value();
+		break;
+	default:
+		return;
+	}
+
+	*updptr = cpu_to_le32(insn);
+}
+#endif /* CONFIG_ARM64_SSBD */
+
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
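As a point of reference (not part of the patch), the single instruction written
over the NOP slot by the callback above is the conduit's immediate-zero call. A
sketch, with the encodings assumed from the AArch64 ISA rather than taken from
this diff:

/* Sketch only: the one-instruction rewrite performed over the nop placeholder.
 * Encodings assumed from the AArch64 ISA reference, not from this patch. */
#include <stdint.h>

#define INSN_HVC_0	0xd4000002u	/* hvc #0 - conduit is the hypervisor (EL2) */
#define INSN_SMC_0	0xd4000003u	/* smc #0 - conduit is firmware (EL3)        */

static inline uint32_t ssbd_conduit_insn(int via_hvc)
{
	/* stored little-endian, matching the cpu_to_le32() above */
	return via_hvc ? INSN_HVC_0 : INSN_SMC_0;
}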
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7fed00b47692..7651e9164943 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -18,6 +18,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -95,6 +96,18 @@ alternative_else_nop_endif
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
+	// This macro corrupts x0-x3. It is the caller's duty
+	// to save/restore them if required.
+	.macro	apply_ssbd, state
+#ifdef CONFIG_ARM64_SSBD
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+	mov	w1, #\state
+alternative_cb	arm64_update_smccc_conduit
+	nop					// Patched to SMC/HVC #0
+alternative_cb_end
+#endif
+	.endm
+
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
@@ -122,6 +135,13 @@ alternative_else_nop_endif
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
+	apply_ssbd 1
+
+#ifdef CONFIG_ARM64_SSBD
+	ldp	x0, x1, [sp, #16 * 0]
+	ldp	x2, x3, [sp, #16 * 1]
+#endif
+
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
@@ -190,6 +210,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
+	apply_ssbd 0
+
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22