author     Suzuki K Poulose <suzuki.poulose@arm.com>          2016-11-08 13:56:20 +0000
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2018-06-06 16:44:35 +0200
commit     fe64d7d6ab83b03c933d9f76643b810857774306 (patch)
tree       1f69b908e64e3162afffcb5905ef3067fcbb7af8
parent     e1928457073c996ffc681be2b6f9576706fbcf8d (diff)
arm64: Add hypervisor safe helper for checking constant capabilities
commit a4023f682739439b434165b54af7cb3676a4766e upstream.

The hypervisor may not have full access to the kernel data structures and hence
cannot safely use the cpus_have_cap() helper for checking the system capability.
Add a safe helper for hypervisors to check a constant system capability, which
*doesn't* fall back to checking the bitmap maintained by the kernel. With this,
make cpus_have_cap() only check the bitmask and force constant cap checks to use
the new API for quicker checks.

Cc: Robert Ritcher <rritcher@cavium.com>
Cc: Tirumalesh Chalamarla <tchalamarla@cavium.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[4.9: restore cpus_have_const_cap() to previously-backported code]
Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
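As a minimal standalone sketch of the split described above: constant caps are
answered from static keys (branches patched into the kernel text, needing no
access to kernel data), while cpus_have_cap() now only reads the
kernel-maintained bitmap. The two helper names follow the patch; everything
else below (the mock key array, mock bitmap, cap number and main()) is invented
for illustration and compiles with any C compiler. The in-tree versions use the
cpu_hwcap_keys[] jump-label array and the cpu_hwcaps DECLARE_BITMAP shown in
the diff.

/*
 * Standalone mock for illustration only; not the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define ARM64_NCAPS          64
#define ARM64_HAS_32BIT_EL0   0        /* mock capability number */

static bool          mock_hwcap_keys[ARM64_NCAPS]; /* stands in for static keys    */
static unsigned long mock_hwcaps_bitmap;           /* stands in for DECLARE_BITMAP */

/* Constant caps: resolved from (mock) static keys, never from kernel data,
 * so a hypervisor without access to the kernel bitmap can use it safely. */
static inline bool cpus_have_const_cap(int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return mock_hwcap_keys[num];   /* static_branch_unlikely() in-tree */
}

/* Runtime checks: only consult the kernel-maintained bitmap. */
static inline bool cpus_have_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return mock_hwcaps_bitmap & (1UL << num);
}

int main(void)
{
        /* In-tree, cpus_set_cap() and the static-key machinery set both. */
        mock_hwcap_keys[ARM64_HAS_32BIT_EL0] = true;
        mock_hwcaps_bitmap |= 1UL << ARM64_HAS_32BIT_EL0;

        printf("const cap check : %d\n", cpus_have_const_cap(ARM64_HAS_32BIT_EL0));
        printf("bitmap cap check: %d\n", cpus_have_cap(ARM64_HAS_32BIT_EL0));
        return 0;
}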
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  19
-rw-r--r--  arch/arm64/include/asm/kvm_host.h     2
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h      2
-rw-r--r--  arch/arm64/include/asm/mmu.h          2
-rw-r--r--  arch/arm64/kernel/cpufeature.c        5
-rw-r--r--  arch/arm64/kernel/process.c           2
-rw-r--r--  drivers/irqchip/irq-gic-v3.c         13
7 files changed, 20 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0bc0b1de90c4..9890d20f2356 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -9,8 +9,6 @@
#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H
-#include <linux/jump_label.h>
-
#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -27,6 +25,8 @@
#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
/* CPU feature register tracking */
@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
return elf_hwcap & (1UL << num);
}
+/* System capability check for constant caps */
+static inline bool cpus_have_const_cap(int num)
+{
+ if (num >= ARM64_NCAPS)
+ return false;
+ return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
static inline bool cpus_have_cap(unsigned int num)
{
if (num >= ARM64_NCAPS)
return false;
- if (__builtin_constant_p(num))
- return static_branch_unlikely(&cpu_hwcap_keys[num]);
- else
- return test_bit(num, cpu_hwcaps);
+ return test_bit(num, cpu_hwcaps);
}
static inline void cpus_set_cap(unsigned int num)
@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+ return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}
static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0a33ea304e63..7f402f64c744 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -398,7 +398,7 @@ static inline void __cpu_init_stage2(void)
static inline bool kvm_arm_harden_branch_predictor(void)
{
- return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
}
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index eac73a640ea7..824c83db9b47 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void)
vect = __bp_harden_hyp_vecs_start +
data->hyp_vectors_slot * SZ_2K;
- if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
vect = lm_alias(vect);
}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index d51158a61892..6ac34c75f4e1 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -37,7 +37,7 @@ typedef struct {
static inline bool arm64_kernel_unmapped_at_el0(void)
{
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
- cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
typedef void (*bp_hardening_cb_t)(void);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a0ee01202503..e28665411bd1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
#endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
* ThunderX leads to apparent I-cache corruption of kernel text, which
* ends as well as you might imagine. Don't even try.
*/
- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1;
}
@@ -1203,5 +1204,5 @@ void __init setup_cpu_features(void)
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+ return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0972ce58316d..e917d119490c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
- cpus_have_cap(ARM64_HAS_UAO))
+ cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 0b1d5bdd0862..f7b8681aed3f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
}
#ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
static u64 __maybe_unused gic_read_iar(void)
{
- if (static_branch_unlikely(&is_cavium_thunderx))
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
return gic_read_iar_cavium_thunderx();
else
return gic_read_iar_common();
@@ -908,14 +907,6 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select,
};
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
- static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -938,8 +929,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.nr_redist_regions = nr_redist_regions;
gic_data.redist_stride = redist_stride;
- gicv3_enable_quirks();
-
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)