author     Radim Krčmář <rkrcmar@redhat.com>    2016-09-29 16:01:51 +0200
committer  Radim Krčmář <rkrcmar@redhat.com>    2016-09-29 16:01:51 +0200
commit     45ca877ad0519a02c22aaff2e2cdf333a1421a0a (patch)
tree       d7abbc767611379f878bf30b8c3b507d4e31982e /arch/arm/include/asm
parent     c5a6d5f7faad8549bb5ff7e3e5792e33933c5b9f (diff)
parent     0099b7701f5296a758d9e6b945ec96f96847cc2f (diff)
Merge tag 'kvm-arm-for-v4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into next
KVM/ARM Changes for v4.9

- Various cleanups and removal of redundant code
- Two important fixes for not using an in-kernel irqchip
- A bit of optimizations
- Handle SError exceptions and present them to guests if appropriate
- Proxying of GICV access at EL2 if guest mappings are unsafe
- GICv3 on AArch32 on ARMv8
- Preparations for GICv3 save/restore, including ABI docs
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h   | 100
-rw-r--r--  arch/arm/include/asm/cp15.h         |  15
-rw-r--r--  arch/arm/include/asm/cputype.h      |   1
-rw-r--r--  arch/arm/include/asm/kvm_asm.h      |   7
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h  |  35
-rw-r--r--  arch/arm/include/asm/kvm_host.h     |   5
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h      |  18
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h      |  28
8 files changed, 146 insertions(+), 63 deletions(-)
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index e08d15184056..1fee657d3827 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,9 +22,7 @@
#include <linux/io.h>
#include <asm/barrier.h>
-
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
-#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
+#include <asm/cp15.h>
#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
@@ -98,65 +96,135 @@
#define ICH_AP1R2 __AP1Rx(2)
#define ICH_AP1R3 __AP1Rx(3)
+/* A32-to-A64 mappings used by VGIC save/restore */
+
+#define CPUIF_MAP(a32, a64) \
+static inline void write_ ## a64(u32 val) \
+{ \
+ write_sysreg(val, a32); \
+} \
+static inline u32 read_ ## a64(void) \
+{ \
+ return read_sysreg(a32); \
+} \
+
+#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
+static inline void write_ ## a64(u64 val) \
+{ \
+ write_sysreg(lower_32_bits(val), a32lo);\
+ write_sysreg(upper_32_bits(val), a32hi);\
+} \
+static inline u64 read_ ## a64(void) \
+{ \
+ u64 val = read_sysreg(a32lo); \
+ \
+ val |= (u64)read_sysreg(a32hi) << 32; \
+ \
+ return val; \
+}
+
+CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
+CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
+CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
+CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
+CPUIF_MAP(ICH_ELSR, ICH_ELSR_EL2)
+CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
+CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
+CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
+CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
+CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
+CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
+CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
+CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
+CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
+CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
+CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
+
+CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
+CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
+CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
+CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
+CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
+CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
+CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
+CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
+CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
+CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
+CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
+CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
+CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
+CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
+CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
+CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
+
+#define read_gicreg(r) read_##r()
+#define write_gicreg(v, r) write_##r(v)
+
/* Low-level accessors */
static inline void gic_write_eoir(u32 irq)
{
- asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
+ write_sysreg(irq, ICC_EOIR1);
isb();
}
static inline void gic_write_dir(u32 val)
{
- asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
+ write_sysreg(val, ICC_DIR);
isb();
}
static inline u32 gic_read_iar(void)
{
- u32 irqstat;
+ u32 irqstat = read_sysreg(ICC_IAR1);
- asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
dsb(sy);
+
return irqstat;
}
static inline void gic_write_pmr(u32 val)
{
- asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
+ write_sysreg(val, ICC_PMR);
}
static inline void gic_write_ctlr(u32 val)
{
- asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
+ write_sysreg(val, ICC_CTLR);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
+ write_sysreg(val, ICC_IGRPEN1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
+ write_sysreg(val, ICC_SGI1R);
}
static inline u32 gic_read_sre(void)
{
- u32 val;
-
- asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
- return val;
+ return read_sysreg(ICC_SRE);
}
static inline void gic_write_sre(u32 val)
{
- asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
+ write_sysreg(val, ICC_SRE);
isb();
}
+static inline void gic_write_bpr1(u32 val)
+{
+#if defined(__write_sysreg) && defined(ICC_BPR1)
+ write_sysreg(val, ICC_BPR1);
+#else
+ asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
+#endif
+}
+
/*
* Even in 32bit systems that use LPAE, there is no guarantee that the I/O
* interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
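
The CPUIF_MAP_LO_HI() helpers added above stitch a 64-bit, AArch64-style register view out of two 32-bit cp15 accesses, using lower_32_bits()/upper_32_bits() on the write side and a shift-and-or on the read side. Below is a rough, host-runnable sketch of that split/recombine logic only; the real sysreg accessors are replaced by a plain array standing in for the two banked 32-bit registers, and the helpers are open-coded, so nothing here touches actual hardware.

	#include <stdint.h>
	#include <stdio.h>

	/* stand-ins for the cp15 accessors; the index picks a fake 32-bit register */
	static uint32_t fake_cp15[2];
	#define write_sysreg(val, reg)	(fake_cp15[reg] = (val))
	#define read_sysreg(reg)	(fake_cp15[reg])

	#define lower_32_bits(v)	((uint32_t)(v))
	#define upper_32_bits(v)	((uint32_t)((v) >> 32))

	/* mirrors CPUIF_MAP_LO_HI: one 64-bit access becomes a lo and a hi access */
	static void write_ICH_LR0_EL2(uint64_t val)
	{
		write_sysreg(lower_32_bits(val), 0);	/* ICH_LR0  (low word)  */
		write_sysreg(upper_32_bits(val), 1);	/* ICH_LRC0 (high word) */
	}

	static uint64_t read_ICH_LR0_EL2(void)
	{
		uint64_t val = read_sysreg(0);

		val |= (uint64_t)read_sysreg(1) << 32;
		return val;
	}

	int main(void)
	{
		write_ICH_LR0_EL2(0x90000000deadbeefULL);
		printf("read back: %#llx\n", (unsigned long long)read_ICH_LR0_EL2());
		return 0;
	}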
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index c3f11524f10c..dbdbce1b3a72 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -49,6 +49,21 @@
#ifdef CONFIG_CPU_CP15
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
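
With __ACCESS_CP15() now carrying both mnemonics plus the stringified operand list, read_sysreg() and write_sysreg() simply select the right one. As a hand-expanded sketch (only meaningful inside an ARM kernel build, shown here to make the indirection concrete), the write in gic_write_eoir() above, with ICC_EOIR1 defined as __ACCESS_CP15(c12, 0, c12, 1), boils down to:

	static inline void gic_write_eoir_expanded(u32 irq)
	{
		/*
		 * write_sysreg(irq, ICC_EOIR1)
		 *   -> __write_sysreg(irq, "mrc", "mcr", "p15, 0, %0, c12, c12, 1", u32)
		 *   -> the "mcr" (write) mnemonic is pasted in front of the operand list:
		 */
		asm volatile("mcr p15, 0, %0, c12, c12, 1" : : "r" ((u32)(irq)));
		isb();
	}

	/*
	 * read_sysreg() goes the other way: it picks the "mrc" mnemonic and returns
	 * the register value through an "=r" output operand.
	 */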
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 1ee94c716a7f..e2d94c1b07b8 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -55,6 +55,7 @@
#define MPIDR_LEVEL_BITS 8
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+#define MPIDR_LEVEL_SHIFT(level) (MPIDR_LEVEL_BITS * level)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
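
The new MPIDR_LEVEL_SHIFT() helper is just MPIDR_LEVEL_BITS scaled by the level, i.e. the same shift that MPIDR_AFFINITY_LEVEL() already computes inline. A small, self-contained sketch of the field extraction, with the existing macro rewritten in terms of the new one (the sample MPIDR value is arbitrary, chosen only for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define MPIDR_LEVEL_BITS		8
	#define MPIDR_LEVEL_MASK		((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_LEVEL_SHIFT(level)	(MPIDR_LEVEL_BITS * level)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

	int main(void)
	{
		uint32_t mpidr = 0x00020103;	/* arbitrary: Aff2=2, Aff1=1, Aff0=3 */

		for (int level = 0; level < 3; level++)
			printf("Aff%d = %u\n", level,
			       (unsigned)MPIDR_AFFINITY_LEVEL(mpidr, level));
		return 0;
	}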
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 58faff5f1eb2..d7ea6bcb29bf 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -21,6 +21,10 @@
#include <asm/virt.h>
+#define ARM_EXIT_WITH_ABORT_BIT 31
+#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
+
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
#define ARM_EXCEPTION_SOFTWARE 2
@@ -68,6 +72,9 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
extern void __kvm_hyp_reset(unsigned long);
+
+extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */
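
The new ARM_EXIT_WITH_ABORT_BIT lets hyp return an exit reason with a "pending abort" flag folded into its top bit; ARM_EXCEPTION_CODE() and ARM_ABORT_PENDING() split the combined value back apart. A minimal, self-contained sketch of that encoding (the combination below is made up purely to exercise the bit math, reusing ARM_EXCEPTION_SOFTWARE from the list above):

	#include <stdint.h>
	#include <stdio.h>

	#define ARM_EXIT_WITH_ABORT_BIT	31
	#define ARM_EXCEPTION_CODE(x)	((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
	#define ARM_ABORT_PENDING(x)	!!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))

	#define ARM_EXCEPTION_SOFTWARE	2

	int main(void)
	{
		/* an exit reason with the abort bit folded in */
		uint32_t exit_code = ARM_EXCEPTION_SOFTWARE |
				     (1U << ARM_EXIT_WITH_ABORT_BIT);

		printf("code = %u, abort pending = %d\n",
		       (unsigned)ARM_EXCEPTION_CODE(exit_code),
		       ARM_ABORT_PENDING(exit_code));
		return 0;
	}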
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index ee5328fc4b06..9a8a45aaf19a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -40,18 +40,29 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
*vcpu_reg(vcpu, reg_num) = val;
}
-bool kvm_condition_valid(struct kvm_vcpu *vcpu);
-void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ return kvm_condition_valid32(vcpu);
+}
+
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ kvm_skip_instr32(vcpu, is_wide_instr);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr = HCR_GUEST_MASK;
}
-static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+static inline unsigned long vcpu_get_hcr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.hcr;
}
@@ -61,7 +72,7 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
vcpu->arch.hcr = hcr;
}
-static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return 1;
}
@@ -71,9 +82,9 @@ static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
-static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
+ return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -93,11 +104,21 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
return cpsr_mode > USR_MODE;;
}
-static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}
+static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ if (hsr & HSR_CV)
+ return (hsr & HSR_COND) >> HSR_COND_SHIFT;
+
+ return -1;
+}
+
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
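
kvm_vcpu_get_condition() returns the condition field of a trapped instruction when HSR.CV marks it valid, and -1 otherwise so that kvm_condition_valid32() knows it must decode the instruction itself. A small, self-contained sketch of that extraction, assuming the usual HSR layout (CV at bit 24, COND in bits [23:20]) rather than the kernel's HSR_* definitions, which are not part of this patch:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed HSR field layout; not taken from the patch */
	#define HSR_CV		(1U << 24)
	#define HSR_COND_SHIFT	20
	#define HSR_COND	(0xfU << HSR_COND_SHIFT)

	static int get_condition(uint32_t hsr)
	{
		if (hsr & HSR_CV)
			return (hsr & HSR_COND) >> HSR_COND_SHIFT;

		return -1;	/* not valid: caller must decode the instruction */
	}

	int main(void)
	{
		uint32_t hsr = HSR_CV | (0xeU << HSR_COND_SHIFT);	/* COND = AL */

		printf("cond = %d\n", get_condition(hsr));
		printf("cond = %d\n", get_condition(0));
		return 0;
	}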
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 6ad21f04a922..2d19e02d03fd 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -39,7 +39,12 @@
#include <kvm/arm_vgic.h>
+
+#ifdef CONFIG_ARM_GIC_V3
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+#else
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#endif
#define KVM_REQ_VCPU_EXIT 8
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 6eaff28f2ff3..343135ede5fa 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -20,28 +20,15 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
+#include <asm/cp15.h>
#include <asm/kvm_mmu.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
- "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
- "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
#define __ACCESS_VFP(CRn) \
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
-#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
-
-#define __read_sysreg(r, w, c, t) ({ \
- t __val; \
- asm volatile(r " " c : "=r" (__val)); \
- __val; \
-})
-#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
-
#define write_special(v, r) \
asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
#define read_special(r) ({ \
@@ -119,6 +106,9 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 3bb803d6814b..74a44727f8e1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -63,37 +63,13 @@ void kvm_clear_hyp_idmap(void);
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
*pmd = new_pmd;
- flush_pmd_entry(pmd);
+ dsb(ishst);
}
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
*pte = new_pte;
- /*
- * flush_pmd_entry just takes a void pointer and cleans the necessary
- * cache entries, so we can reuse the function for ptes.
- */
- flush_pmd_entry(pte);
-}
-
-static inline void kvm_clean_pgd(pgd_t *pgd)
-{
- clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
-}
-
-static inline void kvm_clean_pmd(pmd_t *pmd)
-{
- clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
-}
-
-static inline void kvm_clean_pmd_entry(pmd_t *pmd)
-{
- clean_pmd_entry(pmd);
-}
-
-static inline void kvm_clean_pte(pte_t *pte)
-{
- clean_pte_table(pte);
+ dsb(ishst);
}
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)