author     Max Krummenacher <max.krummenacher@toradex.com>  2020-07-07 14:39:39 +0200
committer  Max Krummenacher <max.krummenacher@toradex.com>  2020-07-07 18:42:28 +0200
commit     bb02de12d4adbd463837a57556fe6a4a29535e29 (patch)
tree       83674f672af49651fe39c7d7eee52ae513ea4107 /virt
parent     babac008e5cf168abca1a85bda2e8071ca27a5c0 (diff)
parent     fd8cd8ac940c8b45b75474415291a3b941c865ab (diff)
Merge tag 'v5.4.47' into 5.4-2.1.x-imx
This is the 5.4.47 stable release
All conflicts resolved in favour of HEAD, i.e. 5.4-2.1.x-imx
Conflicts:
    arch/arm/boot/dts/imx6qdl.dtsi
    arch/arm/mach-imx/Kconfig
    arch/arm/mach-imx/common.h
    arch/arm/mach-imx/suspend-imx6.S
    arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
    arch/powerpc/include/asm/cacheflush.h
    drivers/cpufreq/imx6q-cpufreq.c
    drivers/dma/imx-sdma.c
    drivers/edac/synopsys_edac.c
    drivers/firmware/imx/imx-scu.c
    drivers/net/ethernet/freescale/fec.h
    drivers/net/ethernet/freescale/fec_main.c
    drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
    drivers/net/phy/phy_device.c
    drivers/perf/fsl_imx8_ddr_perf.c
    drivers/usb/cdns3/gadget.c
    drivers/usb/dwc3/gadget.c
    include/uapi/linux/dma-buf.h
Signed-off-by: Max Krummenacher <max.krummenacher@toradex.com>
Diffstat (limited to 'virt')
-rw-r--r--   virt/kvm/arm/aarch32.c            |  28
-rw-r--r--   virt/kvm/arm/arm.c                |  22
-rw-r--r--   virt/kvm/arm/hyp/aarch32.c        |   8
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio-v2.c  |   4
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio-v3.c  |  12
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio.c     | 100
-rw-r--r--   virt/kvm/arm/vgic/vgic-mmio.h     |   3
-rw-r--r--   virt/kvm/kvm_main.c               |  26
8 files changed, 146 insertions(+), 57 deletions(-)
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 0a356aa91aa1..f2047fc69006 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
 	[7] = { 4, 4 },		/* FIQ, unused */
 };
 
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (kvm_arm_vcpu_loaded(vcpu)) {
+		kvm_arch_vcpu_put(vcpu);
+		return true;
+	}
+
+	preempt_enable();
+	return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+	if (loaded) {
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+		preempt_enable();
+	}
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
+	bool loaded = pre_fault_synchronize(vcpu);
+
 	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+	post_fault_synchronize(vcpu, loaded);
 }
 
 /*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 	u32 vect_offset;
 	u32 *far, *fsr;
 	bool is_lpae;
+	bool loaded;
+
+	loaded = pre_fault_synchronize(vcpu);
 
 	if (is_pabt) {
 		vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
 		*fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
+
+	post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86c6aa1cb58e..986fbc3cf667 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -354,6 +354,16 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	return kvm_vgic_vcpu_init(vcpu);
 }
 
+#ifdef CONFIG_ARM64
+#define __ptrauth_save_key(regs, key)						\
+({										\
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
+})
+#else
+#define __ptrauth_save_key(regs, key)	do { } while (0)
+#endif
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
@@ -386,7 +396,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	else
 		vcpu_set_wfe_traps(vcpu);
 
-	vcpu_ptrauth_setup_lazy(vcpu);
+	if (vcpu_has_ptrauth(vcpu)) {
+		struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
+
+		__ptrauth_save_key(ctxt->sys_regs, APIA);
+		__ptrauth_save_key(ctxt->sys_regs, APIB);
+		__ptrauth_save_key(ctxt->sys_regs, APDA);
+		__ptrauth_save_key(ctxt->sys_regs, APDB);
+		__ptrauth_save_key(ctxt->sys_regs, APGA);
+
+		vcpu_ptrauth_disable(vcpu);
+	}
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/hyp/aarch32.c b/virt/kvm/arm/hyp/aarch32.c
index d31f267961e7..25c0e47d57cb 100644
--- a/virt/kvm/arm/hyp/aarch32.c
+++ b/virt/kvm/arm/hyp/aarch32.c
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  */
 void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
+	u32 pc = *vcpu_pc(vcpu);
 	bool is_thumb;
 
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
 	if (is_thumb && !is_wide_instr)
-		*vcpu_pc(vcpu) += 2;
+		pc += 2;
 	else
-		*vcpu_pc(vcpu) += 4;
+		pc += 4;
+
+	*vcpu_pc(vcpu) = pc;
+
 	kvm_adjust_itstate(vcpu);
 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 5945f062d749..d63881f60e1a 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -422,11 +422,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 7dfd15dbb308..4c5909e38f78 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -491,11 +491,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive, 1,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive,
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
 		1, VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
 		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
@@ -563,12 +563,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
 		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_sactive,
-		NULL, vgic_mmio_uaccess_write_sactive,
-		4, VGIC_ACCESS_32bit),
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
 		vgic_mmio_read_active, vgic_mmio_write_cactive,
-		NULL, vgic_mmio_uaccess_write_cactive,
-		4, VGIC_ACCESS_32bit),
+		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+		VGIC_ACCESS_32bit),
 	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
 		vgic_mmio_read_priority, vgic_mmio_write_priority,
 		32, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 0d090482720d..fb1dcd397b93 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -300,8 +300,39 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	}
 }
 
-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
-				    gpa_t addr, unsigned int len)
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid >= VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+	    intid >= VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+					     gpa_t addr, unsigned int len)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	u32 value = 0;
@@ -311,6 +342,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/*
+		 * Even for HW interrupts, don't evaluate the HW state as
+		 * all the guest is interested in is the virtual state.
+		 */
 		if (irq->active)
 			value |= (1U << i);
 
@@ -320,6 +355,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	return value;
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	u32 val;
+
+	mutex_lock(&vcpu->kvm->lock);
+	vgic_access_active_prepare(vcpu, intid);
+
+	val = __vgic_mmio_read_active(vcpu, addr, len);
+
+	vgic_access_active_finish(vcpu, intid);
+	mutex_unlock(&vcpu->kvm->lock);
+
+	return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len)
+{
+	return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				      bool active, bool is_uaccess)
@@ -371,36 +429,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-	    intid > VGIC_NR_PRIVATE_IRQS)
-		kvm_arm_resume_guest(vcpu->kvm);
-}
-
 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 				      gpa_t addr, unsigned int len,
 				      unsigned long val)
@@ -422,11 +450,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
 
@@ -459,11 +487,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
 	mutex_lock(&vcpu->kvm->lock);
-	vgic_change_active_prepare(vcpu, intid);
+	vgic_access_active_prepare(vcpu, intid);
 
 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
 
-	vgic_change_active_finish(vcpu, intid);
+	vgic_access_active_finish(vcpu, intid);
 	mutex_unlock(&vcpu->kvm->lock);
 }
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 836f418f1ee8..b6aff5252429 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -157,6 +157,9 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 				    gpa_t addr, unsigned int len);
 
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+				    gpa_t addr, unsigned int len);
+
 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03c681568ab1..d5d4cd581af3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -157,10 +157,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-		unsigned long start, unsigned long end, bool blockable)
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
 {
-	return 0;
 }
 
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
@@ -381,6 +380,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 	return container_of(mn, struct kvm, mmu_notifier);
 }
 
+static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
+					      struct mm_struct *mm,
+					      unsigned long start, unsigned long end)
+{
+	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -405,7 +416,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	int need_tlb_flush = 0, idx;
-	int ret;
 
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
@@ -422,14 +432,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
-
-	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
-					range->end,
-					mmu_notifier_range_blockable(range));
-
 	srcu_read_unlock(&kvm->srcu, idx);
 
-	return ret;
+	return 0;
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -535,6 +540,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+	.invalidate_range	= kvm_mmu_notifier_invalidate_range,
 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
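
Note on the vgic-mmio change above: guest MMIO reads of the active registers now stop all VCPUs around the read (vgic_access_active_prepare/finish), while userspace reads through the KVM device API take the new vgic_uaccess_read_active() path without halting anything, since such accesses already require the VCPUs to be stopped. A minimal userspace sketch of such a read follows. It is illustrative only, not part of the patch: KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_GET_DEVICE_ATTR are real UAPI names, but vgic_fd (the GICv3 KVM device fd), the target vCPU's MPIDR, and the helper name are assumed to be set up elsewhere, and error handling is omitted.

/*
 * Sketch: read GICD_ISACTIVER0 from userspace via the KVM device API.
 * After this merge the kernel serves this through vgic_uaccess_read_active().
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define GICD_ISACTIVER0	0x0300	/* GICv3: first set-active register */

static uint32_t read_gicd_isactiver0(int vgic_fd, uint64_t mpidr)
{
	uint32_t val = 0;
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		/* documented layout: bits [63:32] = vCPU MPIDR, [31:0] = offset */
		.attr  = (mpidr << 32) | GICD_ISACTIVER0,
		.addr  = (uint64_t)(unsigned long)&val,
	};

	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);	/* returns 0 on success */
	return val;
}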