Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/irq.h | 6
-rw-r--r--  arch/alpha/mm/fault.c | 2
-rw-r--r--  arch/arc/include/asm/bitops.h | 6
-rw-r--r--  arch/arc/include/asm/io.h | 72
-rw-r--r--  arch/arc/include/asm/perf_event.h | 3
-rw-r--r--  arch/arc/include/asm/uaccess.h | 8
-rw-r--r--  arch/arc/kernel/head.S | 10
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/da850-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/exynos3250.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/exynos5420.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/kirkwood-dnskw.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/mmp2.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/omap4-sdp.dts | 1
-rw-r--r--  arch/arm/include/asm/irq.h | 1
-rw-r--r--  arch/arm/kernel/irq.c | 62
-rw-r--r--  arch/arm/kernel/smp.c | 17
-rw-r--r--  arch/arm/kvm/mmio.c | 21
-rw-r--r--  arch/arm/mach-cns3xxx/pcie.c | 2
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6sx.c | 2
-rw-r--r--  arch/arm/mach-integrator/impd1.c | 6
-rw-r--r--  arch/arm/mach-iop32x/n2100.c | 3
-rw-r--r--  arch/arm/mach-omap1/board-ams-delta.c | 3
-rw-r--r--  arch/arm/mach-omap2/display.c | 7
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 6
-rw-r--r--  arch/arm/mach-omap2/prm44xx.c | 2
-rw-r--r--  arch/arm/mach-pxa/cm-x300.c | 2
-rw-r--r--  arch/arm/mach-pxa/littleton.c | 2
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 2
-rw-r--r--  arch/arm/mach-s3c24xx/mach-osiris-dvs.c | 8
-rw-r--r--  arch/arm/mm/cache-v7.S | 8
-rw-r--r--  arch/arm/plat-pxa/ssp.c | 3
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-core.S | 5
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 1
-rw-r--r--  arch/arm64/kernel/head.S | 6
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 2
-rw-r--r--  arch/arm64/kernel/perf_event.c | 1
-rw-r--r--  arch/arm64/kvm/hyp.S | 2
-rw-r--r--  arch/arm64/mm/flush.c | 4
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 4
-rw-r--r--  arch/ia64/kernel/err_inject.c | 2
-rw-r--r--  arch/m68k/Makefile | 5
-rw-r--r--  arch/mips/Kconfig | 4
-rw-r--r--  arch/mips/boot/compressed/calc_vmlinuz_load_addr.c | 7
-rw-r--r--  arch/mips/configs/ath79_defconfig | 1
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 5
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 2
-rw-r--r--  arch/mips/jazz/jazzdma.c | 5
-rw-r--r--  arch/mips/kernel/irq.c | 4
-rw-r--r--  arch/mips/kernel/mips-cm.c | 2
-rw-r--r--  arch/mips/kernel/process.c | 7
-rw-r--r--  arch/mips/mm/gup.c | 2
-rw-r--r--  arch/mips/pci/msi-octeon.c | 4
-rw-r--r--  arch/mips/pci/pci-octeon.c | 10
-rw-r--r--  arch/mips/sibyte/common/Makefile | 1
-rw-r--r--  arch/mips/sibyte/common/dma.c | 14
-rw-r--r--  arch/mips/vdso/Makefile | 4
-rw-r--r--  arch/powerpc/boot/Makefile | 3
-rw-r--r--  arch/powerpc/boot/crt0.S | 4
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h | 12
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 2
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 9
-rw-r--r--  arch/powerpc/kernel/msi.c | 7
-rw-r--r--  arch/powerpc/platforms/83xx/suspend-asm.S | 34
-rw-r--r--  arch/powerpc/platforms/embedded6xx/wii.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-msglog.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 2
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/kernel/smp.c | 12
-rw-r--r--  arch/s390/mm/gup.c | 2
-rw-r--r--  arch/sh/mm/gup.c | 3
-rw-r--r--  arch/sparc/mm/gup.c | 3
-rw-r--r--  arch/um/include/asm/pgtable.h | 9
-rw-r--r--  arch/x86/boot/compressed/aslr.c | 4
-rw-r--r--  arch/x86/crypto/chacha20_glue.c | 1
-rw-r--r--  arch/x86/ia32/ia32_aout.c | 6
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 3
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 4
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 4
-rw-r--r--  arch/x86/include/asm/uaccess.h | 71
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 26
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 94
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 8
-rw-r--r--  arch/x86/kernel/cpu/common.c | 3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 4
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c | 3
-rw-r--r--  arch/x86/kernel/livepatch.c | 1
-rw-r--r--  arch/x86/kvm/cpuid.c | 31
-rw-r--r--  arch/x86/kvm/cpuid.h | 40
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/svm.c | 158
-rw-r--r--  arch/x86/kvm/vmx.c | 780
-rw-r--r--  arch/x86/kvm/x86.c | 28
-rw-r--r--  arch/x86/lib/usercopy_32.c | 20
-rw-r--r--  arch/x86/mm/gup.c | 2
-rw-r--r--  arch/x86/mm/mpx.c | 3
-rw-r--r--  arch/x86/pci/broadcom_bus.c | 4
-rw-r--r--  arch/x86/platform/efi/early_printk.c | 2
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 1
-rw-r--r--  arch/xtensa/kernel/head.S | 5
-rw-r--r--  arch/xtensa/kernel/smp.c | 41
-rw-r--r--  arch/xtensa/kernel/time.c | 2
108 files changed, 1271 insertions, 625 deletions
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 06377400dc09..469642801a68 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -55,15 +55,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
- defined(CONFIG_ALPHA_SHARK) || \
- defined(CONFIG_ALPHA_EIGER)
+ defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
- defined(CONFIG_ALPHA_TAKARA)
+ defined(CONFIG_ALPHA_TAKARA) || \
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2..0f68f0de9b5e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
- (r) <= 18 ? (r)+8 : (r)-10])
+ (r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8d21b9..9623ae002f5b 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -286,7 +286,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@@ -346,9 +346,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
- int n;
+ unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index cb69299a492e..f120d823e8c2 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -85,6 +86,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+ void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+ /* Some ARC CPU's don't support unaligned accesses */ \
+ if (is_aligned) { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } else { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ put_unaligned(x, buf++); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@@ -117,6 +154,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+ const void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ const u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+ /* Some ARC CPU's don't support unaligned accesses */ \
+ if (is_aligned) { \
+ do { \
+ __raw_write##f(*buf++, addr); \
+ } while (--count); \
+ } else { \
+ do { \
+ __raw_write##f(get_unaligned(buf++), addr); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@@ -132,10 +198,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
/*
* Relaxed API for drivers which can handle barrier ordering themselves
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 5f071762fb1c..6a2ae61748e4 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 57387b567f34..f077a419cb51 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -209,7 +209,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -438,7 +438,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -658,7 +658,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
@@ -691,7 +691,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..cd64cb4ef7b0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Unaligned access is disabled at reset, so re-enable early as
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+ ; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm
.section .init.text, "ax",@progbits
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 34e1569a11ee..3a0277c6c060 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1475,6 +1475,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
+ select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 6881757b03e8..67369f284b91 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -147,7 +147,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 2f30d632f1cc..e81a27214188 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -150,6 +150,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
+ clock-names = "clkout8";
+ clocks = <&cmu CLK_FIN_PLL>;
+ #clock-cells = <1>;
};
mipi_phy: video-phy@10020710 {
diff --git a/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
new file mode 100644
index 000000000000..c8771c660550
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for Exynos5420 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <0>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 1b3d6c769a3c..d5edb7766942 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -777,7 +777,7 @@
interrupts = <0 65 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu1: tmu@10064000 {
@@ -786,7 +786,7 @@
interrupts = <0 183 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu2: tmu@10068000 {
@@ -795,7 +795,7 @@
interrupts = <0 184 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu3: tmu@1006c000 {
@@ -804,7 +804,7 @@
interrupts = <0 185 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_gpu: tmu@100a0000 {
@@ -813,7 +813,7 @@
interrupts = <0 215 0>;
clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
thermal-zones {
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index 113dcf056dcf..1b2dacfa6132 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -35,8 +35,8 @@
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 766bbb8495b6..47e5b63339d1 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -220,12 +220,15 @@
status = "disabled";
};
- twsi2: i2c@d4025000 {
+ twsi2: i2c@d4031000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4025000 0x1000>;
- interrupts = <58>;
+ reg = <0xd4031000 0x1000>;
+ interrupt-parent = <&intcmux17>;
+ interrupts = <0>;
clocks = <&soc_clocks MMP2_CLK_TWSI1>;
resets = <&soc_clocks MMP2_CLK_TWSI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index f0bdc41f8eff..235d1493f8aa 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -33,6 +33,7 @@
gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
enable-active-high;
regulator-boot-on;
+ startup-delay-us = <25000>;
};
vbat: fixedregulator-vbat {
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 1bd9510de1b9..cae4df39f02e 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..5b07c7a31c31 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@@ -119,64 +118,3 @@ int __init arch_probe_nr_irqs(void)
return nr_irqs;
}
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355dae..08ce9e36dc5a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -218,7 +218,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
@@ -687,6 +687,21 @@ void smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
+/* In case panic() and panic() called at the same time on CPU1 and CPU2,
+ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
+ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
+ * kdump fails. So split out the panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
+}
+
/*
* not supported here
*/
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 387ee2a11e36..ae61e2ea7255 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * or in-kernel IO emulation
+ *
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -118,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
+ /*
+ * The MMIO instruction is emulated and should not be re-executed
+ * in the guest.
+ */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 0;
}
@@ -151,11 +156,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
@@ -206,14 +206,17 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
- memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
+ if (!is_write)
+ memcpy(run->mmio.data, data_buf, len);
kvm_handle_mmio_return(vcpu, run);
return 1;
}
+ if (is_write)
+ memcpy(run->mmio.data, data_buf, len);
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..5e11ad3164e0 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
- return base + (where & 0xffc) + (devfn << 12);
+ return base + where + (devfn << 12);
}
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 3c6672b3796b..7f5df8992008 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -97,7 +97,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(2, 1);
+ imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 38b0da300dd5..423a88ff908c 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -394,7 +394,11 @@ static int __init_refok impd1_probe(struct lm_device *dev)
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
GFP_KERNEL);
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+ "lm%x:00700", dev->id);
+ if (!lookup || !chipname || !mmciname)
+ return -ENOMEM;
+
lookup->dev_id = mmciname;
/*
* Offsets on GPIO block 1:
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index c1cd80ecc219..a904244264ce 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
/*
* N2100 PCI.
*/
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index a95499ea8706..fa1d41edce68 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -511,6 +511,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
{
struct modem_private_data *priv = port->private_data;
+ if (!priv)
+ return;
+
if (IS_ERR(priv->regulator))
return;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6ab13d18c636..cde86d1199cf 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -115,6 +115,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
u32 reg;
+ int ret;
if (dsi_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -130,7 +131,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
- regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+ ret = regmap_read(omap4_dsi_mux_syscon,
+ OMAP4_DSIPHY_SYSCON_OFFSET,
+ &reg);
+ if (ret)
+ return ret;
reg &= ~enable_mask;
reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 147c90e70b2e..36706d32d656 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2526,7 +2526,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
* a stub; implementing this properly requires iclk autoidle usecounting in
* the clock code. No return value.
*/
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
struct list_head *p;
@@ -2561,7 +2561,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
* reset. Returns 0 upon success or a negative error code upon
* failure.
*/
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
{
int r;
@@ -2622,7 +2622,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
*
* No return value.
*/
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
{
u8 postsetup_state;
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 30768003f854..8c505284bc0c 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -344,7 +344,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
* to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
* omap44xx_prm_reconfigure_io_chain() must be called. No return value.
*/
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
{
s32 inst = omap4_prmst_get_prm_dev_inst();
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index a7dae60810e8..307fc18edede 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -547,7 +547,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
.exit = cm_x300_u2d_exit,
};
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
{
pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
}
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 5d665588c7eb..05aa7071efd6 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -183,7 +183,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
.lcd_conn = LCD_COLOR_TFT_16BPP,
};
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
{
pxa_set_fb_info(NULL, &littleton_lcd_info);
}
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index d757cfb5f8a6..4da2458d7f32 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -558,7 +558,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
.flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
};
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
{
/* Port 2 is shared between host and client interface. */
UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index ce2db235dbaf..5e8a306163de 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -70,16 +70,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
switch (val) {
case CPUFREQ_PRECHANGE:
- if (old_dvs & !new_dvs ||
- cur_dvs & !new_dvs) {
+ if ((old_dvs && !new_dvs) ||
+ (cur_dvs && !new_dvs)) {
pr_debug("%s: exiting dvs\n", __func__);
cur_dvs = false;
gpio_set_value(OSIRIS_GPIO_DVS, 1);
}
break;
case CPUFREQ_POSTCHANGE:
- if (!old_dvs & new_dvs ||
- !cur_dvs & new_dvs) {
+ if ((!old_dvs && new_dvs) ||
+ (!cur_dvs && new_dvs)) {
pr_debug("entering dvs\n");
cur_dvs = true;
gpio_set_value(OSIRIS_GPIO_DVS, 0);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a134d8a13d00..11d699af30ed 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -359,14 +359,16 @@ v7_dma_inv_range:
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
+ addne r0, r0, r2
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
-1:
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
- add r0, r0, r2
cmp r0, r1
+1:
+ mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
+ addlo r0, r0, r2
+ cmplo r0, r1
blo 1b
dsb st
ret lr
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index daa1a65f2eb7..6748827c2ec8 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -238,8 +238,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
- iounmap(ssp->mmio_base);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -249,7 +247,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
- kfree(ssp);
return 0;
}
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 3363560c79b7..7bc459d9235c 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
-8: mov w7, w8
+8: cbz w8, 91f
+ mov w7, w8
add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
- eor v0.16b, v0.16b, v1.16b
+91: eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index ef8e13d379cb..d7e7cf56e8d6 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_API (UL(1) << 41)
+#define HCR_APK (UL(1) << 40)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
@@ -81,6 +83,7 @@
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
/* Hyp System Control Register (SCTLR_EL2) bits */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 0f03a8fe2314..d18d15810d19 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -78,7 +78,6 @@
.macro mcount_get_lr reg
ldr \reg, [x29]
ldr \reg, [\reg, #8]
- mcount_adjust_addr \reg, \reg
.endm
.macro mcount_get_lr_addr reg
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d019c3a58cc2..6299a8a361ee 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -30,6 +30,7 @@
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -464,7 +465,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
ret
/* Hyp configuration. */
-2: mov x0, #(1 << 31) // 64-bit EL1
+2: mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
/* Generic timers. */
@@ -477,8 +478,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne 3f
+ cbz x0, 3f
mrs_s x0, ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index a272f335c289..096e957aecb0 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -26,6 +26,8 @@
#include <asm/virt.h>
.text
+ .pushsection .hyp.text, "ax"
+
.align 11
ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 62d3dc60ca09..e99a0ed7e66b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -670,6 +670,7 @@ static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = "armv8-pmu",
.of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 86c289832272..8d3da858c257 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -494,7 +494,7 @@
.endm
.macro deactivate_traps
- mov x2, #HCR_RW
+ mov_q x2, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x2
msr hstr_el2, xzr
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index c26b804015e8..a90615baa529 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -70,10 +70,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
__flush_dcache_area(page_address(page),
PAGE_SIZE << compound_order(page));
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 877da1908234..98e2a5dbcfda 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2724,7 +2724,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
(unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);
@@ -2740,8 +2739,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
current->mm,
(unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 0c161ed6d18e..8205b456de7a 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -143,7 +143,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
int ret;
ret = get_user_pages(current, current->mm, virt_addr,
- 1, VM_READ, 0, NULL, NULL);
+ 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 0b29dcfef69f..0c736ed58abd 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -59,7 +59,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
KBUILD_AFLAGS += $(cpuflags-y)
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
+KBUILD_CFLAGS += $(cpuflags-y)
+
+KBUILD_CFLAGS += -pipe -ffreestanding
+
ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8b0424abc84c..333ea0389adb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -760,6 +760,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -782,6 +783,7 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -795,6 +797,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
@@ -2972,6 +2975,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c19a90..542c3ede9722 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
/*
- * Align with 16 bytes: "greater than that used for any standard data
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
*/
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
printf("0x%llx\n", vmlinuz_load_addr);
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 134879c1310a..4ed369c0ec6a 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -74,6 +74,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index cf661a2fb141..16fade4f49dd 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -189,6 +189,11 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 1b6f2f219298..9db764b51ffe 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -290,8 +290,8 @@ enum mm_32a_minor_op {
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
+ mm_srlv32_op = 0x050,
mm_sra_op = 0x080,
- mm_srlv32_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index db6f5afff4ff..ea897912bc71 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -71,14 +71,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index dc1180a8bfa1..66736397af9f 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
void __init init_IRQ(void)
{
int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@@ -59,8 +60,7 @@ void __init init_IRQ(void)
arch_init_irq();
for_each_possible_cpu(i) {
- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e..76f18c56141c 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -424,5 +424,5 @@ void mips_cm_error_report(void)
}
/* reprime cause register */
- write_gcr_error_cause(0);
+ write_gcr_error_cause(cm_error);
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ebd8a715fe38..e6102775892d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -339,7 +339,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip, *ip_end;
+ union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@@ -351,10 +351,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
- ip_end = (void *)ip + info->func_size;
-
- for (i = 0; i < max_insns && ip < ip_end; i++) {
+ for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
+
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.halfword[0] = 0;
insn.halfword[1] = ip->halfword[0];
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 349995d19c7f..e596e0a1cecc 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -303,7 +303,7 @@ slow_irqon:
ret = get_user_pages_unlocked(current, mm, start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index c258cd406fbb..b36bbda31058 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -571,6 +571,11 @@ static int __init octeon_pci_setup(void)
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
return 0;
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
/* Point pcibios_map_irq() to the PCI version of it */
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -582,11 +587,6 @@ static int __init octeon_pci_setup(void)
else
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
- if (!octeon_is_pci_host()) {
- pr_notice("Not in host mode, PCI Controller not initialized\n");
- return 0;
- }
-
/* PCI I/O and PCI MEM values */
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
ioport_resource.start = 0;
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf23a662..3ef3fb658136 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 000000000000..eb47a94f3583
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMA support for Broadcom SiByte platforms.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 6c7d78546eee..886005b1e87d 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -107,7 +107,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
@@ -143,7 +143,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 99e4487248ff..57003d1bd243 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -70,7 +70,8 @@ $(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o \
+ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 5c2199857aa8..a3550e8f1a77 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
RELA = 7
RELACOUNT = 0x6ffffff9
- .text
+ .data
/* A procedure descriptor used when booting this as a COFF file.
* When making COFF, this comes first in the link and we're
* linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
.globl _zimage_start_opd
_zimage_start_opd:
.long 0x500000, 0, 0, 0
+ .text
+ b _zimage_start
#ifdef __powerpc64__
.balign 8
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 334459ad145b..90863245df53 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -508,7 +508,7 @@ static unsigned long epapr_hypercall(unsigned long *in,
static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
unsigned long r;
@@ -520,7 +520,7 @@ static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
static inline long epapr_hypercall0(unsigned int nr)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
return epapr_hypercall(in, out, nr);
@@ -528,7 +528,7 @@ static inline long epapr_hypercall0(unsigned int nr)
static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -538,7 +538,7 @@ static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
unsigned long p2)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -549,7 +549,7 @@ static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -562,7 +562,7 @@ static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a5ffe0207c16..05f1389228d2 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -59,7 +59,7 @@
#endif
#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
+ (__chk_user_ptr(addr), (void)(type), \
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2405631e91a2..3728e617e17e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -685,6 +685,9 @@ fast_exception_return:
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r11)
REST_GPR(10, r11)
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
@@ -915,6 +918,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtcrf 0xFF,r10
mtlr r11
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r1)
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
@@ -952,6 +958,9 @@ exc_exit_restart_end:
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r1)
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index dab616a33b8d..f2197654be07 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
- phb->controller_ops.teardown_msi_irqs(dev);
+ /*
+ * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+ * so check the pointer again.
+ */
+ if (phb->controller_ops.teardown_msi_irqs)
+ phb->controller_ops.teardown_msi_irqs(dev);
}
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3d1ecd211776..8137f77abad5 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -26,13 +26,13 @@
#define SS_MSR 0x74
#define SS_SDR1 0x78
#define SS_LR 0x7c
-#define SS_SPRG 0x80 /* 4 SPRGs */
-#define SS_DBAT 0x90 /* 8 DBATs */
-#define SS_IBAT 0xd0 /* 8 IBATs */
-#define SS_TB 0x110
-#define SS_CR 0x118
-#define SS_GPREG 0x11c /* r12-r31 */
-#define STATE_SAVE_SIZE 0x16c
+#define SS_SPRG 0x80 /* 8 SPRGs */
+#define SS_DBAT 0xa0 /* 8 DBATs */
+#define SS_IBAT 0xe0 /* 8 IBATs */
+#define SS_TB 0x120
+#define SS_CR 0x128
+#define SS_GPREG 0x12c /* r12-r31 */
+#define STATE_SAVE_SIZE 0x17c
.section .data
.align 5
@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
stw r7, SS_SPRG+12(r3)
stw r8, SS_SDR1(r3)
+ mfspr r4, SPRN_SPRG4
+ mfspr r5, SPRN_SPRG5
+ mfspr r6, SPRN_SPRG6
+ mfspr r7, SPRN_SPRG7
+
+ stw r4, SS_SPRG+16(r3)
+ stw r5, SS_SPRG+20(r3)
+ stw r6, SS_SPRG+24(r3)
+ stw r7, SS_SPRG+28(r3)
+
mfspr r4, SPRN_DBAT0U
mfspr r5, SPRN_DBAT0L
mfspr r6, SPRN_DBAT1U
@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
mtspr SPRN_IBAT7U, r6
mtspr SPRN_IBAT7L, r7
+ lwz r4, SS_SPRG+16(r3)
+ lwz r5, SS_SPRG+20(r3)
+ lwz r6, SS_SPRG+24(r3)
+ lwz r7, SS_SPRG+28(r3)
+
+ mtspr SPRN_SPRG4, r4
+ mtspr SPRN_SPRG5, r5
+ mtspr SPRN_SPRG6, r6
+ mtspr SPRN_SPRG7, r7
+
lwz r4, SS_SPRG+0(r3)
lwz r5, SS_SPRG+4(r3)
lwz r6, SS_SPRG+8(r3)
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 352592d3e44e..7fd19a480422 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -104,6 +104,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
/* MEM2 64MB@0x10000000 */
delta = wii_hole_start + wii_hole_size;
size = top - delta;
+
+ if (__map_without_bats)
+ return delta;
+
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > size)
break;
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 44ed78af1a0d..9021b7272889 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -92,7 +92,7 @@ out:
}
static struct bin_attribute opal_msglog_attr = {
- .attr = {.name = "msglog", .mode = 0444},
+ .attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 96536c969c9c..a8efed3b4691 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -280,6 +280,8 @@ int dlpar_detach_node(struct device_node *dn)
if (rc)
return rc;
+ of_node_put(dn);
+
return 0;
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 8eccead675d4..cc7b450a7766 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void)
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
- /* Running under KVM? If not we assume z/VM */
+ /* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
- else
+ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 929c147e07b4..1b69bfdf59f9 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -344,6 +344,8 @@ static int __hw_perf_event_init(struct perf_event *event)
break;
case PERF_TYPE_HARDWARE:
+ if (is_sampling_event(event)) /* No sampling support */
+ return -ENOENT;
ev = attr->config;
/* Count user space (problem-state) only */
if (!attr->exclude_user && attr->exclude_kernel) {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e7a43a30e3ff..47692c78d09c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -833,6 +833,8 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
+ else
+ pr_info("Linux is running as a guest in 64-bit mode\n");
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 77f4f334a465..29e5409c0d48 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -360,9 +360,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
+ struct _lowcore *lc = pcpu_devices->lowcore;
+
+ if (pcpu_devices[0].address == stap())
+ lc = &S390_lowcore;
+
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->lowcore->panic_stack -
- PANIC_FRAME_OFFSET + PAGE_SIZE);
+ lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
@@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev,
{
int rc;
+ rc = lock_device_hotplug_sysfs();
+ if (rc)
+ return rc;
rc = smp_rescan_cpus();
+ unlock_device_hotplug();
return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 12bbf0e8478f..7ad41be8b373 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -242,7 +242,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- nr_pages - nr, write, 0, pages);
+ nr_pages - nr, pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index e7af6a65baab..8c51a0e94854 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 2e5c4fc2daa9..150f48303fb0 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -250,7 +250,8 @@ slow:
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
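All three gup call sites above (s390, sh, sparc) are the same mechanical conversion: the old separate write/force int arguments to get_user_pages_unlocked() are folded into a single gup_flags word. A tiny sketch of just the flag translation the converted calls perform (the FOLL_WRITE value here is illustrative, not quoted from this tree):

#include <stdio.h>

#define FOLL_WRITE 0x01	/* illustrative value */

static unsigned int gup_flags_from_write(int write)
{
	return write ? FOLL_WRITE : 0;	/* exactly what the converted call sites pass */
}

int main(void)
{
	printf("write=1 -> gup_flags=0x%x\n", gup_flags_from_write(1));
	printf("write=0 -> gup_flags=0x%x\n", gup_flags_from_write(0));
	return 0;
}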
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 18eb9924dda3..aeb430212947 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte_clear_bits(pte, _PAGE_RW);
+ if (likely(pte_get_bits(pte, _PAGE_RW)))
+ pte_clear_bits(pte, _PAGE_RW);
+ else
+ return pte;
return(pte_mknewprot(pte));
}
static inline pte_t pte_mkread(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+ return pte;
pte_set_bits(pte, _PAGE_USER);
return(pte_mknewprot(pte));
}
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+ return pte;
pte_set_bits(pte, _PAGE_RW);
return(pte_mknewprot(pte));
}
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 31dab2135188..21332b431f10 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -25,8 +25,8 @@ static inline u16 i8254(void)
u16 status, timer;
do {
- outb(I8254_PORT_CONTROL,
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+ I8254_PORT_CONTROL);
status = inb(I8254_PORT_COUNTER0);
timer = inb(I8254_PORT_COUNTER0);
timer |= inb(I8254_PORT_COUNTER0) << 8;
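The bug being fixed here is purely one of argument order: x86 Linux's outb() takes (value, port), so the old call sent the port number to the control register instead of the read-back command. A small stand-alone illustration with a stubbed outb(); the port and command constants are assumed to match the usual i8254 definitions rather than quoted from this tree:

#include <stdio.h>

#define I8254_PORT_CONTROL	0x43	/* assumed i8254 constants */
#define I8254_CMD_READBACK	0xc0
#define I8254_SELECT_COUNTER0	0x02

static void outb(unsigned char value, unsigned short port)
{
	/* stand-in for the real port write */
	printf("out 0x%02x -> port 0x%02x\n", value, port);
}

int main(void)
{
	/* fixed: latch status/count of counter 0 via the control port */
	outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
	return 0;
}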
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 8baaff5af0b5..75b9d43069f1 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -77,6 +77,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index ae6aad1d24f7..b348c4641312 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
/*
* fill in the user structure for a core dump..
*/
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
{
u32 fs, gs;
memset(dump, 0, sizeof(*dump));
@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
fs = get_fs();
set_fs(KERNEL_DS);
has_dumped = 1;
+
+ fill_dump(cprm->regs, &dump);
+
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
dump.u_ar0 = offsetof(struct user32, regs);
dump.signal = cprm->siginfo->si_signo;
- dump_thread32(cprm->regs, &dump);
/*
* If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 16825dda18dc..66a5e60f60c4 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -94,6 +94,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...) \
({ \
int err; \
+ \
+ might_fault(); \
+ \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3a37cdbdfbaa..2cb49ac1b2b2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -765,7 +765,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
- bool (*cpu_has_high_real_mode_segbase)(void);
+ bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
/* Create, but do not attach this VCPU */
@@ -1200,7 +1200,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
- "call kvm_spurious_fault \n\t" \
+ "jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 4928cf0d5af0..fb1251946b45 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -2,7 +2,11 @@
#define _ASM_X86_PAGE_64_DEFS_H
#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
#define KASAN_STACK_ORDER 1
+#endif
#else
#define KASAN_STACK_ORDER 0
#endif
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d788b0cdc0ad..ac6932bf1a01 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -144,6 +144,14 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
+#define __uaccess_begin() stac()
+#define __uaccess_end() clac()
+#define __uaccess_begin_nospec() \
+({ \
+ stac(); \
+ barrier_nospec(); \
+})
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -203,10 +211,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
@@ -217,10 +225,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
@@ -306,14 +314,17 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
- errret); \
+ __put_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -368,9 +379,9 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -380,6 +391,10 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -410,7 +425,11 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
+ __uaccess_begin(); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
+ __uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
@@ -418,7 +437,9 @@ do { \
({ \
int __gu_err; \
unsigned long __gu_val; \
+ __uaccess_begin_nospec(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
})
@@ -433,9 +454,9 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %"rtype"1,%2\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
@@ -455,11 +476,15 @@ struct __large_struct { unsigned long buf[100]; };
*/
#define uaccess_try do { \
current_thread_info()->uaccess_err = 0; \
- stac(); \
+ __uaccess_begin(); \
barrier();
+#define uaccess_try_nospec do { \
+ current_thread_info()->uaccess_err = 0; \
+ __uaccess_begin_nospec(); \
+
#define uaccess_catch(err) \
- clac(); \
+ __uaccess_end(); \
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
@@ -522,7 +547,7 @@ struct __large_struct { unsigned long buf[100]; };
* get_user_ex(...);
* } get_user_catch(err)
*/
-#define get_user_try uaccess_try
+#define get_user_try uaccess_try_nospec
#define get_user_catch(err) uaccess_catch(err)
#define get_user_ex(x, ptr) do { \
@@ -557,12 +582,13 @@ extern void __cmpxchg_wrong_size(void)
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
+ __uaccess_begin_nospec(); \
switch (size) { \
case 1: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -576,9 +602,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 2: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -592,9 +618,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 4: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -611,9 +637,9 @@ extern void __cmpxchg_wrong_size(void)
if (!IS_ENABLED(CONFIG_X86_64)) \
__cmpxchg_wrong_size(); \
\
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -628,6 +654,7 @@ extern void __cmpxchg_wrong_size(void)
default: \
__cmpxchg_wrong_size(); \
} \
+ __uaccess_end(); \
*__uval = __old; \
__ret; \
})
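The common thread in the uaccess.h changes above: the STAC/CLAC instructions move out of every individual asm blob and into explicit __uaccess_begin()/__uaccess_end() brackets around each access, with the _nospec variant adding a speculation barrier at the start of get-style windows. A minimal sketch of that bracketing discipline with stub implementations; only the macro shapes mirror the hunk, the stubs themselves are placeholders:

#include <stdio.h>

static void stac(void)           { puts("STAC  - open user-access window"); }
static void clac(void)           { puts("CLAC  - close user-access window"); }
static void barrier_nospec(void) { puts("speculation barrier"); }

#define __uaccess_begin()        stac()
#define __uaccess_end()          clac()
#define __uaccess_begin_nospec() do { stac(); barrier_nospec(); } while (0)

int main(void)
{
	__uaccess_begin_nospec();	/* get_user-style access: fence before the load */
	puts("  ...copy from user memory...");
	__uaccess_end();

	__uaccess_begin();		/* put_user-style access: no fence needed */
	puts("  ...copy to user memory...");
	__uaccess_end();
	return 0;
}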
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..f575ee3aea5c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__put_user_size(*(u8 *)from, (u8 __user *)to,
1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__put_user_size(*(u16 *)from, (u16 __user *)to,
2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__put_user_size(*(u32 *)from, (u32 __user *)to,
4, ret, 4);
+ __uaccess_end();
return ret;
case 8:
+ __uaccess_begin_nospec();
__put_user_size(*(u64 *)from, (u64 __user *)to,
8, ret, 8);
+ __uaccess_end();
return ret;
}
}
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index d83a55b95a48..dc2d00e7ced3 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -56,35 +56,49 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ case 1:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1);
+ __uaccess_end();
return ret;
- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ case 2:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ case 4:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4);
+ __uaccess_end();
return ret;
- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ case 8:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin_nospec();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u16 *)(8 + (char *)dst),
- (u16 __user *)(8 + (char __user *)src),
- ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+ (u16 __user *)(8 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin_nospec();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u64 *)(8 + (char *)dst),
- (u64 __user *)(8 + (char __user *)src),
- ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+ (u64 __user *)(8 + (char __user *)src),
+ ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
default:
return copy_user_generic(dst, (__force void *)src, size);
@@ -106,35 +120,51 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ case 1:
+ __uaccess_begin();
+ __put_user_asm(*(u8 *)src, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+ case 2:
+ __uaccess_begin();
+ __put_user_asm(*(u16 *)src, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ case 4:
+ __uaccess_begin();
+ __put_user_asm(*(u32 *)src, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ case 8:
+ __uaccess_begin();
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 10);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ }
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 16);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "er", 8);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ }
+ __uaccess_end();
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +190,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
switch (size) {
case 1: {
u8 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u8 __user *)src,
ret, "b", "b", "=q", 1);
if (likely(!ret))
__put_user_asm(tmp, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
}
case 2: {
u16 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u16 __user *)src,
ret, "w", "w", "=r", 2);
if (likely(!ret))
__put_user_asm(tmp, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
}
case 4: {
u32 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u32 __user *)src,
ret, "l", "k", "=r", 4);
if (likely(!ret))
__put_user_asm(tmp, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
}
case 8: {
u64 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u64 __user *)src,
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
}
default:
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9f6151884249..e94e6f16172b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -716,11 +716,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
- /*
- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
- * all up to and including B1.
- */
- if (c->x86_model <= 1 && c->x86_mask <= 1)
+
+ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+ if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b12c0287d6cf..e8b46f575306 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -693,7 +693,8 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);
- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+ cpu_has(c, X86_FEATURE_VIRT_SSBD))
set_cpu_cap(c, X86_FEATURE_SSBD);
if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7b8c8c838191..77f7580e22c6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -670,6 +670,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
}
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ m->bank = i;
*msg = tmp;
ret = 1;
}
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d6d8d6..ec894bf5eeb0 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -173,6 +173,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
+ memset(&gentry, 0, sizeof(gentry));
+
switch (cmd) {
case MTRRIOC_ADD_ENTRY:
case MTRRIOC_SET_ENTRY:
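The added memset() closes a potential kernel-stack infoleak: some ioctl paths copy gentry back to user space without filling every field (or the padding between fields), so any bytes not explicitly written would otherwise carry stale stack contents. A small sketch of the pattern, with a stubbed copy-out and a made-up struct layout:

#include <stdio.h>
#include <string.h>

struct gentry_like {			/* layout is illustrative only */
	unsigned long base;
	unsigned int size;
	unsigned int type;
};

static void copy_to_user_stub(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);		/* stand-in for copy_to_user() */
}

int main(void)
{
	struct gentry_like gentry;
	unsigned char out[sizeof(gentry)];

	memset(&gentry, 0, sizeof(gentry));	/* the added hardening */
	gentry.base = 0x1000;			/* only some fields get set later */
	copy_to_user_stub(out, &gentry, sizeof(gentry));
	printf("last byte copied out: 0x%02x\n", out[sizeof(out) - 1]);
	return 0;
}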
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index f0f4fcba252e..947579425861 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -1081,6 +1081,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
.id_table = snbep_uncore_pci_ids,
};
+#define NODE_ID_MASK 0x7
+
/*
* build pci bus to socket mapping
*/
@@ -1102,7 +1104,7 @@ static int snbep_pci2phy_map_init(int devid)
err = pci_read_config_dword(ubox_dev, 0x40, &config);
if (err)
break;
- nodeid = config;
+ nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, 0x54, &config);
if (err)
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 0f8a6bbaaa44..0bf17576dd2a 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -168,6 +168,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
if (!current_ei->efi_memmap_size)
return 0;
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index d1d35ccffed3..579f8f813ce0 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -58,6 +58,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
val = (s32)value;
break;
case R_X86_64_PC32:
+ case R_X86_64_PLT32:
val = (u32)(value - loc);
break;
default:
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 338d13d4fd2f..b857bb9f6f23 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -341,6 +341,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+
/* cpuid 0xC0000001.edx */
const u32 kvm_supported_word5_x86_features =
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
@@ -358,6 +362,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
const u32 kvm_supported_word10_x86_features =
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -435,11 +443,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
cpuid_mask(&entry->ebx, 9);
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
- } else
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ cpuid_mask(&entry->edx, CPUID_7_EDX);
+ } else {
entry->ebx = 0;
+ entry->edx = 0;
+ }
entry->eax = 0;
entry->ecx = 0;
- entry->edx = 0;
break;
}
case 9:
@@ -583,7 +594,21 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
if (!g_phys_as)
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
- entry->ebx = entry->edx = 0;
+ entry->edx = 0;
+ /*
+ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+ * hardware cpuid
+ */
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
break;
}
case 0x80000019:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d1534feefcfe..72f159f4d456 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -159,6 +159,46 @@ static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}
+static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+}
+
+static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
+}
+
+static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+}
+
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
+
+
/*
* NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
*/
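The new guest_cpuid_has_*() helpers all follow one pattern: look up the guest's CPUID entry for a leaf and test a feature bit, accepting either the AMD (0x80000008 EBX) or the Intel (leaf 7 EDX) enumeration of the same capability. A toy model of that lookup; the table contents and bit positions are made up for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define bit(n)	(1u << (n))

struct cpuid_entry { unsigned int function, ebx, edx; };

static const struct cpuid_entry guest_cpuid[] = {
	{ .function = 7,          .edx = bit(26) },	/* pretend SPEC_CTRL */
	{ .function = 0x80000008, .ebx = 0 },		/* no AMD_IBRS here  */
};

static const struct cpuid_entry *find_cpuid_entry(unsigned int function)
{
	for (size_t i = 0; i < sizeof(guest_cpuid) / sizeof(guest_cpuid[0]); i++)
		if (guest_cpuid[i].function == function)
			return &guest_cpuid[i];
	return NULL;
}

static bool guest_has_spec_ctrl(void)
{
	const struct cpuid_entry *best = find_cpuid_entry(0x80000008);

	if (best && (best->ebx & bit(14)))	/* pretend AMD_IBRS */
		return true;
	best = find_cpuid_entry(7);
	return best && (best->edx & bit(26));
}

int main(void)
{
	printf("guest advertises SPEC_CTRL: %s\n", guest_has_spec_ctrl() ? "yes" : "no");
	return 0;
}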
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a1afd80a68aa..3c70f6c76d3a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -56,7 +56,7 @@
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index df7827a981dd..acbde1249b6f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -37,6 +37,7 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
+#include <asm/microcode.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
@@ -147,6 +148,14 @@ struct vcpu_svm {
u64 gs_base;
} host;
+ u64 spec_ctrl;
+ /*
+ * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+ * translated into the appropriate L2_CFG bits on the host to
+ * perform speculative control.
+ */
+ u64 virt_spec_ctrl;
+
u32 *msrpm;
ulong nmi_iret_rip;
@@ -182,6 +191,8 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_CSTAR, .always = true },
{ .index = MSR_SYSCALL_MASK, .always = true },
#endif
+ { .index = MSR_IA32_SPEC_CTRL, .always = false },
+ { .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -411,6 +422,7 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+ struct vmcb *current_vmcb;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -762,6 +774,25 @@ static bool valid_msr_intercept(u32 index)
return false;
}
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
+{
+ u8 bit_write;
+ unsigned long tmp;
+ u32 offset;
+ u32 *msrpm;
+
+ msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+ to_svm(vcpu)->msrpm;
+
+ offset = svm_msrpm_offset(msr);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ BUG_ON(offset == MSR_INVALID);
+
+ return !!test_bit(bit_write, &tmp);
+}
+
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
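The bit arithmetic in msr_write_intercepted() reflects the SVM MSR permission map layout: each MSR owns two adjacent bits (read intercept, write intercept), sixteen MSRs per 32-bit chunk, so the write bit sits at 2 * (msr & 0x0f) + 1. A simplified sketch that only models the first MSR range; the real svm_msrpm_offset() also maps the 0xc0000000 and 0xc0010000 ranges:

#include <stdbool.h>
#include <stdio.h>

static unsigned int msrpm_offset(unsigned int msr)
{
	return msr / 16;		/* 16 MSRs x 2 bits = one u32 chunk */
}

static bool write_intercepted(const unsigned int *msrpm, unsigned int msr)
{
	unsigned int bit_write = 2 * (msr & 0x0f) + 1;

	return (msrpm[msrpm_offset(msr)] >> bit_write) & 1;
}

int main(void)
{
	unsigned int msrpm[16] = { 0 };

	/* intercept writes (but not reads) of MSR 0x48 */
	msrpm[msrpm_offset(0x48)] |= 1u << (2 * (0x48 & 0x0f) + 1);

	printf("MSR 0x48 write intercepted: %d\n", write_intercepted(msrpm, 0x48));
	printf("MSR 0x49 write intercepted: %d\n", write_intercepted(msrpm, 0x49));
	return 0;
}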
@@ -1120,6 +1151,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
u32 dummy;
u32 eax = 1;
+ svm->spec_ctrl = 0;
+ svm->virt_spec_ctrl = 0;
+
if (!init_event) {
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
@@ -1210,11 +1244,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+ /*
+ * The vmcb page can be recycled, causing a false negative in
+ * svm_vcpu_load(). So do a full IBPB now.
+ */
+ indirect_branch_prediction_barrier();
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
@@ -1239,6 +1279,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
}
}
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ indirect_branch_prediction_barrier();
+ }
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2344,6 +2388,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
+ /*
+ * Drop what we picked up for L2 via svm_complete_interrupts() so it
+ * doesn't end up in L1.
+ */
+ svm->vcpu.arch.nmi_injected = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
+
return 0;
}
@@ -3051,6 +3103,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_VM_CR:
msr_info->data = svm->nested.vm_cr_msr;
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
+ break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_virt_ssbd(vcpu))
+ return 1;
+
+ msr_info->data = svm->virt_spec_ctrl;
+ break;
case MSR_IA32_UCODE_REV:
msr_info->data = 0x01000065;
break;
@@ -3125,6 +3191,59 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ return 1;
+
+ svm->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_svm_vmrun_msrpm.
+ * We update the L1 MSR bit as well since it will end up
+ * touching the MSR anyway now.
+ */
+ set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+ if (is_guest_mode(vcpu))
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_virt_ssbd(vcpu))
+ return 1;
+
+ if (data & ~SPEC_CTRL_SSBD)
+ return 1;
+
+ svm->virt_spec_ctrl = data;
+ break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
@@ -3811,6 +3930,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
local_irq_enable();
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -3915,6 +4042,26 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
#endif
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list. Especially as the former
+ * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * For non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
reload_tss(vcpu);
local_irq_disable();
@@ -4015,8 +4162,15 @@ static bool svm_cpu_has_accelerated_tpr(void)
return false;
}
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
{
+ switch (index) {
+ case MSR_IA32_MCG_EXT_CTL:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
@@ -4299,7 +4453,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+ .has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c5a4b1978cbf..098be61a6b4c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -48,6 +48,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/microcode.h>
#include <asm/spec-ctrl.h>
#include "trace.h"
@@ -109,6 +110,14 @@ static u64 __read_mostly host_xss;
static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+#define MSR_TYPE_RW 3
+
+#define MSR_BITMAP_MODE_X2APIC 1
+#define MSR_BITMAP_MODE_X2APIC_APICV 2
+#define MSR_BITMAP_MODE_LM 4
+
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
@@ -172,7 +181,6 @@ module_param(ple_window_max, int, S_IRUGO);
extern const ulong vmx_return;
#define NR_AUTOLOAD_MSRS 8
-#define VMCS02_POOL_SIZE 1
struct vmcs {
u32 revision_id;
@@ -189,6 +197,7 @@ struct loaded_vmcs {
struct vmcs *vmcs;
int cpu;
int launched;
+ unsigned long *msr_bitmap;
struct list_head loaded_vmcss_on_cpu_link;
};
@@ -205,7 +214,7 @@ struct shared_msr_entry {
* stored in guest memory specified by VMPTRLD, but is opaque to the guest,
* which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
* More than one of these structures may exist, if L1 runs multiple L2 guests.
- * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
+ * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
* underlying hardware which will be used to run L2.
* This structure is packed to ensure that its layout is identical across
* machines (necessary for live migration).
@@ -384,13 +393,6 @@ struct __packed vmcs12 {
*/
#define VMCS12_SIZE 0x1000
-/* Used to remember the last vmcs02 used for some recently used vmcs12s */
-struct vmcs02_list {
- struct list_head list;
- gpa_t vmptr;
- struct loaded_vmcs vmcs02;
-};
-
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -412,16 +414,16 @@ struct nested_vmx {
*/
bool sync_shadow_vmcs;
- /* vmcs02_list cache of VMCSs recently used to run L2 guests */
- struct list_head vmcs02_pool;
- int vmcs02_num;
u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
+
+ struct loaded_vmcs vmcs02;
+
/*
- * Guest pages referred to in vmcs02 with host-physical pointers, so
- * we must keep them pinned while L2 runs.
+ * Guest pages referred to in the vmcs02 with host-physical
+ * pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
@@ -531,6 +533,7 @@ struct vcpu_vmx {
unsigned long host_rsp;
u8 fail;
bool nmi_known_unmasked;
+ u8 msr_bitmap_mode;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
@@ -542,6 +545,10 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
+
+ u64 arch_capabilities;
+ u64 spec_ctrl;
+
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
/*
@@ -889,6 +896,9 @@ static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -908,11 +918,6 @@ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
@@ -1689,6 +1694,52 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
+/*
+ * Check if MSR is intercepted for currently loaded MSR bitmap.
+ */
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
+/*
+ * Check if MSR is intercepted for L01 MSR bitmap.
+ */
+static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
{
@@ -2074,6 +2125,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
+ indirect_branch_prediction_barrier();
}
if (vmx->loaded_vmcs->cpu != cpu) {
@@ -2353,27 +2405,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
vmx->guest_msrs[from] = tmp;
}
-static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
-{
- unsigned long *msr_bitmap;
-
- if (is_guest_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_nested;
- else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
- else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
- } else {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode;
- else
- msr_bitmap = vmx_msr_bitmap_legacy;
- }
-
- vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
-}
-
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
@@ -2414,7 +2445,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx->save_nmsrs = save_nmsrs;
if (cpu_has_vmx_msr_bitmap())
- vmx_set_msr_bitmap(&vmx->vcpu);
+ vmx_update_msr_bitmap(&vmx->vcpu);
}
/*
@@ -2828,6 +2859,19 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
msr_info->data = guest_read_tsc(vcpu);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_arch_capabilities(vcpu))
+ return 1;
+ msr_info->data = to_vmx(vcpu)->arch_capabilities;
+ break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
@@ -2927,6 +2971,68 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging. We update the vmcs01 here for L1 as well
+ * since it will end up touching the MSR anyway now.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_RW);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vmx->arch_capabilities = data;
+ break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -3352,11 +3458,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
return vmcs;
}
-static struct vmcs *alloc_vmcs(void)
-{
- return alloc_vmcs_cpu(raw_smp_processor_id());
-}
-
static void free_vmcs(struct vmcs *vmcs)
{
free_pages((unsigned long)vmcs, vmcs_config.order);
@@ -3372,6 +3473,34 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ if (loaded_vmcs->msr_bitmap)
+ free_page((unsigned long)loaded_vmcs->msr_bitmap);
+}
+
+static struct vmcs *alloc_vmcs(void)
+{
+ return alloc_vmcs_cpu(raw_smp_processor_id());
+}
+
+static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ loaded_vmcs->vmcs = alloc_vmcs();
+ if (!loaded_vmcs->vmcs)
+ return -ENOMEM;
+
+ loaded_vmcs_init(loaded_vmcs);
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!loaded_vmcs->msr_bitmap)
+ goto out_vmcs;
+ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
+ }
+ return 0;
+
+out_vmcs:
+ free_loaded_vmcs(loaded_vmcs);
+ return -ENOMEM;
}
static void free_kvm_area(void)
@@ -4370,10 +4499,8 @@ static void free_vpid(int vpid)
spin_unlock(&vmx_vpid_lock);
}
-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);
@@ -4407,8 +4534,8 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
}
}
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);
@@ -4488,37 +4615,78 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
}
}
-static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
+static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type, bool value)
{
- if (!longmode_only)
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
- msr, MSR_TYPE_R | MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
- msr, MSR_TYPE_R | MSR_TYPE_W);
+ if (value)
+ vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+ else
+ vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
}
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
{
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ u8 mode = 0;
+
+ if (cpu_has_secondary_exec_ctrls() &&
+ (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
+ mode |= MSR_BITMAP_MODE_X2APIC;
+ if (enable_apicv)
+ mode |= MSR_BITMAP_MODE_X2APIC_APICV;
+ }
+
+ if (is_long_mode(vcpu))
+ mode |= MSR_BITMAP_MODE_LM;
+
+ return mode;
}
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
+
+static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
+ u8 mode)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ int msr;
+
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+ }
+
+ if (mode & MSR_BITMAP_MODE_X2APIC) {
+ /*
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
+ */
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+ if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_ID), MSR_TYPE_R);
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+ }
+ }
}
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+ u8 mode = vmx_msr_bitmap_mode(vcpu);
+ u8 changed = mode ^ vmx->msr_bitmap_mode;
+
+ if (!changed)
+ return;
+
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
+ !(mode & MSR_BITMAP_MODE_LM));
+
+ if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
+ vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+
+ vmx->msr_bitmap_mode = mode;
}
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
@@ -4526,6 +4694,28 @@ static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
return enable_apicv && lapic_in_kernel(vcpu);
}
+static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gfn_t gfn;
+
+ /*
+ * Don't need to mark the APIC access page dirty; it is never
+ * written to by the CPU during APIC virtualization.
+ */
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+}
+
+
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4533,18 +4723,15 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
void *vapic_page;
u16 status;
- if (vmx->nested.pi_desc &&
- vmx->nested.pi_pending) {
- vmx->nested.pi_pending = false;
- if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
-
- max_irr = find_last_bit(
- (unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
+ return;
- if (max_irr == 256)
- return;
+ vmx->nested.pi_pending = false;
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return;
+ max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (max_irr != 256) {
vapic_page = kmap(vmx->nested.virtual_apic_page);
__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
kunmap(vmx->nested.virtual_apic_page);
@@ -4556,6 +4743,8 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
+
+ nested_mark_vmcs12_pages_dirty(vcpu);
}
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4818,7 +5007,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
}
if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
+ vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
@@ -4890,6 +5079,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
@@ -4918,6 +5109,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
u64 cr0;
vmx->rmode.vm86_active = 0;
+ vmx->spec_ctrl = 0;
vmx->soft_vnmi_blocked = 0;
@@ -5382,6 +5574,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
return 0;
}
@@ -5973,9 +6166,24 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
- return 1;
+ /*
+ * Doing kvm_skip_emulated_instruction() depends on undefined
+ * behavior: Intel's manual doesn't mandate
+ * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG
+ * occurs and while on real hardware it was observed to be set,
+ * other hypervisors (namely Hyper-V) don't set it, we end up
+ * advancing IP with some random value. Disable fast mmio when
+ * running nested and keep it for real hardware in hope that
+ * VM_EXIT_INSTRUCTION_LEN will always be set correctly.
+ */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ else
+ return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
+ NULL, 0) == EMULATE_DONE;
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6159,7 +6367,7 @@ static void wakeup_handler(void)
static __init int hardware_setup(void)
{
- int r = -ENOMEM, i, msr;
+ int r = -ENOMEM, i;
rdmsrl_safe(MSR_EFER, &host_efer);
@@ -6174,38 +6382,13 @@ static __init int hardware_setup(void)
if (!vmx_io_bitmap_b)
goto out;
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out3;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
-
- if (nested) {
- vmx_msr_bitmap_nested =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_nested)
- goto out5;
- }
-
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
- goto out6;
+ goto out1;
vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
- goto out7;
+ goto out2;
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -6214,14 +6397,9 @@ static __init int hardware_setup(void)
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- if (nested)
- memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out8;
+ goto out3;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -6287,38 +6465,8 @@ static __init int hardware_setup(void)
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
}
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
@@ -6349,21 +6497,10 @@ static __init int hardware_setup(void)
return alloc_kvm_area();
-out8:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out7:
- free_page((unsigned long)vmx_vmread_bitmap);
-out6:
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_vmwrite_bitmap);
out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
+ free_page((unsigned long)vmx_vmread_bitmap);
out1:
free_page((unsigned long)vmx_io_bitmap_b);
out:
@@ -6374,16 +6511,10 @@ out:
static __exit void hardware_unsetup(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
@@ -6427,93 +6558,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
}
/*
- * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
- * We could reuse a single VMCS for all the L2 guests, but we also want the
- * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
- * allows keeping them loaded on the processor, and in the future will allow
- * optimizations where prepare_vmcs02 doesn't need to set all the fields on
- * every entry if they never change.
- * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
- * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first.
- *
- * The following functions allocate and free a vmcs02 in this pool.
- */
-
-/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
-static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmx->nested.current_vmptr) {
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
- /* Recycle the least recently used VMCS. */
- item = list_entry(vmx->nested.vmcs02_pool.prev,
- struct vmcs02_list, list);
- item->vmptr = vmx->nested.current_vmptr;
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- /* Create a new VMCS */
- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
- if (!item)
- return NULL;
- item->vmcs02.vmcs = alloc_vmcs();
- if (!item->vmcs02.vmcs) {
- kfree(item);
- return NULL;
- }
- loaded_vmcs_init(&item->vmcs02);
- item->vmptr = vmx->nested.current_vmptr;
- list_add(&(item->list), &(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num++;
- return &item->vmcs02;
-}
-
-/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
-static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmptr) {
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- return;
- }
-}
-
-/*
- * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
- * must be &vmx->vmcs01.
- */
-static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item, *n;
-
- WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
- list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
- /*
- * Something will leak if the above WARN triggers. Better than
- * a use-after-free.
- */
- if (vmx->loaded_vmcs == &item->vmcs02)
- continue;
-
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- }
-}
-
-/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
* set the success or error code of an emulated VMX instruction, as specified
* by Vol 2B, VMX Instruction Reference, "Conventions".
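For context, the "Conventions" referred to above encode the result of an emulated VMX instruction in RFLAGS: VMsucceed clears CF, PF, AF, ZF, SF and OF; VMfailInvalid sets CF; VMfailValid sets ZF and stores an error number in the VM-instruction error field of the current VMCS. A minimal sketch of the success case, assuming the vmx_get_rflags()/vmx_set_rflags() helpers already used elsewhere in this file:

static void sketch_vmx_succeed(struct kvm_vcpu *vcpu)
{
        /* VMsucceed: clear CF, PF, AF, ZF, SF and OF in the guest's RFLAGS */
        vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
                        & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                            X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}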
@@ -6613,6 +6657,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
+ if (addr_size == 1)
+ off = (gva_t)sign_extend64(off, 31);
+ else if (addr_size == 0)
+ off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
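The displacement pulled from the exit qualification is held in an unsigned 64-bit value, so a negative 32-bit or 16-bit displacement has to be sign-extended before the base and index registers are added. A small, self-contained illustration of what the helper does (sign_extend64(value, index) treats bit index as the sign bit):

#include <stdint.h>

static int64_t sign_extend64(uint64_t value, int index)
{
        int shift = 63 - index;

        /* move the sign bit up to bit 63, then arithmetic-shift it back down */
        return (int64_t)(value << shift) >> shift;
}

/* sign_extend64(0xfffffff0, 31) == -16: a disp32 of -16 no longer reads as ~4 GiB */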
@@ -6655,10 +6703,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
- /* Protected mode: #GP(0)/#SS(0) if the memory
- * operand is outside the segment limit.
+
+ /*
+ * Protected mode: #GP(0)/#SS(0) if the memory operand is
+ * outside the segment limit. All CPUs that support VMX ignore
+ * limit checks for flat segments, i.e. segments with base==0,
+ * limit==0xffffffff and of type expand-up data or code.
*/
- exn = exn || (off + sizeof(u64) > s.limit);
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
+ ((s.type & 8) || !(s.type & 4))))
+ exn = exn || (off + sizeof(u64) > s.limit);
}
if (exn) {
kvm_queue_exception_e(vcpu,
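The added check can be read as a small predicate over the segment descriptor fields, with the type bits spelled out (bit 3 selects code segments, bit 2 marks expand-down data segments). The name below is illustrative, not a kernel helper:

static inline bool segment_is_flat(u32 base, u32 limit, u8 type)
{
        bool code_seg    = type & 8;    /* bit 3: code segment */
        bool expand_down = type & 4;    /* bit 2, data segments only: expand-down */

        /* flat: base 0, 4 GiB limit, and either code or expand-up data */
        return base == 0 && limit == 0xffffffff && (code_seg || !expand_down);
}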
@@ -6786,6 +6840,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
struct vmcs *shadow_vmcs;
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ int r;
/* The Intel VMX Instruction Reference lists a bunch of bits that
* are prerequisite to running VMXON, most notably cr4.VMXE must be
@@ -6825,10 +6880,14 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
+ r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
+ if (r < 0)
+ goto out_vmcs02;
+
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
if (!shadow_vmcs)
- return -ENOMEM;
+ goto out_shadow_vmcs;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
@@ -6836,9 +6895,6 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.current_shadow_vmcs = shadow_vmcs;
}
- INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num = 0;
-
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
@@ -6850,6 +6906,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
+
+out_shadow_vmcs:
+ free_loaded_vmcs(&vmx->nested.vmcs02);
+
+out_vmcs02:
+ return -ENOMEM;
}
/*
@@ -6916,12 +6978,13 @@ static void free_nested(struct vcpu_vmx *vmx)
if (!vmx->nested.vmxon)
return;
+ hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
- /* Unpin physical memory we referred to in current vmcs02 */
+ /* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
@@ -6937,7 +7000,7 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.pi_desc = NULL;
}
- nested_free_all_saved_vmcss(vmx);
+ free_loaded_vmcs(&vmx->nested.vmcs02);
}
/* Emulate the VMXOFF instruction */
@@ -6971,8 +7034,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
vmptr + offsetof(struct vmcs12, launch_state),
&zero, sizeof(zero));
- nested_free_vmcs02(vmx, vmptr);
-
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
@@ -7757,6 +7818,19 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
+ * Page). The CPU may write to these pages via their host
+ * physical address while L2 is running, bypassing any
+ * address-translation-based dirty tracking (e.g. EPT write
+ * protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
if (vmx->nested.nested_run_pending)
return false;
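A hedged sketch of what nested_mark_vmcs12_pages_dirty() plausibly covers, assuming the existing nested_cpu_has*() and kvm_vcpu_mark_page_dirty() helpers; the exact set of pages handled is an assumption, not taken from this hunk:

static void sketch_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        /* virtual-APIC page: the CPU updates it via its host physical address */
        if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
                kvm_vcpu_mark_page_dirty(vcpu,
                                vmcs12->virtual_apic_page_addr >> PAGE_SHIFT);

        /* posted-interrupt descriptor, likewise written behind EPT's back */
        if (nested_cpu_has_posted_intr(vmcs12))
                kvm_vcpu_mark_page_dirty(vcpu,
                                vmcs12->posted_intr_desc_addr >> PAGE_SHIFT);
}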
@@ -8244,7 +8318,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
- vmx_set_msr_bitmap(vcpu);
+ vmx_update_msr_bitmap(vcpu);
}
static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
@@ -8413,9 +8487,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
local_irq_enable();
}
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
{
- return enable_unrestricted_guest || emulate_invalid_guest_state;
+ switch (index) {
+ case MSR_IA32_SMBASE:
+ /*
+ * We cannot do SMM unless we can run the guest in big
+ * real mode.
+ */
+ return enable_unrestricted_guest || emulate_invalid_guest_state;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ /* This is AMD only. */
+ return false;
+ default:
+ return true;
+ }
}
static bool vmx_mpx_supported(void)
@@ -8607,7 +8693,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
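The entry-side idea, as a minimal sketch assuming the host leaves SPEC_CTRL at 0 because the kernel does not use IBRS: the WRMSR is only paid for when the guest value differs.

static void sketch_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
        u64 host_spec_ctrl = 0;         /* assumption: host runs with SPEC_CTRL == 0 */

        if (guest_spec_ctrl != host_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}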
vmx->__launched = vmx->loaded_vmcs->launched;
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -8725,6 +8820,26 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+	 * it to the atomic save/restore list, especially as the former
+	 * (saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * For non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
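The bitmap test above depends on the MSR-bitmap layout: a single 4 KiB page in which write-intercept bits for MSRs 0x00000000-0x00001fff start at byte offset 0x800 and those for 0xc0000000-0xc0001fff at 0xc00. A hedged sketch of such a lookup (illustrative name, not necessarily the kernel's msr_write_intercepted()):

static bool sketch_msr_write_intercepted(unsigned long *msr_bitmap, u32 msr)
{
        int f = sizeof(unsigned long);

        if (msr <= 0x1fff)                              /* write-low quadrant */
                return test_bit(msr, msr_bitmap + 0x800 / f);
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)     /* write-high quadrant */
                return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);

        return true;                                    /* out of range: always intercepted */
}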
@@ -8824,6 +8939,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ unsigned long *msr_bitmap;
int cpu;
if (!vmx)
@@ -8856,16 +8972,24 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx->guest_msrs)
goto free_pml;
- vmx->loaded_vmcs = &vmx->vmcs01;
- vmx->loaded_vmcs->vmcs = alloc_vmcs();
- if (!vmx->loaded_vmcs->vmcs)
- goto free_msrs;
if (!vmm_exclusive)
kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
- loaded_vmcs_init(vmx->loaded_vmcs);
+ err = alloc_loaded_vmcs(&vmx->vmcs01);
if (!vmm_exclusive)
kvm_cpu_vmxoff();
+ if (err < 0)
+ goto free_msrs;
+ msr_bitmap = vmx->vmcs01.msr_bitmap;
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+ vmx->msr_bitmap_mode = 0;
+
+ vmx->loaded_vmcs = &vmx->vmcs01;
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
@@ -9248,9 +9372,26 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
{
int msr;
struct page *page;
- unsigned long *msr_bitmap;
+ unsigned long *msr_bitmap_l1;
+ unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
+ /*
+ * pred_cmd & spec_ctrl are trying to verify two things:
+ *
+	 * 1. L0 gave L1 permission to actually pass the MSR through. This
+ * ensures that we do not accidentally generate an L02 MSR bitmap
+ * from the L12 MSR bitmap that is too permissive.
+	 * 2. L1 (or one of its L2s) has actually used the MSR. This avoids
+	 *    unnecessary merging of the bitmaps if the MSR is unused. This
+	 *    works properly because we only update the L01 MSR bitmap lazily.
+	 *    So even if L0 would pass these MSRs through to L1, the L01 bitmap is only
+ * updated to reflect this when L1 (or its L2s) actually write to
+ * the MSR.
+ */
+ bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+ bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !pred_cmd && !spec_ctrl)
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
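The merge performed in the following hunk follows a simple rule: an MSR access may skip the VM exit only if both L0 and L1 allow it, so a bit is cleared in the L02 (vmcs02) bitmap only when it is already clear in L1's own (L12) bitmap. A minimal sketch for the write-low quadrant, with illustrative names:

static void sketch_merge_write_intercept(unsigned long *msr_bitmap_l1,
                                         unsigned long *msr_bitmap_l0, u32 msr)
{
        int f = sizeof(unsigned long);

        /* start from "intercept everything" (the memset to 0xff) and punch holes */
        if (msr <= 0x1fff && !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
                __clear_bit(msr, msr_bitmap_l0 + 0x800 / f);
}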
@@ -9258,59 +9399,46 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
WARN_ON(1);
return false;
}
- msr_bitmap = (unsigned long *)kmap(page);
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+
+ memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
msr, MSR_TYPE_R);
- /* TPR is allowed */
- nested_vmx_disable_intercept_for_msr(msr_bitmap,
- vmx_msr_bitmap_nested,
+
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);
+
if (nested_cpu_has_vid(vmcs12)) {
- /* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
- } else {
- /*
- * Enable reading intercept of all the x2apic
- * MSRs. We should not rely on vmcs12 to do any
- * optimizations here, it may have been modified
- * by L1.
- */
- for (msr = 0x800; msr <= 0x8ff; msr++)
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- msr,
- MSR_TYPE_R);
-
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_TASKPRI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_EOI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
- MSR_TYPE_W);
}
+
+ if (spec_ctrl)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_R | MSR_TYPE_W);
+
+ if (pred_cmd)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+
kunmap(page);
nested_release_page_clean(page);
@@ -9729,10 +9857,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
if (cpu_has_vmx_msr_bitmap() &&
- exec_control & CPU_BASED_USE_MSR_BITMAPS) {
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
- /* MSR_BITMAP will be set by following vmx_set_efer. */
- } else
+ exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+ ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+ else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
/*
@@ -9784,6 +9912,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12, the
@@ -9876,7 +10007,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
- struct loaded_vmcs *vmcs02;
bool ia32e;
u32 msr_entry_idx;
@@ -10016,10 +10146,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* the nested entry.
*/
- vmcs02 = nested_get_current_vmcs02(vmx);
- if (!vmcs02)
- return -ENOMEM;
-
enter_guest_mode(vcpu);
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
@@ -10028,7 +10154,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
cpu = get_cpu();
- vmx->loaded_vmcs = vmcs02;
+ vmx->loaded_vmcs = &vmx->nested.vmcs02;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
@@ -10489,7 +10615,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
if (cpu_has_vmx_msr_bitmap())
- vmx_set_msr_bitmap(vcpu);
+ vmx_update_msr_bitmap(vcpu);
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count))
@@ -10540,10 +10666,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);
- /* if no vmcs02 cache requested, remove the one we used */
- if (VMCS02_POOL_SIZE == 0)
- nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
-
load_vmcs12_host_state(vcpu, vmcs12);
/* Update TSC_OFFSET if TSC was changed while L2 ran */
@@ -10871,7 +10993,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+ .has_emulated_msr = vmx_has_emulated_msr,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e6ab034f0bc7..706c5d63a53f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -961,6 +961,7 @@ static u32 msrs_to_save[] = {
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
};
static unsigned num_msrs_to_save;
@@ -984,6 +985,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
MSR_IA32_SMBASE,
+ MSR_AMD64_VIRT_SPEC_CTRL,
};
static unsigned num_emulated_msrs;
@@ -2583,7 +2585,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* fringe case that is not enabled except via specific settings
* of the module parameters.
*/
- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4072,14 +4074,8 @@ static void kvm_init_msr_list(void)
num_msrs_to_save = j;
for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
- switch (emulated_msrs[i]) {
- case MSR_IA32_SMBASE:
- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
- continue;
- break;
- default:
- break;
- }
+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+ continue;
if (j < i)
emulated_msrs[j] = emulated_msrs[i];
@@ -4251,6 +4247,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
@@ -5440,7 +5443,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
- if (kvm_vcpu_check_breakpoint(vcpu, &r))
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
@@ -5527,8 +5531,7 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE &&
- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ if (r == EMULATE_DONE && ctxt->tf)
kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -6475,6 +6478,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
r = 0;
goto out;
}
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 91d93b95bd86..0a6fcae404f8 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,12 +570,12 @@ do { \
unsigned long __copy_to_user_ll(void __user *to, const void *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel(to, from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
@@ -583,12 +583,12 @@ EXPORT_SYMBOL(__copy_to_user_ll);
unsigned long __copy_from_user_ll(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to, from, n);
else
n = __copy_user_zeroing_intel(to, from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);
@@ -596,13 +596,13 @@ EXPORT_SYMBOL(__copy_from_user_ll);
unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel((void __user *)to,
(const void *)from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
@@ -610,7 +610,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && cpu_has_xmm2)
n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -619,7 +619,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
#else
__copy_user_zeroing(to, from, n);
#endif
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
@@ -636,7 +636,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
#else
__copy_user(to, from, n);
#endif
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
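These conversions replace a bare STAC/CLAC pair with helpers that also fence speculation, so a user copy cannot be executed speculatively with an unchecked, attacker-controlled pointer. Assuming the helper follows its upstream definition, it is essentially a STAC plus an LFENCE-based barrier:

/* sketch, assuming the upstream definition of the helper */
#define __uaccess_begin_nospec()        \
({                                      \
        stac();                         \
        barrier_nospec();               \
})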
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ae9a37bf1371..7d2542ad346a 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -388,7 +388,7 @@ slow_irqon:
ret = get_user_pages_unlocked(current, mm, start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7ed47b1e6f42..7e94fc6f608a 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -536,10 +536,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
{
long gup_ret;
int nr_pages = 1;
- int force = 0;
gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
- nr_pages, write, force, NULL, NULL);
+ nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 526536c81ddc..ca1e8e6dccc8 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
word1 = read_pci_config_16(bus, slot, func, 0xc0);
word2 = read_pci_config_16(bus, slot, func, 0xc2);
if (word1 != word2) {
- res.start = (word1 << 16) | 0x0000;
- res.end = (word2 << 16) | 0xffff;
+ res.start = ((resource_size_t) word1 << 16) | 0x0000;
+ res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM;
update_res(info, res.start, res.end, res.flags, 0);
}
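A hedged illustration of why the cast matters on configurations where resource_size_t is 64 bits (CONFIG_PHYS_ADDR_T_64BIT): word1/word2 are u16, so without the cast the shift is performed as a signed int, and values of 0x8000 or above sign-extend when the result is widened:

u16 word2 = 0xc000;
resource_size_t end;

end = (word2 << 16) | 0xffff;                   /* promoted to int, then sign-extended:
                                                   0xffffffffc000ffff */
end = ((resource_size_t) word2 << 16) | 0xffff; /* intended value: 0x00000000c000ffff */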
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 524142117296..82324fc25d5e 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
num--;
}
- if (efi_x >= si->lfb_width) {
+ if (efi_x + font->width > si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 22eeacba37cc..199e05f85e89 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -35,6 +35,7 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index c7b3bedbfffe..e3823b4f9d08 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -286,12 +286,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
- memw
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@@ -328,11 +329,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
+ memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
+ memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 4d02e38514f5..54bb8e0473a0 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -80,7 +80,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
- for (i = 0; i < max_cpus; ++i)
+ for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@@ -93,6 +93,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+		pr_info("%s: limiting core count to %d\n", __func__, ncpus);
+ }
+
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@@ -192,9 +197,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
- cpu_start_id = cpu;
- system_flush_invalidate_dcache_range(
- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@@ -203,18 +210,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
- cpu_start_ccount = ccount;
+ WRITE_ONCE(cpu_start_ccount, ccount);
- while (time_before(jiffies, timeout)) {
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
mb();
- if (!cpu_start_ccount)
- break;
- }
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
- if (cpu_start_ccount) {
+ if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
- (void *)cpu, 1);
- cpu_start_ccount = 0;
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@@ -234,6 +244,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
+ init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@@ -295,8 +306,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
- sizeof(cpu_start_id));
- if (cpu_start_id == -cpu) {
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index b9ad9feadc2d..a992cb6a47db 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -87,7 +87,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
- disable_irq(evt->irq);
+ disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;