author     Marcel Ziswiler <marcel.ziswiler@toradex.com>   2020-05-19 23:01:26 +0200
committer  Marcel Ziswiler <marcel.ziswiler@toradex.com>   2020-05-19 23:37:01 +0200
commit     2ae782ca839e0ee07de37122ddea362adff2e975 (patch)
tree       df6b1a190760f51465122ca4c13492d5ac5984c6
parent     0a8ab17689e628c84a666195bfc6ab85d11cf057 (diff)
parent     0661b3d6cfd774e28a2e2ba90a3d87479e5c399b (diff)
Merge tag 'v4.9.220' into 4.9-2.3.x-imx
This is the 4.9.220 stable release

Conflicts:
    arch/arm/Kconfig.debug
    arch/arm/boot/dts/imx7s.dtsi
    arch/arm/mach-imx/common.h
    arch/arm/mach-imx/cpuidle-imx6q.c
    arch/arm/mach-imx/cpuidle-imx6sx.c
    arch/arm/mach-imx/suspend-imx6.S
    block/blk-core.c
    drivers/crypto/caam/caamalg.c
    drivers/crypto/mxs-dcp.c
    drivers/dma/imx-sdma.c
    drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
    drivers/input/keyboard/imx_keypad.c
    drivers/input/keyboard/snvs_pwrkey.c
    drivers/mmc/host/sdhci.c
    drivers/net/can/flexcan.c
    drivers/net/ethernet/freescale/fec_main.c
    drivers/net/phy/phy_device.c
    drivers/net/wireless/ath/ath10k/pci.c
    drivers/tty/serial/imx.c
    drivers/usb/dwc3/gadget.c
    drivers/usb/host/xhci.c
    include/linux/blkdev.h
    include/linux/cpu.h
    include/linux/platform_data/dma-imx-sdma.h
    kernel/cpu.c
    net/wireless/util.c
    sound/soc/fsl/Kconfig
    sound/soc/fsl/fsl_esai.c
    sound/soc/fsl/fsl_sai.c
    sound/soc/fsl/imx-sgtl5000.c
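For reference, a minimal sketch of how a stable-tag merge of this kind is typically reproduced with stock git commands; the branch and tag names are taken from the commit subject above, and the actual conflict resolution is specific to this tree:

    git checkout 4.9-2.3.x-imx
    git merge v4.9.220        # pull in the 4.9.220 stable release tag
    # resolve the conflicted paths listed above, then
    git add -u
    git commit                # record the merge commit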
-rw-r--r--Documentation/ABI/testing/sysfs-bus-mei2
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu6
-rw-r--r--Documentation/arm/kernel_mode_neon.txt4
-rw-r--r--Documentation/conf.py2
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt1
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/fman.txt7
-rw-r--r--Documentation/devicetree/bindings/rtc/abracon,abx80x.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/mvebu-uart.txt2
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/hid/uhid.txt2
-rw-r--r--Documentation/hw-vuln/index.rst15
-rw-r--r--Documentation/hw-vuln/l1tf.rst (renamed from Documentation/l1tf.rst)9
-rw-r--r--Documentation/hw-vuln/mds.rst311
-rw-r--r--Documentation/hw-vuln/multihit.rst163
-rw-r--r--Documentation/hw-vuln/tsx_async_abort.rst279
-rw-r--r--Documentation/index.rst19
-rw-r--r--Documentation/kernel-parameters.txt246
-rw-r--r--Documentation/networking/ip-sysctl.txt9
-rw-r--r--Documentation/siphash.txt175
-rw-r--r--Documentation/spec_ctrl.txt9
-rw-r--r--Documentation/sysctl/net.txt8
-rw-r--r--Documentation/usb/power-management.txt14
-rw-r--r--Documentation/usb/rio.txt138
-rw-r--r--Documentation/virtual/kvm/api.txt16
-rw-r--r--Documentation/virtual/kvm/locking.txt6
-rw-r--r--Documentation/x86/conf.py10
-rw-r--r--Documentation/x86/index.rst9
-rw-r--r--Documentation/x86/mds.rst193
-rw-r--r--Documentation/x86/tsx_async_abort.rst117
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile36
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi1
-rw-r--r--arch/arc/include/asm/bug.h3
-rw-r--r--arch/arc/include/asm/cmpxchg.h14
-rw-r--r--arch/arc/include/asm/linkage.h2
-rw-r--r--arch/arc/kernel/head.S1
-rw-r--r--arch/arc/kernel/perf_event.c4
-rw-r--r--arch/arc/kernel/setup.c8
-rw-r--r--arch/arc/kernel/traps.c9
-rw-r--r--arch/arc/kernel/unwind.c9
-rw-r--r--arch/arc/mm/tlb.c13
-rw-r--r--arch/arc/plat-eznps/Kconfig2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/Kconfig.debug53
-rw-r--r--arch/arm/boot/compressed/efi-header.S3
-rw-r--r--arch/arm/boot/compressed/head.S16
-rw-r--r--arch/arm/boot/compressed/libfdt_env.h4
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts12
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/am437x-gp-evm.dts2
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts2
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi21
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb1176.dts6
-rw-r--r--arch/arm/boot/dts/arm-realview-pb11mp.dts6
-rw-r--r--arch/arm/boot/dts/arm-realview-pbx.dtsi7
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts2
-rw-r--r--arch/arm/boot/dts/dove-cubox.dts2
-rw-r--r--arch/arm/boot/dts/dove.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts9
-rw-r--r--arch/arm/boot/dts/exynos5250-snow-rev5.dts11
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts2
-rw-r--r--arch/arm/boot/dts/exynos5420-peach-pit.dts4
-rw-r--r--arch/arm/boot/dts/exynos5800-peach-pi.dts4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi10
-rw-r--r--arch/arm/boot/dts/imx7d-cl-som-imx7.dts4
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi14
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi18
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi20
-rw-r--r--arch/arm/boot/dts/lpc3250-phy3250.dts4
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi32
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts9
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi9
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi49
-rw-r--r--arch/arm/boot/dts/omap3-pandora-common.dtsi36
-rw-r--r--arch/arm/boot/dts/omap3-tao3530.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5.dtsi1
-rw-r--r--arch/arm/boot/dts/orion5x-linkstation.dtsi2
-rw-r--r--arch/arm/boot/dts/pxa27x.dtsi4
-rw-r--r--arch/arm/boot/dts/pxa2xx.dtsi7
-rw-r--r--arch/arm/boot/dts/pxa3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7779.dtsi8
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-som.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-mickey.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-minnie.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/s3c6410-mini6410.dts4
-rw-r--r--arch/arm/boot/dts/s3c6410-smdk6410.dts4
-rw-r--r--arch/arm/boot/dts/sama5d2-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi28
-rw-r--r--arch/arm/boot/dts/sama5d3_can.dtsi4
-rw-r--r--arch/arm/boot/dts/sama5d3_tcb1.dtsi1
-rw-r--r--arch/arm/boot/dts/sama5d3_uart.dtsi4
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts2
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi6
-rw-r--r--arch/arm/boot/dts/ste-href-family-pinctrl.dtsi8
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts2
-rw-r--r--arch/arm/boot/dts/ste-u300.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20-paz00.dts6
-rw-r--r--arch/arm/boot/dts/tegra30-apalis.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi6
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts2
-rw-r--r--arch/arm/common/mcpm_entry.c2
-rw-r--r--arch/arm/configs/badge4_defconfig1
-rw-r--r--arch/arm/configs/corgi_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig1
-rw-r--r--arch/arm/configs/s3c2410_defconfig1
-rw-r--r--arch/arm/configs/spitz_defconfig1
-rw-r--r--arch/arm/crypto/aesbs-glue.c4
-rw-r--r--arch/arm/crypto/sha256-armv4.pl3
-rw-r--r--arch/arm/crypto/sha256-core.S_shipped3
-rw-r--r--arch/arm/crypto/sha512-armv4.pl3
-rw-r--r--arch/arm/crypto/sha512-core.S_shipped3
-rw-r--r--arch/arm/include/asm/barrier.h2
-rw-r--r--arch/arm/include/asm/cp15.h2
-rw-r--r--arch/arm/include/asm/hardirq.h1
-rw-r--r--arch/arm/include/asm/processor.h6
-rw-r--r--arch/arm/include/asm/suspend.h1
-rw-r--r--arch/arm/include/asm/uaccess.h18
-rw-r--r--arch/arm/kernel/entry-common.S9
-rw-r--r--arch/arm/kernel/hyp-stub.S4
-rw-r--r--arch/arm/kernel/machine_kexec.c5
-rw-r--r--arch/arm/kernel/patch.c6
-rw-r--r--arch/arm/kernel/sleep.S12
-rw-r--r--arch/arm/kernel/smp.c10
-rw-r--r--arch/arm/kernel/unwind.c14
-rw-r--r--arch/arm/kernel/vdso.c2
-rw-r--r--arch/arm/kvm/arm.c11
-rw-r--r--arch/arm/kvm/mmio.c7
-rw-r--r--arch/arm/kvm/mmu.c3
-rw-r--r--arch/arm/lib/Makefile2
-rw-r--r--arch/arm/lib/copy_from_user.S2
-rw-r--r--arch/arm/lib/getuser.S11
-rw-r--r--arch/arm/lib/putuser.S20
-rw-r--r--arch/arm/lib/xor-neon.c2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c4
-rw-r--r--arch/arm/mach-davinci/sleep.S1
-rw-r--r--arch/arm/mach-exynos/firmware.c1
-rw-r--r--arch/arm/mach-exynos/suspend.c21
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/common.h4
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c30
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c2
-rw-r--r--arch/arm/mach-imx/pm-imx6.c25
-rw-r--r--arch/arm/mach-imx/resume-imx6.S24
-rw-r--r--arch/arm/mach-imx/suspend-imx6.S17
-rw-r--r--arch/arm/mach-iop13xx/setup.c8
-rw-r--r--arch/arm/mach-iop13xx/tpmi.c10
-rw-r--r--arch/arm/mach-ks8695/board-acs5k.c2
-rw-r--r--arch/arm/mach-omap1/id.c6
-rw-r--r--arch/arm/mach-omap2/id.c4
-rw-r--r--arch/arm/mach-omap2/omap4-common.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c3
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c93
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c2
-rw-r--r--arch/arm/mach-omap2/prm_common.c4
-rw-r--r--arch/arm/mach-rpc/dma.c5
-rw-r--r--arch/arm/mach-rpc/irq.c3
-rw-r--r--arch/arm/mach-tegra/reset-handler.S6
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S11
-rw-r--r--arch/arm/mach-vexpress/spc.c12
-rw-r--r--arch/arm/mach-zynq/platsmp.c2
-rw-r--r--arch/arm/mm/alignment.c44
-rw-r--r--arch/arm/mm/fault.c4
-rw-r--r--arch/arm/mm/fault.h1
-rw-r--r--arch/arm/mm/init.c8
-rw-r--r--arch/arm/mm/mmu.c19
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm/plat-iop/adma.c6
-rw-r--r--arch/arm/plat-orion/common.c4
-rw-r--r--arch/arm/plat-pxa/ssp.c6
-rw-r--r--arch/arm/plat-samsung/Kconfig2
-rw-r--r--arch/arm/vdso/vgettimeofday.c5
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/Makefile2
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi8
-rw-r--r--arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/juno-clocks.dtsi4
-rw-r--r--arch/arm64/boot/dts/lg/lg1312.dtsi4
-rw-r--r--arch/arm64/boot/dts/lg/lg1313.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi2
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c2
-rw-r--r--arch/arm64/crypto/sha256-core.S2061
-rw-r--r--arch/arm64/crypto/sha512-core.S1085
-rw-r--r--arch/arm64/include/asm/alternative.h34
-rw-r--r--arch/arm64/include/asm/cmpxchg.h6
-rw-r--r--arch/arm64/include/asm/compat.h1
-rw-r--r--arch/arm64/include/asm/cpufeature.h7
-rw-r--r--arch/arm64/include/asm/efi.h6
-rw-r--r--arch/arm64/include/asm/futex.h14
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h5
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h5
-rw-r--r--arch/arm64/include/asm/pgtable.h18
-rw-r--r--arch/arm64/include/asm/processor.h8
-rw-r--r--arch/arm64/include/asm/system_misc.h2
-rw-r--r--arch/arm64/include/asm/vdso_datapage.h1
-rw-r--r--arch/arm64/kernel/acpi.c10
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c2
-rw-r--r--arch/arm64/kernel/asm-offsets.c2
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpu_ops.c1
-rw-r--r--arch/arm64/kernel/cpufeature.c19
-rw-r--r--arch/arm64/kernel/debug-monitors.c1
-rw-r--r--arch/arm64/kernel/head.S28
-rw-r--r--arch/arm64/kernel/hibernate.c3
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c7
-rw-r--r--arch/arm64/kernel/image.h6
-rw-r--r--arch/arm64/kernel/kgdb.c15
-rw-r--r--arch/arm64/kernel/module.c8
-rw-r--r--arch/arm64/kernel/probes/kprobes.c6
-rw-r--r--arch/arm64/kernel/psci.c15
-rw-r--r--arch/arm64/kernel/smp.c23
-rw-r--r--arch/arm64/kernel/traps.c1
-rw-r--r--arch/arm64/kernel/vdso.c13
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S7
-rw-r--r--arch/arm64/lib/clear_user.S2
-rw-r--r--arch/arm64/lib/copy_from_user.S2
-rw-r--r--arch/arm64/lib/copy_in_user.S2
-rw-r--r--arch/arm64/lib/copy_to_user.S2
-rw-r--r--arch/arm64/mm/fault.c48
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/arm64/mm/kasan_init.c2
-rw-r--r--arch/arm64/mm/mmu.c113
-rw-r--r--arch/arm64/mm/numa.c2
-rw-r--r--arch/arm64/mm/proc.S49
-rw-r--r--arch/arm64/net/bpf_jit_comp.c2
-rw-r--r--arch/h8300/Makefile2
-rw-r--r--arch/hexagon/include/asm/atomic.h8
-rw-r--r--arch/hexagon/include/asm/bitops.h8
-rw-r--r--arch/hexagon/include/asm/cmpxchg.h2
-rw-r--r--arch/hexagon/include/asm/futex.h6
-rw-r--r--arch/hexagon/include/asm/spinlock.h20
-rw-r--r--arch/hexagon/kernel/stacktrace.c4
-rw-r--r--arch/hexagon/kernel/vm_entry.S2
-rw-r--r--arch/ia64/include/asm/bug.h6
-rw-r--r--arch/ia64/kernel/module.c8
-rw-r--r--arch/ia64/mm/numa.c1
-rw-r--r--arch/m68k/amiga/cia.c9
-rw-r--r--arch/m68k/atari/ataints.c4
-rw-r--r--arch/m68k/atari/time.c15
-rw-r--r--arch/m68k/bvme6000/config.c20
-rw-r--r--arch/m68k/hp300/time.c10
-rw-r--r--arch/m68k/include/asm/bug.h3
-rw-r--r--arch/m68k/kernel/uboot.c2
-rw-r--r--arch/m68k/mac/via.c119
-rw-r--r--arch/m68k/mvme147/config.c18
-rw-r--r--arch/m68k/mvme16x/config.c21
-rw-r--r--arch/m68k/q40/q40ints.c19
-rw-r--r--arch/m68k/sun3/sun3ints.c3
-rw-r--r--arch/m68k/sun3x/time.c16
-rw-r--r--arch/microblaze/Makefile12
-rw-r--r--arch/microblaze/boot/Makefile4
-rw-r--r--arch/microblaze/kernel/cpu/cache.c3
-rw-r--r--arch/mips/Kconfig5
-rw-r--r--arch/mips/bcm47xx/workarounds.c8
-rw-r--r--arch/mips/bcm63xx/prom.c2
-rw-r--r--arch/mips/bcm63xx/reset.c2
-rw-r--r--arch/mips/boot/compressed/Makefile5
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c2
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c2
-rw-r--r--arch/mips/configs/mtx1_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/fw/sni/sniprom.c2
-rw-r--r--arch/mips/include/asm/bmips.h10
-rw-r--r--arch/mips/include/asm/compiler.h35
-rw-r--r--arch/mips/include/asm/io.h14
-rw-r--r--arch/mips/include/asm/kexec.h6
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h2
-rw-r--r--arch/mips/include/asm/smp.h12
-rw-r--r--arch/mips/include/asm/thread_info.h20
-rw-r--r--arch/mips/include/uapi/asm/sgidefs.h8
-rw-r--r--arch/mips/kernel/i8253.c3
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c8
-rw-r--r--arch/mips/kernel/uprobes.c3
-rw-r--r--arch/mips/kernel/vpe.c2
-rw-r--r--arch/mips/lantiq/irq.c5
-rw-r--r--arch/mips/loongson64/Platform4
-rw-r--r--arch/mips/loongson64/common/serial.c2
-rw-r--r--arch/mips/loongson64/loongson-3/platform.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c4
-rw-r--r--arch/mips/mm/tlbex.c30
-rw-r--r--arch/mips/net/bpf_jit.c2
-rw-r--r--arch/mips/pistachio/Platform1
-rw-r--r--arch/mips/sibyte/common/Makefile1
-rw-r--r--arch/mips/sibyte/common/dma.c14
-rw-r--r--arch/mips/txx9/generic/setup.c5
-rw-r--r--arch/mips/vdso/Makefile5
-rw-r--r--arch/nios2/kernel/nios2_ksyms.c12
-rw-r--r--arch/openrisc/kernel/entry.S2
-rw-r--r--arch/openrisc/kernel/head.S2
-rw-r--r--arch/parisc/include/asm/assembly.h6
-rw-r--r--arch/parisc/include/asm/cmpxchg.h10
-rw-r--r--arch/parisc/include/asm/processor.h2
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/process.c7
-rw-r--r--arch/parisc/kernel/ptrace.c31
-rw-r--r--arch/parisc/kernel/setup.c3
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/parisc/kernel/time.c2
-rw-r--r--arch/parisc/math-emu/cnv_float.h8
-rw-r--r--arch/parisc/mm/ioremap.c12
-rw-r--r--arch/powerpc/Kconfig8
-rw-r--r--arch/powerpc/Makefile2
-rw-r--r--arch/powerpc/boot/4xx.c2
-rw-r--r--arch/powerpc/boot/addnote.c6
-rw-r--r--arch/powerpc/boot/dts/bamboo.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi1
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi1
-rw-r--r--arch/powerpc/boot/libfdt_env.h4
-rw-r--r--arch/powerpc/boot/xz_config.h20
-rw-r--r--arch/powerpc/include/asm/archrandom.h2
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h9
-rw-r--r--arch/powerpc/include/asm/barrier.h21
-rw-r--r--arch/powerpc/include/asm/code-patching-asm.h18
-rw-r--r--arch/powerpc/include/asm/code-patching.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h1
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h21
-rw-r--r--arch/powerpc/include/asm/futex.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h23
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h2
-rw-r--r--arch/powerpc/include/asm/security_features.h10
-rw-r--r--arch/powerpc/include/asm/setup.h21
-rw-r--r--arch/powerpc/include/asm/sfp-machine.h92
-rw-r--r--arch/powerpc/include/asm/topology.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h11
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h2
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/cacheinfo.c17
-rw-r--r--arch/powerpc/kernel/cacheinfo.h4
-rw-r--r--arch/powerpc/kernel/cputable.c14
-rw-r--r--arch/powerpc/kernel/eeh.c15
-rw-r--r--arch/powerpc/kernel/eeh_driver.c6
-rw-r--r--arch/powerpc/kernel/eeh_pe.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S10
-rw-r--r--arch/powerpc/kernel/entry_64.S75
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S27
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S13
-rw-r--r--arch/powerpc/kernel/head_booke.h12
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S15
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c4
-rw-r--r--arch/powerpc/kernel/module.c10
-rw-r--r--arch/powerpc/kernel/pci_dn.c15
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c2
-rw-r--r--arch/powerpc/kernel/process.c6
-rw-r--r--arch/powerpc/kernel/prom.c6
-rw-r--r--arch/powerpc/kernel/rtas.c13
-rw-r--r--arch/powerpc/kernel/security.c300
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c32
-rw-r--r--arch/powerpc/kernel/swsusp_32.S73
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S2
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/kernel/vdso32/datapage.S1
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S8
-rw-r--r--arch/powerpc/kernel/vdso64/cacheflush.S4
-rw-r--r--arch/powerpc/kernel/vdso64/datapage.S1
-rw-r--r--arch/powerpc/kernel/vdso64/gettimeofday.S8
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S25
-rw-r--r--arch/powerpc/kvm/book3s.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c13
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S27
-rw-r--r--arch/powerpc/kvm/book3s_pr.c4
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c14
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S4
-rw-r--r--arch/powerpc/kvm/e500_emulate.c7
-rw-r--r--arch/powerpc/lib/code-patching.c24
-rw-r--r--arch/powerpc/lib/copypage_power7.S14
-rw-r--r--arch/powerpc/lib/copyuser_power7.S66
-rw-r--r--arch/powerpc/lib/feature-fixups.c93
-rw-r--r--arch/powerpc/lib/memcpy_power7.S66
-rw-r--r--arch/powerpc/lib/string_64.S2
-rw-r--r--arch/powerpc/mm/fault.c17
-rw-r--r--arch/powerpc/mm/hash_utils_64.c10
-rw-r--r--arch/powerpc/mm/mem.c10
-rw-r--r--arch/powerpc/mm/numa.c27
-rw-r--r--arch/powerpc/mm/pgtable-radix.c16
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c4
-rw-r--r--arch/powerpc/mm/slb.c2
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S7
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S12
-rw-r--r--arch/powerpc/net/bpf_jit.h19
-rw-r--r--arch/powerpc/net/bpf_jit32.h4
-rw-r--r--arch/powerpc/net/bpf_jit64.h20
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c22
-rw-r--r--arch/powerpc/perf/core-book3s.c6
-rw-r--r--arch/powerpc/perf/power8-pmu.c3
-rw-r--r--arch/powerpc/perf/power9-pmu.c3
-rw-r--r--arch/powerpc/platforms/83xx/misc.c17
-rw-r--r--arch/powerpc/platforms/maple/setup.c34
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S68
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal.c11
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c23
-rw-r--r--arch/powerpc/platforms/powernv/pci.c25
-rw-r--r--arch/powerpc/platforms/powernv/setup.c7
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c2
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c5
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c4
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c19
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c7
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c2
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c43
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c9
-rw-r--r--arch/powerpc/platforms/pseries/setup.c10
-rw-r--r--arch/powerpc/sysdev/uic.c1
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/s390/crypto/aes_s390.c6
-rw-r--r--arch/s390/hypfs/inode.c9
-rw-r--r--arch/s390/include/asm/elf.h11
-rw-r--r--arch/s390/include/asm/facility.h21
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/timex.h2
-rw-r--r--arch/s390/include/asm/uaccess.h4
-rw-r--r--arch/s390/kernel/diag.c2
-rw-r--r--arch/s390/kernel/dis.c13
-rw-r--r--arch/s390/kernel/mcount.S15
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c45
-rw-r--r--arch/s390/kernel/processor.c5
-rw-r--r--arch/s390/kernel/smp.c80
-rw-r--r--arch/s390/kernel/topology.c3
-rw-r--r--arch/s390/kernel/vdso32/Makefile3
-rw-r--r--arch/s390/kernel/vdso64/Makefile3
-rw-r--r--arch/s390/kvm/interrupt.c10
-rw-r--r--arch/s390/kvm/kvm-s390.c60
-rw-r--r--arch/s390/kvm/vsie.c1
-rw-r--r--arch/s390/mm/cmm.c12
-rw-r--r--arch/s390/mm/gmap.c7
-rw-r--r--arch/s390/mm/gup.c9
-rw-r--r--arch/s390/net/bpf_jit_comp.c14
-rw-r--r--arch/sh/boards/of-generic.c4
-rw-r--r--arch/sh/include/asm/io.h6
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/sh7269.h11
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7734.h2
-rw-r--r--arch/sh/kernel/hw_breakpoint.c1
-rw-r--r--arch/sparc/include/asm/bug.h6
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h7
-rw-r--r--arch/sparc/include/asm/parport.h2
-rw-r--r--arch/sparc/include/uapi/asm/ipcbuf.h22
-rw-r--r--arch/sparc/kernel/perf_event.c4
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S6
-rw-r--r--arch/sparc/mm/ultra.S4
-rw-r--r--arch/sparc/net/bpf_jit_comp.c2
-rw-r--r--arch/tile/lib/atomic_asm_32.S3
-rw-r--r--arch/um/Kconfig.debug1
-rw-r--r--arch/um/drivers/line.c2
-rw-r--r--arch/um/include/asm/mmu_context.h2
-rw-r--r--arch/um/include/asm/thread_info.h3
-rw-r--r--arch/um/include/shared/os.h2
-rw-r--r--arch/um/kernel/process.c4
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/um/os-Linux/skas/process.c17
-rw-r--r--arch/x86/Kconfig64
-rw-r--r--arch/x86/Kconfig.debug2
-rw-r--r--arch/x86/Makefile3
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/head_32.S2
-rw-r--r--arch/x86/boot/compressed/head_64.S9
-rw-r--r--arch/x86/boot/compressed/misc.c1
-rw-r--r--arch/x86/boot/compressed/misc.h1
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c13
-rw-r--r--arch/x86/crypto/poly1305-avx2-x86_64.S14
-rw-r--r--arch/x86/crypto/poly1305-sse2-x86_64.S22
-rw-r--r--arch/x86/entry/calling.h18
-rw-r--r--arch/x86/entry/common.c3
-rw-r--r--arch/x86/entry/entry_32.S3
-rw-r--r--arch/x86/entry/entry_64.S23
-rw-r--r--arch/x86/entry/vdso/Makefile23
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c1
-rw-r--r--arch/x86/events/amd/core.c147
-rw-r--r--arch/x86/events/amd/ibs.c21
-rw-r--r--arch/x86/events/amd/uncore.c112
-rw-r--r--arch/x86/events/core.c9
-rw-r--r--arch/x86/events/intel/core.c26
-rw-r--r--arch/x86/events/intel/cstate.c4
-rw-r--r--arch/x86/events/intel/ds.c30
-rw-r--r--arch/x86/events/msr.c4
-rw-r--r--arch/x86/ia32/ia32_signal.c29
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/atomic.h21
-rw-r--r--arch/x86/include/asm/atomic64_64.h8
-rw-r--r--arch/x86/include/asm/barrier.h4
-rw-r--r--arch/x86/include/asm/bootparam_utils.h61
-rw-r--r--arch/x86/include/asm/cpufeatures.h22
-rw-r--r--arch/x86/include/asm/crash.h2
-rw-r--r--arch/x86/include/asm/fixmap.h2
-rw-r--r--arch/x86/include/asm/insn.h18
-rw-r--r--arch/x86/include/asm/intel-family.h33
-rw-r--r--arch/x86/include/asm/irqflags.h4
-rw-r--r--arch/x86/include/asm/kexec.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h41
-rw-r--r--arch/x86/include/asm/microcode_intel.h15
-rw-r--r--arch/x86/include/asm/msr-index.h57
-rw-r--r--arch/x86/include/asm/mwait.h9
-rw-r--r--arch/x86/include/asm/nospec-branch.h67
-rw-r--r--arch/x86/include/asm/perf_event.h12
-rw-r--r--arch/x86/include/asm/pgtable_64.h16
-rw-r--r--arch/x86/include/asm/processor.h31
-rw-r--r--arch/x86/include/asm/ptrace.h48
-rw-r--r--arch/x86/include/asm/smp.h10
-rw-r--r--arch/x86/include/asm/spec-ctrl.h20
-rw-r--r--arch/x86/include/asm/stacktrace.h5
-rw-r--r--arch/x86/include/asm/suspend_32.h8
-rw-r--r--arch/x86/include/asm/suspend_64.h19
-rw-r--r--arch/x86/include/asm/switch_to.h4
-rw-r--r--arch/x86/include/asm/thread_info.h20
-rw-r--r--arch/x86/include/asm/tlbflush.h8
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/vgtod.h7
-rw-r--r--arch/x86/include/asm/xen/hypercall.h3
-rw-r--r--arch/x86/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/include/uapi/asm/mce.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S9
-rw-r--r--arch/x86/kernel/apic/apic.c200
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c24
-rw-r--r--arch/x86/kernel/apic/io_apic.c17
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c73
-rw-r--r--arch/x86/kernel/cpu/bugs.c946
-rw-r--r--arch/x86/kernel/cpu/common.c178
-rw-r--r--arch/x86/kernel/cpu/cpu.h18
-rw-r--r--arch/x86/kernel/cpu/cyrix.c16
-rw-r--r--arch/x86/kernel/cpu/intel.c16
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c80
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c86
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c22
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c70
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh2
-rw-r--r--arch/x86/kernel/cpu/tsx.c141
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c1
-rw-r--r--arch/x86/kernel/irq_64.c19
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c30
-rw-r--r--arch/x86/kernel/nmi.c4
-rw-r--r--arch/x86/kernel/process.c101
-rw-r--r--arch/x86/kernel/process.h39
-rw-r--r--arch/x86/kernel/process_32.c16
-rw-r--r--arch/x86/kernel/process_64.c17
-rw-r--r--arch/x86/kernel/ptrace.c4
-rw-r--r--arch/x86/kernel/reboot.c21
-rw-r--r--arch/x86/kernel/signal.c29
-rw-r--r--arch/x86/kernel/smp.c46
-rw-r--r--arch/x86/kernel/sysfb_efi.c46
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c2
-rw-r--r--arch/x86/kernel/tls.c9
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/unwind_frame.c20
-rw-r--r--arch/x86/kernel/uprobes.c23
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/kvm/cpuid.c34
-rw-r--r--arch/x86/kvm/cpuid.h2
-rw-r--r--arch/x86/kvm/emulate.c74
-rw-r--r--arch/x86/kvm/hyperv.c11
-rw-r--r--arch/x86/kvm/ioapic.c15
-rw-r--r--arch/x86/kvm/irq_comm.c2
-rw-r--r--arch/x86/kvm/lapic.c20
-rw-r--r--arch/x86/kvm/mmu.c460
-rw-r--r--arch/x86/kvm/mmu.h21
-rw-r--r--arch/x86/kvm/mmutrace.h59
-rw-r--r--arch/x86/kvm/mtrr.c9
-rw-r--r--arch/x86/kvm/paging_tmpl.h79
-rw-r--r--arch/x86/kvm/pmu.c4
-rw-r--r--arch/x86/kvm/pmu.h18
-rw-r--r--arch/x86/kvm/pmu_intel.c37
-rw-r--r--arch/x86/kvm/svm.c37
-rw-r--r--arch/x86/kvm/trace.h4
-rw-r--r--arch/x86/kvm/vmx.c246
-rw-r--r--arch/x86/kvm/x86.c191
-rw-r--r--arch/x86/lib/cpu.c1
-rw-r--r--arch/x86/lib/delay.c4
-rw-r--r--arch/x86/lib/x86-opcode-map.txt20
-rw-r--r--arch/x86/math-emu/fpu_emu.h2
-rw-r--r--arch/x86/math-emu/reg_constant.c2
-rw-r--r--arch/x86/mm/fault.c43
-rw-r--r--arch/x86/mm/gup.c41
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/kaiser.c4
-rw-r--r--arch/x86/mm/pgtable.c10
-rw-r--r--arch/x86/mm/tlb.c114
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/pci/fixup.c11
-rw-r--r--arch/x86/pci/irq.c10
-rw-r--r--arch/x86/platform/atom/punit_atom_debug.c4
-rw-r--r--arch/x86/platform/efi/efi.c16
-rw-r--r--arch/x86/power/cpu.c192
-rw-r--r--arch/x86/power/hibernate_64.c33
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk4
-rw-r--r--arch/x86/um/os-Linux/registers.c30
-rw-r--r--arch/x86/um/user-offsets.c6
-rw-r--r--arch/xtensa/kernel/setup.c1
-rw-r--r--arch/xtensa/kernel/stacktrace.c6
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c7
-rw-r--r--arch/xtensa/mm/tlb.c4
-rw-r--r--block/bio.c5
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-merge.c8
-rw-r--r--block/blk-mq-sysfs.c15
-rw-r--r--block/blk-mq-tag.c9
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-settings.c2
-rw-r--r--block/compat_ioctl.c349
-rw-r--r--crypto/af_alg.c6
-rw-r--r--crypto/algapi.c22
-rw-r--r--crypto/algif_skcipher.c2
-rw-r--r--crypto/api.c3
-rw-r--r--crypto/asymmetric_keys/Kconfig3
-rw-r--r--crypto/chacha20poly1305.c34
-rw-r--r--crypto/crct10dif_generic.c11
-rw-r--r--crypto/crypto_user.c44
-rw-r--r--crypto/ecc.c3
-rw-r--r--crypto/gcm.c36
-rw-r--r--crypto/ghash-generic.c8
-rw-r--r--crypto/internal.h1
-rw-r--r--crypto/pcrypt.c6
-rw-r--r--crypto/rsa-pkcs1pad.c9
-rw-r--r--crypto/salsa20_generic.c2
-rw-r--r--crypto/testmgr.h44
-rw-r--r--crypto/tgr192.c6
-rw-r--r--drivers/acpi/acpi_lpss.c9
-rw-r--r--drivers/acpi/acpi_memhotplug.c2
-rw-r--r--drivers/acpi/acpi_video.c20
-rw-r--r--drivers/acpi/acpi_watchdog.c15
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dswload.c21
-rw-r--r--drivers/acpi/acpica/evregion.c17
-rw-r--r--drivers/acpi/acpica/evrgnini.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c1
-rw-r--r--drivers/acpi/apei/ghes.c30
-rw-r--r--drivers/acpi/arm64/iort.c4
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c6
-rw-r--r--drivers/acpi/custom_method.c5
-rw-r--r--drivers/acpi/device_pm.c12
-rw-r--r--drivers/acpi/osl.c29
-rw-r--r--drivers/acpi/pci_irq.c4
-rw-r--r--drivers/acpi/pci_root.c5
-rw-r--r--drivers/acpi/sbs.c8
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/android/binder.c68
-rw-r--r--drivers/ata/Kconfig3
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/ahci_brcm.c112
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libahci_platform.c9
-rw-r--r--drivers/ata/libata-core.c33
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/libata-pmp.c1
-rw-r--r--drivers/ata/libata-scsi.c30
-rw-r--r--drivers/ata/libata-sff.c6
-rw-r--r--drivers/ata/libata-zpodd.c36
-rw-r--r--drivers/ata/pata_ep93xx.c8
-rw-r--r--drivers/atm/Kconfig2
-rw-r--r--drivers/atm/eni.c4
-rw-r--r--drivers/atm/firestream.c3
-rw-r--r--drivers/atm/iphase.c8
-rw-r--r--drivers/atm/zatm.c42
-rw-r--r--drivers/base/component.c6
-rw-r--r--drivers/base/core.c56
-rw-r--r--drivers/base/cpu.c25
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/memory.c9
-rw-r--r--drivers/base/platform.c15
-rw-r--r--drivers/base/power/main.c4
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/bcma/driver_pci.c4
-rw-r--r--drivers/block/amiflop.c84
-rw-r--r--drivers/block/brd.c22
-rw-r--r--drivers/block/drbd/drbd_main.c3
-rw-r--r--drivers/block/drbd/drbd_nl.c43
-rw-r--r--drivers/block/drbd/drbd_receiver.c66
-rw-r--r--drivers/block/drbd/drbd_state.h2
-rw-r--r--drivers/block/floppy.c369
-rw-r--r--drivers/block/loop.c43
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/rsxx/core.c2
-rw-r--r--drivers/block/virtio_blk.c10
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c16
-rw-r--r--drivers/block/xen-blkfront.c4
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/bluetooth/btqca.c3
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/bluetooth/hci_ath.c3
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/hci_bcsp.c8
-rw-r--r--drivers/bluetooth/hci_intel.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c12
-rw-r--r--drivers/bluetooth/hci_mrvl.c3
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/cdrom/cdrom.c19
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/hpet.c5
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c3
-rw-r--r--drivers/char/hw_random/stm32-rng.c8
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c24
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c16
-rw-r--r--drivers/char/mem.c21
-rw-r--r--drivers/char/ppdev.c16
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/tpm/tpm_crb.c22
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c10
-rw-r--r--drivers/char/ttyprintk.c15
-rw-r--r--drivers/char/virtio_console.c143
-rw-r--r--drivers/clk/at91/clk-main.c17
-rw-r--r--drivers/clk/at91/clk-usb.c3
-rw-r--r--drivers/clk/at91/sckc.c20
-rw-r--r--drivers/clk/clk-highbank.c1
-rw-r--r--drivers/clk/clk-qoriq.c3
-rw-r--r--drivers/clk/clk-s2mps11.c2
-rw-r--r--drivers/clk/clk.c10
-rw-r--r--drivers/clk/imx/clk-imx6q.c1
-rw-r--r--drivers/clk/imx/clk-imx6sx.c1
-rw-r--r--drivers/clk/imx/clk-imx7d.c1
-rw-r--r--drivers/clk/imx/clk-vf610.c1
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c6
-rw-r--r--drivers/clk/mvebu/armada-370.c4
-rw-r--r--drivers/clk/mvebu/armada-xp.c4
-rw-r--r--drivers/clk/mvebu/dove.c8
-rw-r--r--drivers/clk/mvebu/kirkwood.c2
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c7
-rw-r--r--drivers/clk/qcom/common.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c36
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c4
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c8
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c11
-rw-r--r--drivers/clk/samsung/clk-cpu.c6
-rw-r--r--drivers/clk/samsung/clk-cpu.h2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c8
-rw-r--r--drivers/clk/sirf/clk-common.c12
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c2
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c4
-rw-r--r--drivers/clk/tegra/clk-pll.c4
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c6
-rw-r--r--drivers/clk/tegra/clk-tegra-pmc.c12
-rw-r--r--drivers/clocksource/asm9260_timer.c4
-rw-r--r--drivers/clocksource/exynos_mct.c18
-rw-r--r--drivers/clocksource/timer-sun5i.c10
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c2
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c24
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c6
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpuidle/driver.c15
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c15
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c5
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.h4
-rw-r--r--drivers/crypto/atmel-aes.c37
-rw-r--r--drivers/crypto/caam/caamalg.c26
-rw-r--r--drivers/crypto/caam/caamrng.c5
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes.c8
-rw-r--r--drivers/crypto/ccp/ccp-dev.c102
-rw-r--r--drivers/crypto/ccp/ccp-dev.h2
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c1
-rw-r--r--drivers/crypto/mxc-scc.c12
-rw-r--r--drivers/crypto/mxs-dcp.c73
-rw-r--r--drivers/crypto/picoxcell_crypto.c15
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h2
-rw-r--r--drivers/crypto/s5p-sss.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-hash.c17
-rw-r--r--drivers/crypto/talitos.c119
-rw-r--r--drivers/crypto/vmx/Makefile6
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl6
-rw-r--r--drivers/crypto/vmx/ghash.c213
-rw-r--r--drivers/devfreq/Kconfig3
-rw-r--r--drivers/devfreq/devfreq.c20
-rw-r--r--drivers/devfreq/event/Kconfig2
-rw-r--r--drivers/devfreq/exynos-bus.c31
-rw-r--r--drivers/devfreq/governor_passive.c7
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/at_xdmac.c6
-rw-r--r--drivers/dma/bcm2835-dma.c4
-rw-r--r--drivers/dma/coh901318.c9
-rw-r--r--drivers/dma/cppi41.c21
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dw/platform.c14
-rw-r--r--drivers/dma/edma.c15
-rw-r--r--drivers/dma/hsu/hsu.c4
-rw-r--r--drivers/dma/idma64.c6
-rw-r--r--drivers/dma/idma64.h2
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c58
-rw-r--r--drivers/dma/ioat/dma.c3
-rw-r--r--drivers/dma/ioat/init.c7
-rw-r--r--drivers/dma/iop-adma.c18
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/omap-dma.c4
-rw-r--r--drivers/dma/pl330.c10
-rw-r--r--drivers/dma/qcom/bam_dma.c14
-rw-r--r--drivers/dma/qcom/hidma.c17
-rw-r--r--drivers/dma/sh/rcar-dmac.c6
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/tegra20-apb-dma.c11
-rw-r--r--drivers/dma/tegra210-adma.c100
-rw-r--r--drivers/dma/ti-dma-crossbar.c4
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c7
-rw-r--r--drivers/edac/altera_edac.c4
-rw-r--r--drivers/edac/edac_mc_sysfs.c24
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/ghes_edac.c10
-rw-r--r--drivers/extcon/extcon-arizona.c10
-rw-r--r--drivers/extcon/extcon-max8997.c10
-rw-r--r--drivers/extcon/extcon-sm5502.c4
-rw-r--r--drivers/extcon/extcon-sm5502.h2
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/firmware/efi/cper.c17
-rw-r--r--drivers/firmware/efi/efi.c3
-rw-r--r--drivers/firmware/efi/efivars.c32
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c23
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c4
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c19
-rw-r--r--drivers/firmware/efi/libstub/efistub.h2
-rw-r--r--drivers/firmware/efi/libstub/gop.c80
-rw-r--r--drivers/firmware/efi/memattr.c2
-rw-r--r--drivers/firmware/google/gsmi.c5
-rw-r--r--drivers/firmware/iscsi_ibft.c4
-rw-r--r--drivers/firmware/qcom_scm-64.c2
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-grgpio.c10
-rw-r--r--drivers/gpio/gpio-max77620.c6
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c4
-rw-r--r--drivers/gpio/gpio-omap.c52
-rw-r--r--drivers/gpio/gpio-pxa.c6
-rw-r--r--drivers/gpio/gpio-syscon.c2
-rw-r--r--drivers/gpio/gpiolib-of.c8
-rw-r--r--drivers/gpio/gpiolib.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c5
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c8
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c8
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c9
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c25
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c9
-rw-r--r--drivers/gpu/drm/drm_pci.c25
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c9
-rw-r--r--drivers/gpu/drm/drm_property.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c3
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c8
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c3
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h1
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c616
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h147
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c110
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c63
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.c6
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h11
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c18
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c27
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c160
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c11
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h18
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c24
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c47
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c4
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c9
-rw-r--r--drivers/gpu/drm/radeon/cik.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r200.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/imx/ipu-v3/ipu-dp.c12
-rw-r--r--drivers/gpu/imx/ipu-v3/ipu-ic.c2
-rw-r--r--drivers/hid/hid-a4tech.c30
-rw-r--r--drivers/hid/hid-apple.c52
-rw-r--r--drivers/hid/hid-axff.c11
-rw-r--r--drivers/hid/hid-core.c96
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-dr.c12
-rw-r--r--drivers/hid/hid-emsff.c12
-rw-r--r--drivers/hid/hid-gaff.c12
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-holtekff.c12
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c30
-rw-r--r--drivers/hid/hid-lg.c10
-rw-r--r--drivers/hid/hid-lg2ff.c12
-rw-r--r--drivers/hid/hid-lg3ff.c11
-rw-r--r--drivers/hid/hid-lg4ff.c12
-rw-r--r--drivers/hid/hid-lgff.c11
-rw-r--r--drivers/hid/hid-logitech-hidpp.c36
-rw-r--r--drivers/hid/hid-prodikeys.c12
-rw-r--r--drivers/hid/hid-sony.c12
-rw-r--r--drivers/hid/hid-tmff.c24
-rw-r--r--drivers/hid/hid-zpff.c12
-rw-r--r--drivers/hid/hidraw.c9
-rw-r--r--drivers/hid/i2c-hid/Makefile3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c (renamed from drivers/hid/i2c-hid/i2c-hid.c)56
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c420
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h20
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c9
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c2
-rw-r--r--drivers/hid/uhid.c6
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c14
-rw-r--r--drivers/hid/wacom_wac.c16
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/adt7475.c5
-rw-r--r--drivers/hwmon/f71805f.c15
-rw-r--r--drivers/hwmon/hwmon.c98
-rw-r--r--drivers/hwmon/ina3221.c6
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/nct6775.c3
-rw-r--r--drivers/hwmon/nct7802.c10
-rw-r--r--drivers/hwmon/pc87427.c14
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c34
-rw-r--r--drivers/hwmon/pwm-fan.c8
-rw-r--r--drivers/hwmon/shtc1.c2
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/smsc47m1.c28
-rw-r--r--drivers/hwmon/vt1211.c15
-rw-r--r--drivers/hwmon/w83627hf.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c57
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c8
-rw-r--r--drivers/hwtracing/coresight/coresight.c22
-rw-r--r--drivers/hwtracing/intel_th/gth.c2
-rw-r--r--drivers/hwtracing/intel_th/msu.c43
-rw-r--r--drivers/hwtracing/stm/core.c3
-rw-r--r--drivers/i2c/busses/Kconfig7
-rw-r--r--drivers/i2c/busses/i2c-acorn.c1
-rw-r--r--drivers/i2c/busses/i2c-emev2.c16
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c1
-rw-r--r--drivers/i2c/busses/i2c-imx.c3
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c36
-rw-r--r--drivers/i2c/busses/i2c-piix4.c12
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-riic.c1
-rw-r--r--drivers/i2c/busses/i2c-st.c1
-rw-r--r--drivers/i2c/i2c-core.c12
-rw-r--r--drivers/i2c/i2c-dev.c1
-rw-r--r--drivers/ide/cmd64x.c3
-rw-r--r--drivers/ide/serverworks.c6
-rw-r--r--drivers/idle/intel_idle.c14
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c2
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/adc/ad799x.c4
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c17
-rw-r--r--drivers/iio/adc/at91_adc.c28
-rw-r--r--drivers/iio/adc/max1027.c8
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c2
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/mcp4922.c11
-rw-r--r--drivers/iio/gyro/bmg160_core.c6
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c210
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c5
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c2
-rw-r--r--drivers/iio/imu/adis16480.c12
-rw-r--r--drivers/iio/industrialio-buffer.c6
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/opt3001.c6
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c7
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c7
-rw-r--r--drivers/infiniband/core/addr.c17
-rw-r--r--drivers/infiniband/core/cm.c1
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/cq.c8
-rw-r--r--drivers/infiniband/core/device.c15
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/mad.c22
-rw-r--r--drivers/infiniband/core/sa_query.c14
-rw-r--r--drivers/infiniband/core/user_mad.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c16
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c28
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c1
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c45
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c4
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c26
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c13
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c13
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c12
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c19
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c21
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c35
-rw-r--r--drivers/input/ff-memless.c9
-rw-r--r--drivers/input/input.c26
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c5
-rw-r--r--drivers/input/keyboard/imx_keypad.c7
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c2
-rw-r--r--drivers/input/misc/da9063_onkey.c5
-rw-r--r--drivers/input/misc/keyspan_remote.c9
-rw-r--r--drivers/input/misc/uinput.c22
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/trackpoint.h3
-rw-r--r--drivers/input/rmi4/rmi_driver.c6
-rw-r--r--drivers/input/rmi4/rmi_f11.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c5
-rw-r--r--drivers/input/serio/gscps2.c4
-rw-r--r--drivers/input/serio/hp_sdc.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h11
-rw-r--r--drivers/input/tablet/aiptek.c6
-rw-r--r--drivers/input/tablet/gtco.c30
-rw-r--r--drivers/input/tablet/kbtab.c6
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c7
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c7
-rw-r--r--drivers/input/touchscreen/goodix.c9
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c4
-rw-r--r--drivers/input/touchscreen/silead.c13
-rw-r--r--drivers/input/touchscreen/st1232.c1
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c6
-rw-r--r--drivers/input/touchscreen/sur40.c2
-rw-r--r--drivers/iommu/amd_iommu.c30
-rw-r--r--drivers/iommu/amd_iommu_init.c7
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/arm-smmu-v3.c3
-rw-r--r--drivers/iommu/dma-iommu.c2
-rw-r--r--drivers/iommu/dmar.c23
-rw-r--r--drivers/iommu/intel-iommu.c28
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c3
-rw-r--r--drivers/iommu/iommu.c7
-rw-r--r--drivers/iommu/tegra-smmu.c36
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c11
-rw-r--r--drivers/irqchip/irq-gic-v3.c9
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-ingenic.c15
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c18
-rw-r--r--drivers/isdn/capi/capi.c10
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c23
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c16
-rw-r--r--drivers/isdn/mISDN/socket.c11
-rw-r--r--drivers/isdn/mISDN/tei.c7
-rw-r--r--drivers/leds/leds-lp5562.c6
-rw-r--r--drivers/leds/leds-lp55xx-common.c4
-rw-r--r--drivers/leds/leds-pca9532.c8
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c25
-rw-r--r--drivers/mailbox/mailbox.c6
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/bset.c16
-rw-r--r--drivers/md/bcache/bset.h37
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/journal.c37
-rw-r--r--drivers/md/bcache/super.c25
-rw-r--r--drivers/md/bcache/sysfs.c17
-rw-r--r--drivers/md/bcache/sysfs.h13
-rw-r--r--drivers/md/bitmap.c8
-rw-r--r--drivers/md/dm-bio-prison.c2
-rw-r--r--drivers/md/dm-cache-target.c4
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-flakey.c38
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-kcopyd.c2
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-snap.c178
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin.c15
-rw-r--r--drivers/md/dm-verity-fec.c1
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm.c13
-rw-r--r--drivers/md/md.c56
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c8
-rw-r--r--drivers/md/persistent-data/dm-btree.c31
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c27
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h2
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c6
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c7
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c15
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c9
-rw-r--r--drivers/media/dvb-frontends/tua6100.c22
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c (renamed from drivers/media/i2c/adv7511.c)5
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/mt9v032.c10
-rw-r--r--drivers/media/i2c/ov2659.c26
-rw-r--r--drivers/media/i2c/ov7670.c16
-rw-r--r--drivers/media/i2c/ov9650.c5
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c108
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/media-device.c10
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c5
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c2
-rw-r--r--drivers/media/pci/meye/meye.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c12
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c8
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c5
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c8
-rw-r--r--drivers/media/platform/coda/coda-bit.c12
-rw-r--r--drivers/media/platform/davinci/isif.c12
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c2
-rw-r--r--drivers/media/platform/davinci/vpss.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c5
-rw-r--r--drivers/media/platform/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/omap/omap_vout.c15
-rw-r--r--drivers/media/platform/omap3isp/isp.c8
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c1
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c1
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c1
-rw-r--r--drivers/media/platform/omap3isp/isppreview.c1
-rw-r--r--drivers/media/platform/omap3isp/ispresizer.c1
-rw-r--r--drivers/media/platform/omap3isp/ispstat.c2
-rw-r--r--drivers/media/platform/pxa_camera.c2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c6
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c59
-rw-r--r--drivers/media/platform/sh_veu.c4
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c6
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/ti-vpe/cal.c16
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c16
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c8
-rw-r--r--drivers/media/platform/vivid/vivid-osd.c2
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c5
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c3
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c3
-rw-r--r--drivers/media/radio/radio-raremono.c30
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c5
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c12
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c3
-rw-r--r--drivers/media/rc/iguanair.c15
-rw-r--r--drivers/media/rc/imon.c3
-rw-r--r--drivers/media/usb/au0828/au0828-core.c16
-rw-r--r--drivers/media/usb/au0828/au0828-video.c16
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c13
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c7
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c7
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c4
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c8
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c10
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-init.c7
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-urb.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c22
-rw-r--r--drivers/media/usb/go7007/go7007-fw.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/konica.c5
-rw-r--r--drivers/media/usb/gspca/nw80x.c5
-rw-r--r--drivers/media/usb/gspca/ov519.c20
-rw-r--r--drivers/media/usb/gspca/ov534.c5
-rw-r--r--drivers/media/usb/gspca/ov534_9.c1
-rw-r--r--drivers/media/usb/gspca/se401.c5
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c12
-rw-r--r--drivers/media/usb/gspca/sonixb.c5
-rw-r--r--drivers/media/usb/gspca/sonixj.c5
-rw-r--r--drivers/media/usb/gspca/spca1528.c5
-rw-r--r--drivers/media/usb/gspca/sq930x.c5
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx.c19
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c4
-rw-r--r--drivers/media/usb/gspca/sunplus.c5
-rw-r--r--drivers/media/usb/gspca/vc032x.c5
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c5
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c18
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c13
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/usb/siano/smsusb.c33
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c21
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c44
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c17
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/memory/tegra/mc.c2
-rw-r--r--drivers/memstick/core/memstick.c13
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/message/fusion/mptctl.c213
-rw-r--r--drivers/mfd/arizona-core.c10
-rw-r--r--drivers/mfd/da9062-core.c2
-rw-r--r--drivers/mfd/dln2.c18
-rw-r--r--drivers/mfd/hi655x-pmic.c2
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/intel-lpss.c4
-rw-r--r--drivers/mfd/max8997.c8
-rw-r--r--drivers/mfd/mc13xxx-core.c3
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/rn5t618.c1
-rw-r--r--drivers/mfd/rts5227.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c13
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl6040.c13
-rw-r--r--drivers/misc/altera-stapl/altera.c15
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/echo/echo.c2
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/enclosure.c3
-rw-r--r--drivers/misc/genwqe/card_dev.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c17
-rw-r--r--drivers/misc/kgdbts.c20
-rw-r--r--drivers/misc/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm_core.c2
-rw-r--r--drivers/misc/lkdtm_perms.c18
-rw-r--r--drivers/misc/mei/bus.c9
-rw-r--r--drivers/misc/mic/card/mic_x100.c28
-rw-r--r--drivers/misc/mic/scif/scif_fence.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c80
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.c38
-rw-r--r--drivers/misc/vmw_vmci/vmci_handle_array.h29
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c38
-rw-r--r--drivers/mmc/core/sd.c14
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c3
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/omap.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c30
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c4
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c11
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c5
-rw-r--r--drivers/mmc/host/sdhci-tegra.c2
-rw-r--r--drivers/mmc/host/sdhci.c55
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/devices/phram.c15
-rw-r--r--drivers/mtd/devices/spear_smi.c38
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/maps/physmap_of.c27
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c35
-rw-r--r--drivers/mtd/nand/mtk_nand.c21
-rw-r--r--drivers/mtd/nand/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/sunxi_nand.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c2
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c23
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/net/arcnet/arcnet.c31
-rw-r--r--drivers/net/bonding/bond_alb.c64
-rw-r--r--drivers/net/bonding/bond_main.c66
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/caif/caif_hsi.c2
-rw-r--r--drivers/net/can/c_can/c_can.c51
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c3
-rw-r--r--drivers/net/can/mscan/mscan.c21
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/slcan.c21
-rw-r--r--drivers/net/can/spi/Kconfig5
-rw-r--r--drivers/net/can/spi/mcp251x.c42
-rw-r--r--drivers/net/can/usb/gs_usb.c5
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c32
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/dsa/qca8k.c30
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c51
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h9
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c46
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c13
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c6
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c3
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c72
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c29
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c68
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c6
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c26
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c10
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c9
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c10
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c10
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c60
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c33
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c7
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c10
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c14
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c19
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c36
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c97
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c115
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h25
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h14
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c34
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c8
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c7
-rw-r--r--drivers/net/ethernet/sfc/ef10.c29
-rw-r--r--drivers/net/ethernet/sfc/ptp.c3
-rw-r--r--drivers/net/ethernet/sis/sis900.c16
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c5
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c3
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c56
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c3
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c8
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c25
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c9
-rw-r--r--drivers/net/fjes/fjes_main.c18
-rw-r--r--drivers/net/gtp.c57
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/ieee802154/atusb.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c19
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c5
-rw-r--r--drivers/net/macsec.c42
-rw-r--r--drivers/net/macvlan.c8
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/phy/marvell.c6
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c20
-rw-r--r--drivers/net/phy/micrel.c7
-rw-r--r--drivers/net/phy/national.c9
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/phy/phy_device.c16
-rw-r--r--drivers/net/phy/spi_ks8995.c9
-rw-r--r--drivers/net/ppp/ppp_async.c18
-rw-r--r--drivers/net/ppp/ppp_deflate.c20
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/ppp/ppp_mppe.c1
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/ppp/pppox.c13
-rw-r--r--drivers/net/ppp/pptp.c3
-rw-r--r--drivers/net/slip/slhc.c16
-rw-r--r--drivers/net/slip/slip.c13
-rw-r--r--drivers/net/team/team.c40
-rw-r--r--drivers/net/tun.c70
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/cdc_ether.c13
-rw-r--r--drivers/net/usb/cdc_ncm.c12
-rw-r--r--drivers/net/usb/cx82310_eth.c3
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/ipheth.c36
-rw-r--r--drivers/net/usb/kalmia.c6
-rw-r--r--drivers/net/usb/lan78xx.c33
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c5
-rw-r--r--drivers/net/usb/r8152.c23
-rw-r--r--drivers/net/usb/sr9800.c2
-rw-r--r--drivers/net/usb/usbnet.c14
-rw-r--r--drivers/net/vrf.c58
-rw-r--r--drivers/net/vxlan.c24
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c8
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c33
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c22
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c39
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c22
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h1
-rw-r--r--drivers/net/wireless/cisco/airo.c22
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c7
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c43
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c12
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c18
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c48
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c59
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c70
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c54
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c15
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c3
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c25
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h1
-rw-r--r--drivers/net/wireless/st/cw1200/fwio.c6
-rw-r--r--drivers/net/wireless/st/cw1200/main.c5
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netfront.c19
-rw-r--r--drivers/nfc/fdp/fdp.c5
-rw-r--r--drivers/nfc/fdp/i2c.c2
-rw-r--r--drivers/nfc/nxp-nci/i2c.c6
-rw-r--r--drivers/nfc/pn544/i2c.c1
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nfc/port100.c4
-rw-r--r--drivers/nfc/st-nci/se.c2
-rw-r--r--drivers/nfc/st21nfca/core.c1
-rw-r--r--drivers/nfc/st21nfca/se.c2
-rw-r--r--drivers/nfc/st95hf/core.c7
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c2
-rw-r--r--drivers/nvdimm/btt_devs.c18
-rw-r--r--drivers/nvdimm/bus.c6
-rw-r--r--drivers/nvdimm/dax_devs.c2
-rw-r--r--drivers/nvdimm/label.c29
-rw-r--r--drivers/nvdimm/namespace_devs.c20
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn.h1
-rw-r--r--drivers/nvdimm/pfn_devs.c18
-rw-r--r--drivers/nvme/host/core.c7
-rw-r--r--drivers/nvme/host/pci.c2
-rw-r--r--drivers/nvme/target/admin-cmd.c14
-rw-r--r--drivers/nvme/target/core.c20
-rw-r--r--drivers/nvmem/core.c32
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/address.c6
-rw-r--r--drivers/of/base.c5
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/of/unittest.c8
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/dino.c24
-rw-r--r--drivers/parisc/led.c3
-rw-r--r--drivers/parisc/sba_iommu.c3
-rw-r--r--drivers/parport/share.c23
-rw-r--r--drivers/pci/host/pci-hyperv.c20
-rw-r--r--drivers/pci/host/pci-keystone-dw.c2
-rw-r--r--drivers/pci/host/pci-keystone.c3
-rw-r--r--drivers/pci/host/pci-tegra.c29
-rw-r--r--drivers/pci/host/pcie-rcar.c10
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/host/pcie-xilinx.c12
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/iov.c1
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-mid.c4
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c31
-rw-r--r--drivers/pci/pcie/aspm.c49
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pci/quirks.c22
-rw-r--r--drivers/pci/setup-bus.c20
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/phy/phy-rcar-gen2.c2
-rw-r--r--drivers/phy/phy-twl4030-usb.c29
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c89
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c28
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c10
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c1
-rw-r--r--drivers/pinctrl/pinctrl-xway.c39
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c9
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c21
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c23
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c20
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c3
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c18
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c41
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c24
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c11
-rw-r--r--drivers/platform/mips/cpu_hwmon.c2
-rw-r--r--drivers/platform/x86/alienware-wmi.c2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c21
-rw-r--r--drivers/platform/x86/asus-wmi.c10
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/hp-wmi.c8
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c6
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/power/avs/smartreflex.c3
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c3
-rw-r--r--drivers/power/supply/ab8500_fg.c31
-rw-r--r--drivers/power/supply/axp288_charger.c4
-rw-r--r--drivers/power/supply/bq27xxx_battery.c5
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c2
-rw-r--r--drivers/power/supply/max8998_charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c10
-rw-r--r--drivers/power/supply/power_supply_sysfs.c9
-rw-r--r--drivers/power/supply/twl4030_charger.c30
-rw-r--r--drivers/powercap/intel_rapl.c8
-rw-r--r--drivers/pps/pps.c8
-rw-r--r--drivers/pwm/core.c11
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c1
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-clps711x.c4
-rw-r--r--drivers/pwm/pwm-lpss.c6
-rw-r--r--drivers/pwm/pwm-meson.c30
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c7
-rw-r--r--drivers/pwm/pwm-samsung.c1
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/sysfs.c14
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/rapidio/rio_cm.c12
-rw-r--r--drivers/regulator/ab8500.c17
-rw-r--r--drivers/regulator/act8865-regulator.c5
-rw-r--r--drivers/regulator/lm363x-regulator.c2
-rw-r--r--drivers/regulator/max8907-regulator.c15
-rw-r--r--drivers/regulator/palmas-regulator.c5
-rw-r--r--drivers/regulator/pfuze100-regulator.c8
-rw-r--r--drivers/regulator/pv88060-regulator.c2
-rw-r--r--drivers/regulator/pv88080-regulator.c2
-rw-r--r--drivers/regulator/pv88090-regulator.c2
-rw-r--r--drivers/regulator/rk808-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c1
-rw-r--r--drivers/regulator/s2mps11.c4
-rw-r--r--drivers/regulator/ti-abb-regulator.c26
-rw-r--r--drivers/regulator/tps65086-regulator.c4
-rw-r--r--drivers/regulator/tps65910-regulator.c4
-rw-r--r--drivers/regulator/wm831x-dcdc.c4
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/reset/core.c15
-rw-r--r--drivers/rtc/Kconfig1
-rw-r--r--drivers/rtc/rtc-88pm80x.c21
-rw-r--r--drivers/rtc/rtc-88pm860x.c23
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-ds1672.c3
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-max8997.c2
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c2
-rw-r--r--drivers/rtc/rtc-msm6242.c3
-rw-r--r--drivers/rtc/rtc-mt6397.c47
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-pcf8523.c60
-rw-r--r--drivers/rtc/rtc-pcf8563.c11
-rw-r--r--drivers/rtc/rtc-pm8xxx.c55
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/char/con3270.c2
-rw-r--r--drivers/s390/char/fs3270.c3
-rw-r--r--drivers/s390/char/raw3270.c3
-rw-r--r--drivers/s390/char/raw3270.h4
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/cio/blacklist.c5
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/cio.h2
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/qdio_main.c13
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c5
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c29
-rw-r--r--drivers/s390/scsi/zfcp_ext.h3
-rw-r--r--drivers/s390/scsi/zfcp_fc.c21
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c16
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c13
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c55
-rw-r--r--drivers/s390/scsi/zfcp_unit.c8
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/NCR5380.c75
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/atari_scsi.c6
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c15
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/dc395x.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c28
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c51
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c30
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c10
-rw-r--r--drivers/scsi/hpsa.c19
-rw-r--r--drivers/scsi/hpsa_cmd.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/ips.c1
-rw-r--r--drivers/scsi/isci/host.c8
-rw-r--r--drivers/scsi/isci/host.h2
-rw-r--r--drivers/scsi/isci/request.c4
-rw-r--r--drivers/scsi/isci/task.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c7
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c6
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_discover.c11
-rw-r--r--drivers/scsi/libsas/sas_expander.c58
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/mac_scsi.c6
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c11
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c36
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h1
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c82
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c15
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c48
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c7
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_logging.c48
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/scsi_sysfs.c11
-rw-r--r--drivers/scsi/scsi_trace.c124
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c33
-rw-r--r--drivers/scsi/sd.c33
-rw-r--r--drivers/scsi/sg.c4
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2
-rw-r--r--drivers/scsi/sni_53c710.c4
-rw-r--r--drivers/scsi/storvsc_drv.c13
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c15
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c11
-rw-r--r--drivers/scsi/ufs/ufshcd.c71
-rw-r--r--drivers/scsi/ufs/unipro.h2
-rw-r--r--drivers/scsi/vmw_pvscsi.c6
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c2
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c7
-rw-r--r--drivers/soc/qcom/smem.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c12
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c2
-rw-r--r--drivers/soc/tegra/pmc.c8
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c4
-rw-r--r--drivers/spi/spi-atmel.c16
-rw-r--r--drivers/spi/spi-bcm2835.c3
-rw-r--r--drivers/spi/spi-bcm2835aux.c70
-rw-r--r--drivers/spi/spi-bitbang.c2
-rw-r--r--drivers/spi/spi-cavium-thunderx.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c26
-rw-r--r--drivers/spi/spi-pic32.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c21
-rw-r--r--drivers/spi/spi-qup.c11
-rw-r--r--drivers/spi/spi-rockchip.c3
-rw-r--r--drivers/spi/spi-rspi.c9
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/spi/spi-st-ssc4.c3
-rw-r--r--drivers/spi/spi-tegra114.c77
-rw-r--r--drivers/spi/spi-tegra20-slink.c5
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c3
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spidev.c11
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c9
-rw-r--r--drivers/staging/android/ashmem.c28
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers.c33
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c3
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c3
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c8
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c34
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c10
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c21
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c8
-rw-r--r--drivers/staging/fbtft/fbtft-core.c13
-rw-r--r--drivers/staging/fbtft/fbtft.h1
-rw-r--r--drivers/staging/greybus/audio_manager.c2
-rw-r--r--drivers/staging/greybus/light.c12
-rw-r--r--drivers/staging/greybus/power_supply.c2
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c17
-rw-r--r--drivers/staging/iio/adc/ad7192.c8
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c22
-rw-r--r--drivers/staging/iio/cdc/ad7150.c19
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c3
-rw-r--r--drivers/staging/media/pulse8-cec/pulse8-cec.c2
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c5
-rw-r--r--drivers/staging/most/aim-network/networking.c10
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c5
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c17
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/speakup/main.c3
-rw-r--r--drivers/staging/vt6655/device_main.c15
-rw-r--r--drivers/staging/vt6656/device.h3
-rw-r--r--drivers/staging/vt6656/dpc.c2
-rw-r--r--drivers/staging/vt6656/int.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/staging/vt6656/rxtx.c26
-rw-r--r--drivers/staging/vt6656/wcmd.c1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c79
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c5
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c5
-rw-r--r--drivers/target/target_core_device.c21
-rw-r--r--drivers/target/target_core_fabric_lib.c2
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c21
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c2
-rw-r--r--drivers/thermal/mtk_thermal.c6
-rw-r--r--drivers/thermal/qcom/tsens.c3
-rw-r--r--drivers/thermal/rcar_thermal.c4
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--drivers/thunderbolt/nhi.c22
-rw-r--r--drivers/tty/Kconfig23
-rw-r--r--drivers/tty/ehv_bytechan.c21
-rw-r--r--drivers/tty/hvc/hvc_vio.c16
-rw-r--r--drivers/tty/ipwireless/hardware.c2
-rw-r--r--drivers/tty/ipwireless/main.c8
-rw-r--r--drivers/tty/n_hdlc.c4
-rw-r--r--drivers/tty/n_r3964.c2
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_port.c10
-rw-r--r--drivers/tty/serial/amba-pl011.c6
-rw-r--r--drivers/tty/serial/ar933x_uart.c32
-rw-r--r--drivers/tty/serial/atmel_serial.c92
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c17
-rw-r--r--drivers/tty/serial/digicolor-usart.c6
-rw-r--r--drivers/tty/serial/ifx6x60.c3
-rw-r--r--drivers/tty/serial/imx.c4
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/max310x.c62
-rw-r--r--drivers/tty/serial/msm_serial.c28
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c3
-rw-r--r--drivers/tty/serial/pch_uart.c5
-rw-r--r--drivers/tty/serial/sc16is7xx.c40
-rw-r--r--drivers/tty/serial/serial_core.c34
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c3
-rw-r--r--drivers/tty/serial/sh-sci.c45
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c11
-rw-r--r--drivers/tty/serial/sunhv.c2
-rw-r--r--drivers/tty/serial/uartlite.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/synclink_gt.c34
-rw-r--r--drivers/tty/synclinkmp.c24
-rw-r--r--drivers/tty/sysrq.c8
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_ldisc.c54
-rw-r--r--drivers/tty/tty_ldsem.c5
-rw-r--r--drivers/tty/vt/keyboard.c35
-rw-r--r--drivers/tty/vt/selection.c38
-rw-r--r--drivers/tty/vt/vt.c32
-rw-r--r--drivers/tty/vt/vt_ioctl.c155
-rw-r--r--drivers/uio/uio_dmem_genirq.c6
-rw-r--r--drivers/usb/atm/ueagle-atm.c18
-rw-r--r--drivers/usb/chipidea/core.c9
-rw-r--r--drivers/usb/chipidea/host.c4
-rw-r--r--drivers/usb/chipidea/otg.c9
-rw-r--r--drivers/usb/chipidea/udc.c20
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c2
-rw-r--r--drivers/usb/class/cdc-acm.c32
-rw-r--r--drivers/usb/class/cdc-wdm.c18
-rw-r--r--drivers/usb/class/usblp.c12
-rw-r--r--drivers/usb/common/common.c2
-rw-r--r--drivers/usb/core/config.c106
-rw-r--r--drivers/usb/core/devio.c17
-rw-r--r--drivers/usb/core/driver.c36
-rw-r--r--drivers/usb/core/file.c10
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c80
-rw-r--r--drivers/usb/core/message.c11
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/core/quirks.c18
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/core/urb.c1
-rw-r--r--drivers/usb/core/usb.h10
-rw-r--r--drivers/usb/dwc2/hcd.c10
-rw-r--r--drivers/usb/dwc3/core.c11
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/composite.c44
-rw-r--r--drivers/usb/gadget/configfs.c111
-rw-r--r--drivers/usb/gadget/function/f_ecm.c22
-rw-r--r--drivers/usb/gadget/function/f_fs.c13
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/function/f_ncm.c17
-rw-r--r--drivers/usb/gadget/function/f_rndis.c1
-rw-r--r--drivers/usb/gadget/function/u_ether.c6
-rw-r--r--drivers/usb/gadget/function/u_serial.c6
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c7
-rw-r--r--drivers/usb/gadget/function/uvc_video.c32
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c2
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c2
-rw-r--r--drivers/usb/gadget/legacy/multi.c2
-rw-r--r--drivers/usb/gadget/legacy/ncm.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/udc/core.c16
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c3
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c5
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c16
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c9
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/host/ehci-q.c13
-rw-r--r--drivers/usb/host/fotg210-hcd.c4
-rw-r--r--drivers/usb/host/hwa-hc.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c15
-rw-r--r--drivers/usb/host/pci-quirks.c31
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-hub.c18
-rw-r--r--drivers/usb/host/xhci-mem.c16
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c4
-rw-r--r--drivers/usb/host/xhci-pci.c17
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-rcar.c3
-rw-r--r--drivers/usb/host/xhci-ring.c32
-rw-r--r--drivers/usb/host/xhci.c63
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/misc/Kconfig10
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/adutux.c28
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/chaoskey.c29
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c17
-rw-r--r--drivers/usb/misc/ldusb.c50
-rw-r--r--drivers/usb/misc/legousbtower.c63
-rw-r--r--drivers/usb/misc/rio500.c553
-rw-r--r--drivers/usb/misc/rio500_usb.h37
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c15
-rw-r--r--drivers/usb/misc/usblcd.c33
-rw-r--r--drivers/usb/misc/yurex.c21
-rw-r--r--drivers/usb/mon/mon_bin.c32
-rw-r--r--drivers/usb/musb/musb_core.c11
-rw-r--r--drivers/usb/musb/musb_host.c17
-rw-r--r--drivers/usb/musb/musbhsdma.c2
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.h4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c36
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h1
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c30
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c15
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h1
-rw-r--r--drivers/usb/serial/ch341.c6
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/f81232.c39
-rw-r--r--drivers/usb/serial/ftdi_sio.c9
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h26
-rw-r--r--drivers/usb/serial/generic.c39
-rw-r--r--drivers/usb/serial/io_edgeport.c45
-rw-r--r--drivers/usb/serial/ir-usb.c136
-rw-r--r--drivers/usb/serial/keyspan.c8
-rw-r--r--drivers/usb/serial/mos7720.c8
-rw-r--r--drivers/usb/serial/mos7840.c16
-rw-r--r--drivers/usb/serial/opticon.c2
-rw-r--r--drivers/usb/serial/option.c66
-rw-r--r--drivers/usb/serial/pl2303.c2
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/quatech2.c6
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c10
-rw-r--r--drivers/usb/serial/usb-serial-simple.c2
-rw-r--r--drivers/usb/serial/usb-serial.c8
-rw-r--r--drivers/usb/serial/usb-wwan.h1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/whiteheat.c13
-rw-r--r--drivers/usb/serial/whiteheat.h2
-rw-r--r--drivers/usb/storage/realtek_cr.c28
-rw-r--r--drivers/usb/storage/scsiglue.c16
-rw-r--r--drivers/usb/storage/uas.c50
-rw-r--r--drivers/usb/storage/unusual_devs.h8
-rw-r--r--drivers/usb/storage/unusual_realtek.h5
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/usb-skeleton.c15
-rw-r--r--drivers/usb/usbip/stub_dev.c75
-rw-r--r--drivers/usb/usbip/stub_rx.c23
-rw-r--r--drivers/usb/usbip/usbip_common.h7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c10
-rw-r--r--drivers/usb/usbip/vhci_rx.c13
-rw-r--r--drivers/vfio/pci/vfio_pci.c48
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c31
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c2
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c10
-rw-r--r--drivers/vfio/vfio_iommu_type1.c14
-rw-r--r--drivers/vhost/net.c49
-rw-r--r--drivers/vhost/scsi.c15
-rw-r--r--drivers/vhost/test.c13
-rw-r--r--drivers/vhost/vhost.c30
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/vhost/vsock.c16
-rw-r--r--drivers/video/backlight/lm3630a_bl.c4
-rw-r--r--drivers/video/backlight/lm3639_bl.c6
-rw-r--r--drivers/video/console/vgacon.c3
-rw-r--r--drivers/video/fbdev/chipsfb.c3
-rw-r--r--drivers/video/fbdev/core/fbcmap.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c5
-rw-r--r--drivers/video/fbdev/core/fbmon.c95
-rw-r--r--drivers/video/fbdev/core/modedb.c60
-rw-r--r--drivers/video/fbdev/goldfishfb.c2
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c6
-rw-r--r--drivers/video/fbdev/sbuslib.c28
-rw-r--r--drivers/video/fbdev/sis/init301.c4
-rw-r--r--drivers/video/fbdev/sm712.h12
-rw-r--r--drivers/video/fbdev/sm712fb.c243
-rw-r--r--drivers/video/fbdev/ssd1307fb.c2
-rw-r--r--drivers/video/hdmi.c8
-rw-r--r--drivers/virt/fsl_hypervisor.c29
-rw-r--r--drivers/virtio/virtio_balloon.c11
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/vme/bridges/vme_fake.c30
-rw-r--r--drivers/w1/masters/ds2490.c6
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/da9062_wdt.c7
-rw-r--r--drivers/watchdog/imx2_wdt.c6
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c4
-rw-r--r--drivers/watchdog/rn5t618_wdt.c1
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--drivers/xen/balloon.c22
-rw-r--r--drivers/xen/cpu_hotplug.c2
-rw-r--r--drivers/xen/pci.c21
-rw-r--r--drivers/xen/preempt.c4
-rw-r--r--drivers/xen/swiotlb-xen.c4
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c3
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c3
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--firmware/Makefile2
-rw-r--r--fs/9p/acl.c2
-rw-r--r--fs/9p/v9fs.c21
-rw-r--r--fs/9p/v9fs.h1
-rw-r--r--fs/9p/vfs_addr.c6
-rw-r--r--fs/9p/vfs_dir.c8
-rw-r--r--fs/9p/vfs_file.c9
-rw-r--r--fs/adfs/super.c5
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/autofs4/expire.c5
-rw-r--r--fs/binfmt_flat.c23
-rw-r--r--fs/binfmt_script.c57
-rw-r--r--fs/btrfs/async-thread.c64
-rw-r--r--fs/btrfs/async-thread.h2
-rw-r--r--fs/btrfs/backref.c18
-rw-r--r--fs/btrfs/ctree.c75
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/delayed-ref.c11
-rw-r--r--fs/btrfs/dev-replace.c29
-rw-r--r--fs/btrfs/disk-io.c27
-rw-r--r--fs/btrfs/extent-tree.c9
-rw-r--r--fs/btrfs/extent_io.c14
-rw-r--r--fs/btrfs/extent_map.c11
-rw-r--r--fs/btrfs/file.c19
-rw-r--r--fs/btrfs/free-space-cache.c6
-rw-r--r--fs/btrfs/inode-map.c1
-rw-r--r--fs/btrfs/inode.c14
-rw-r--r--fs/btrfs/ioctl.c20
-rw-r--r--fs/btrfs/ordered-data.c7
-rw-r--r--fs/btrfs/qgroup.c43
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/reada.c15
-rw-r--r--fs/btrfs/relocation.c40
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/send.c102
-rw-r--r--fs/btrfs/super.c12
-rw-r--r--fs/btrfs/sysfs.c7
-rw-r--r--fs/btrfs/tests/btrfs-tests.c1
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c6
-rw-r--r--fs/btrfs/tests/qgroup-tests.c4
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/tree-log.c57
-rw-r--r--fs/btrfs/uuid-tree.c2
-rw-r--r--fs/btrfs/volumes.c5
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/buffer.c7
-rw-r--r--fs/ceph/caps.c17
-rw-r--r--fs/ceph/dir.c6
-rw-r--r--fs/ceph/inode.c17
-rw-r--r--fs/ceph/mds_client.c13
-rw-r--r--fs/ceph/snap.c7
-rw-r--r--fs/ceph/super.c61
-rw-r--r--fs/ceph/super.h9
-rw-r--r--fs/ceph/xattr.c22
-rw-r--r--fs/char_dev.c8
-rw-r--r--fs/cifs/cifs_dfs_ref.c4
-rw-r--r--fs/cifs/cifsacl.c4
-rw-r--r--fs/cifs/cifsglob.h7
-rw-r--r--fs/cifs/cifsproto.h1
-rw-r--r--fs/cifs/connect.c38
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/cifs/file.c89
-rw-r--r--fs/cifs/inode.c83
-rw-r--r--fs/cifs/misc.c25
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/smb1ops.c5
-rw-r--r--fs/cifs/smb2file.c4
-rw-r--r--fs/cifs/smb2maperror.c3
-rw-r--r--fs/cifs/smb2misc.c13
-rw-r--r--fs/cifs/smb2ops.c19
-rw-r--r--fs/cifs/smb2pdu.c17
-rw-r--r--fs/cifs/xattr.c2
-rw-r--r--fs/coda/file.c69
-rw-r--r--fs/coda/psdev.c5
-rw-r--r--fs/compat_ioctl.c16
-rw-r--r--fs/configfs/configfs_internal.h15
-rw-r--r--fs/configfs/dir.c168
-rw-r--r--fs/configfs/file.c294
-rw-r--r--fs/configfs/symlink.c33
-rw-r--r--fs/crypto/policy.c2
-rw-r--r--fs/debugfs/inode.c13
-rw-r--r--fs/dlm/lockspace.c1
-rw-r--r--fs/dlm/member.c5
-rw-r--r--fs/dlm/memory.c9
-rw-r--r--fs/dlm/user.c5
-rw-r--r--fs/ecryptfs/crypto.c18
-rw-r--r--fs/ecryptfs/inode.c19
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/messaging.c1
-rw-r--r--fs/exec.c4
-rw-r--r--fs/exportfs/expfs.c32
-rw-r--r--fs/ext2/inode.c7
-rw-r--r--fs/ext2/super.c6
-rw-r--r--fs/ext2/xattr.c8
-rw-r--r--fs/ext4/balloc.c14
-rw-r--r--fs/ext4/dir.c38
-rw-r--r--fs/ext4/ext4.h44
-rw-r--r--fs/ext4/extents.c29
-rw-r--r--fs/ext4/file.c7
-rw-r--r--fs/ext4/ialloc.c23
-rw-r--r--fs/ext4/indirect.c47
-rw-r--r--fs/ext4/inline.c2
-rw-r--r--fs/ext4/inode.c64
-rw-r--r--fs/ext4/ioctl.c9
-rw-r--r--fs/ext4/mballoc.c61
-rw-r--r--fs/ext4/migrate.c27
-rw-r--r--fs/ext4/mmp.c12
-rw-r--r--fs/ext4/namei.c96
-rw-r--r--fs/ext4/page-io.c19
-rw-r--r--fs/ext4/resize.c77
-rw-r--r--fs/ext4/super.c194
-rw-r--r--fs/f2fs/data.c10
-rw-r--r--fs/f2fs/dir.c1
-rw-r--r--fs/f2fs/file.c2
-rw-r--r--fs/f2fs/gc.c2
-rw-r--r--fs/f2fs/inode.c1
-rw-r--r--fs/f2fs/recovery.c12
-rw-r--r--fs/f2fs/segment.c43
-rw-r--r--fs/f2fs/segment.h3
-rw-r--r--fs/f2fs/super.c40
-rw-r--r--fs/f2fs/trace.c8
-rw-r--r--fs/fat/dir.c13
-rw-r--r--fs/fat/fatent.c3
-rw-r--r--fs/fat/file.c11
-rw-r--r--fs/fat/inode.c19
-rw-r--r--fs/file.c1
-rw-r--r--fs/fs-writeback.c68
-rw-r--r--fs/fuse/control.c4
-rw-r--r--fs/fuse/cuse.c1
-rw-r--r--fs/fuse/dev.c14
-rw-r--r--fs/fuse/dir.c40
-rw-r--r--fs/fuse/file.c24
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/glock.c25
-rw-r--r--fs/gfs2/inode.c2
-rw-r--r--fs/gfs2/lock_dlm.c9
-rw-r--r--fs/gfs2/rgrp.c15
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hfs/brec.c1
-rw-r--r--fs/hfs/btree.c41
-rw-r--r--fs/hfs/btree.h1
-rw-r--r--fs/hfs/catalog.c16
-rw-r--r--fs/hfs/extent.c10
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/attributes.c14
-rw-r--r--fs/hfsplus/brec.c1
-rw-r--r--fs/hfsplus/btree.c44
-rw-r--r--fs/hfsplus/catalog.c24
-rw-r--r--fs/hfsplus/extents.c8
-rw-r--r--fs/hfsplus/hfsplus_fs.h2
-rw-r--r--fs/hfsplus/inode.c1
-rw-r--r--fs/hugetlbfs/inode.c28
-rw-r--r--fs/inode.c10
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c61
-rw-r--r--fs/jbd2/journal.c25
-rw-r--r--fs/jbd2/transaction.c18
-rw-r--r--fs/jffs2/readinode.c5
-rw-r--r--fs/jffs2/super.c5
-rw-r--r--fs/jfs/jfs_txnmgr.c3
-rw-r--r--fs/kernfs/symlink.c5
-rw-r--r--fs/libfs.c142
-rw-r--r--fs/locks.c2
-rw-r--r--fs/namei.c19
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/delegation.c36
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/dir.c90
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c2
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4client.c7
-rw-r--r--fs/nfs/nfs4file.c14
-rw-r--r--fs/nfs/nfs4proc.c57
-rw-r--r--fs/nfs/nfs4state.c31
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/pagelist.c19
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfs/proc.c7
-rw-r--r--fs/nfs/super.c5
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4layouts.c2
-rw-r--r--fs/nfsd/nfs4recover.c17
-rw-r--r--fs/nfsd/nfs4state.c28
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/state.h3
-rw-r--r--fs/nfsd/vfs.c17
-rw-r--r--fs/nfsd/vfs.h5
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c4
-rw-r--r--fs/ocfs2/aops.c25
-rw-r--r--fs/ocfs2/buffer_head_io.c77
-rw-r--r--fs/ocfs2/cluster/nodemanager.c14
-rw-r--r--fs/ocfs2/dcache.c12
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c2
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c23
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/export.c30
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/ocfs2/journal.c17
-rw-r--r--fs/ocfs2/journal.h8
-rw-r--r--fs/ocfs2/localalloc.c3
-rw-r--r--fs/ocfs2/move_extents.c17
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/ocfs2/stackglue.c6
-rw-r--r--fs/ocfs2/stackglue.h3
-rw-r--r--fs/ocfs2/xattr.c3
-rw-r--r--fs/open.c46
-rw-r--r--fs/orangefs/orangefs-debugfs.c1
-rw-r--r--fs/orangefs/orangefs-sysfs.c2
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/proc_sysctl.c11
-rw-r--r--fs/proc/task_mmu.c18
-rw-r--r--fs/proc/vmcore.c10
-rw-r--r--fs/pstore/ram.c11
-rw-r--r--fs/quota/dquot.c40
-rw-r--r--fs/read_write.c11
-rw-r--r--fs/readdir.c40
-rw-r--r--fs/reiserfs/inode.c12
-rw-r--r--fs/reiserfs/namei.c7
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/stree.c3
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/reiserfs/xattr.c27
-rw-r--r--fs/reiserfs/xattr_acl.c4
-rw-r--r--fs/splice.c12
-rw-r--r--fs/ubifs/file.c6
-rw-r--r--fs/udf/inode.c93
-rw-r--r--fs/udf/super.c22
-rw-r--r--fs/ufs/util.h2
-rw-r--r--fs/userfaultfd.c28
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c31
-rw-r--r--fs/xfs/xfs_buf.c40
-rw-r--r--fs/xfs/xfs_ioctl32.c40
-rw-r--r--fs/xfs/xfs_iops.c1
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_quotaops.c3
-rw-r--r--fs/xfs/xfs_rtalloc.c4
-rw-r--r--fs/xfs/xfs_super.c10
-rw-r--r--include/acpi/actypes.h2
-rw-r--r--include/asm-generic/bug.h1
-rw-r--r--include/asm-generic/getorder.h50
-rw-r--r--include/drm/drm_dp_mst_helper.h2
-rw-r--r--include/drm/drm_vma_manager.h1
-rw-r--r--include/dt-bindings/reset/amlogic,meson8b-reset.h6
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/ahci_platform.h2
-rw-r--r--include/linux/atalk.h22
-rw-r--r--include/linux/backing-dev-defs.h1
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/bitmap.h17
-rw-r--r--include/linux/bitops.h40
-rw-r--r--include/linux/bitrev.h36
-rw-r--r--include/linux/bits.h26
-rw-r--r--include/linux/blkdev.h17
-rw-r--r--include/linux/bug.h5
-rw-r--r--include/linux/can/dev.h34
-rw-r--r--include/linux/cec-funcs.h6
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/ceph/ceph_debug.h6
-rw-r--r--include/linux/cgroup.h10
-rw-r--r--include/linux/coda.h3
-rw-r--r--include/linux/coda_psdev.h11
-rw-r--r--include/linux/compiler-gcc.h15
-rw-r--r--include/linux/compiler.h45
-rw-r--r--include/linux/cpu.h14
-rw-r--r--include/linux/cpufeature.h2
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cred.h7
-rw-r--r--include/linux/devfreq_cooling.h2
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/dma-mapping.h3
-rw-r--r--include/linux/dmaengine.h5
-rw-r--r--include/linux/edac.h3
-rw-r--r--include/linux/efi.h9
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/futex.h17
-rw-r--r--include/linux/genalloc.h13
-rw-r--r--include/linux/gfp.h23
-rw-r--r--include/linux/gpio.h24
-rw-r--r--include/linux/gpio/consumer.h2
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/hrtimer.h14
-rw-r--r--include/linux/hugetlb.h4
-rw-r--r--include/linux/ieee80211.h53
-rw-r--r--include/linux/if_ether.h8
-rw-r--r--include/linux/if_pppox.h3
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/intel-iommu.h6
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/jbd2.h4
-rw-r--r--include/linux/kasan.h1
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kref.h5
-rw-r--r--include/linux/kvm_host.h9
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/libfdt_env.h4
-rw-r--r--include/linux/list.h30
-rw-r--r--include/linux/list_lru.h1
-rw-r--r--include/linux/list_nulls.h13
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mfd/da9063/registers.h6
-rw-r--r--include/linux/mfd/max77620.h4
-rw-r--r--include/linux/mfd/max8997.h1
-rw-r--r--include/linux/mfd/mc13xxx.h1
-rw-r--r--include/linux/mlx5/driver.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/mm.h44
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mod_devicetable.h4
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netfilter/ipset/ip_set.h7
-rw-r--r--include/linux/of.h4
-rw-r--r--include/linux/page-flags.h20
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/percpu_counter.h4
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/pipe_fs_i.h10
-rw-r--r--include/linux/platform_data/dma-ep93xx.h2
-rw-r--r--include/linux/poll.h4
-rw-r--r--include/linux/ptrace.h21
-rw-r--r--include/linux/pwm.h5
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h12
-rw-r--r--include/linux/rculist_nulls.h59
-rw-r--r--include/linux/rcupdate.h8
-rw-r--r--include/linux/regulator/ab8500.h3
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/relay.h2
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/selection.h4
-rw-r--r--include/linux/serial_core.h37
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/siphash.h145
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/smpboot.h2
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/sched.h2
-rw-r--r--include/linux/swap.h10
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/time.h12
-rw-r--r--include/linux/tty.h7
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/irda.h13
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/vmalloc.h5
-rw-r--r--include/linux/vmw_vmci_defs.h11
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/math-emu/soft-fp.h2
-rw-r--r--include/media/davinci/vpbe.h2
-rw-r--r--include/media/v4l2-device.h12
-rw-r--r--include/media/v4l2-rect.h8
-rw-r--r--include/net/arp.h8
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/caif/cfpkt.h27
-rw-r--r--include/net/cfg80211.h11
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/flow_dissector.h12
-rw-r--r--include/net/fq.h2
-rw-r--r--include/net/fq_impl.h4
-rw-r--r--include/net/inet_frag.h16
-rw-r--r--include/net/inet_hashtables.h12
-rw-r--r--include/net/ip.h7
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/ip6_tunnel.h9
-rw-r--r--include/net/ip_vs.h1
-rw-r--r--include/net/ipv6.h29
-rw-r--r--include/net/ipv6_frag.h110
-rw-r--r--include/net/llc.h1
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/neighbour.h6
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/br_netfilter.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h3
-rw-r--r--include/net/netns/hash.h15
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/sch_generic.h5
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/sock.h28
-rw-r--r--include/net/tcp.h46
-rw-r--r--include/net/xfrm.h17
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--include/scsi/libfcoe.h1
-rw-r--r--include/scsi/scsi_dbg.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/sound/rawmidi.h6
-rw-r--r--include/sound/soc-dapm.h2
-rw-r--r--include/sound/timer.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h2
-rw-r--r--include/trace/events/xen.h6
-rw-r--r--include/uapi/drm/i915_drm.h2
-rw-r--r--include/uapi/linux/coda_psdev.h13
-rw-r--r--include/uapi/linux/coresight-stm.h6
-rw-r--r--include/uapi/linux/ethtool.h6
-rw-r--r--include/uapi/linux/fuse.h2
-rw-r--r--include/uapi/linux/isdn/capicmd.h1
-rw-r--r--include/uapi/linux/netfilter/xt_sctp.h6
-rw-r--r--include/uapi/linux/nilfs2_ondisk.h24
-rw-r--r--include/uapi/linux/prctl.h1
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/tipc_config.h10
-rw-r--r--init/main.c4
-rw-r--r--ipc/mqueue.c27
-rw-r--r--ipc/msgutil.c6
-rw-r--r--ipc/sem.c6
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/audit.c40
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/auditfilter.c83
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/Makefile1
-rw-r--r--kernel/bpf/core.c73
-rw-r--r--kernel/bpf/hashtab.c99
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/cgroup.c4
-rw-r--r--kernel/cgroup_pids.c11
-rw-r--r--kernel/cpu.c90
-rw-r--r--kernel/cred.c36
-rw-r--r--kernel/debug/kdb/kdb_main.c2
-rw-r--r--kernel/elfcore.c1
-rw-r--r--kernel/events/core.c19
-rw-r--r--kernel/events/ring_buffer.c33
-rw-r--r--kernel/events/uprobes.c4
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/futex.c93
-rw-r--r--kernel/hung_task.c30
-rw-r--r--kernel/irq/chip.c16
-rw-r--r--kernel/irq/internals.h8
-rw-r--r--kernel/irq/irqdesc.c22
-rw-r--r--kernel/irq/manage.c13
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/kmod.c4
-rw-r--r--kernel/kprobes.c17
-rw-r--r--kernel/locking/lockdep.c22
-rw-r--r--kernel/locking/lockdep_proc.c8
-rw-r--r--kernel/locking/locktorture.c8
-rw-r--r--kernel/locking/rwsem-xadd.c44
-rw-r--r--kernel/locking/spinlock_debug.c32
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/notifier.c2
-rw-r--r--kernel/padata.c61
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/hibernate.c9
-rw-r--r--kernel/power/snapshot.c9
-rw-r--r--kernel/printk/printk.c131
-rw-r--r--kernel/ptrace.c34
-rw-r--r--kernel/rcu/rcuperf.c5
-rw-r--r--kernel/rcu/rcutorture.c5
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/fair.c90
-rw-r--r--kernel/sched/sched.h9
-rw-r--r--kernel/signal.c36
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl.c12
-rw-r--r--kernel/taskstats.c30
-rw-r--r--kernel/time/alarmtimer.c8
-rw-r--r--kernel/time/clocksource.c11
-rw-r--r--kernel/time/hrtimer.c11
-rw-r--r--kernel/time/ntp.c4
-rw-r--r--kernel/time/timer.c8
-rw-r--r--kernel/time/timer_list.c36
-rw-r--r--kernel/time/timer_stats.c2
-rw-r--r--kernel/trace/ftrace.c45
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--kernel/trace/trace.c53
-rw-r--r--kernel/trace/trace.h18
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_events_trigger.c15
-rw-r--r--kernel/trace/trace_hwlat.c4
-rw-r--r--kernel/trace/trace_kdb.c12
-rw-r--r--kernel/trace/trace_sched_wakeup.c4
-rw-r--r--kernel/trace/trace_stack.c5
-rw-r--r--kernel/trace/trace_stat.c31
-rw-r--r--kernel/trace/tracing_map.c4
-rw-r--r--kernel/workqueue.c52
-rw-r--r--lib/Kconfig.debug8
-rw-r--r--lib/Makefile6
-rw-r--r--lib/bitmap.c20
-rw-r--r--lib/bsearch.c2
-rw-r--r--lib/devres.c3
-rw-r--r--lib/div64.c4
-rw-r--r--lib/dma-debug.c1
-rw-r--r--lib/dump_stack.c7
-rw-r--r--lib/genalloc.c25
-rw-r--r--lib/int_sqrt.c6
-rw-r--r--lib/kfifo.c3
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/mpi/mpi-pow.c6
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/raid6/unroll.awk2
-rw-r--r--lib/reed_solomon/decode_rs.c18
-rw-r--r--lib/scatterlist.c11
-rw-r--r--lib/siphash.c551
-rw-r--r--lib/stackdepot.c8
-rw-r--r--lib/string.c22
-rw-r--r--lib/strncpy_from_user.c5
-rw-r--r--lib/strnlen_user.c4
-rw-r--r--lib/test_kasan.c3
-rw-r--r--lib/test_siphash.c223
-rw-r--r--lib/ubsan.c49
-rw-r--r--mm/backing-dev.c1
-rw-r--r--mm/cma.c21
-rw-r--r--mm/cma_debug.c2
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/gup.c54
-rw-r--r--mm/huge_memory.c6
-rw-r--r--mm/hugetlb.c56
-rw-r--r--mm/hugetlb_cgroup.c2
-rw-r--r--mm/internal.h10
-rw-r--r--mm/kasan/kasan.c9
-rw-r--r--mm/kasan/kasan_init.c15
-rw-r--r--mm/kasan/report.c1
-rw-r--r--mm/khugepaged.c3
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/ksm.c14
-rw-r--r--mm/list_lru.c10
-rw-r--r--mm/memcontrol.c65
-rw-r--r--mm/memory_hotplug.c22
-rw-r--r--mm/mempolicy.c30
-rw-r--r--mm/mincore.c23
-rw-r--r--mm/mlock.c4
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/mmu_notifier.c2
-rw-r--r--mm/nommu.c10
-rw-r--r--mm/page-writeback.c37
-rw-r--r--mm/page_alloc.c14
-rw-r--r--mm/page_ext.c1
-rw-r--r--mm/page_idle.c4
-rw-r--r--mm/percpu.c8
-rw-r--r--mm/shmem.c22
-rw-r--r--mm/slab.c23
-rw-r--r--mm/slub.c54
-rw-r--r--mm/usercopy.c10
-rw-r--r--mm/vmalloc.c24
-rw-r--r--mm/vmstat.c7
-rw-r--r--mm/zsmalloc.c85
-rw-r--r--net/6lowpan/nhc.c2
-rw-r--r--net/8021q/vlan.h1
-rw-r--r--net/8021q/vlan_dev.c7
-rw-r--r--net/8021q/vlan_netlink.c19
-rw-r--r--net/9p/protocol.c15
-rw-r--r--net/9p/trans_common.c1
-rw-r--r--net/9p/trans_rdma.c7
-rw-r--r--net/9p/trans_virtio.c8
-rw-r--r--net/appletalk/aarp.c15
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/appletalk/ddp.c59
-rw-r--r--net/appletalk/sysctl_net_atalk.c5
-rw-r--r--net/atm/lec.c6
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c125
-rw-r--r--net/batman-adv/bat_v.c25
-rw-r--r--net/batman-adv/bat_v_elp.c22
-rw-r--r--net/batman-adv/bat_v_ogm.c60
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c16
-rw-r--r--net/batman-adv/debugfs.c40
-rw-r--r--net/batman-adv/debugfs.h11
-rw-r--r--net/batman-adv/distributed-arp-table.c9
-rw-r--r--net/batman-adv/fragmentation.c22
-rw-r--r--net/batman-adv/gateway_client.c11
-rw-r--r--net/batman-adv/gateway_common.c5
-rw-r--r--net/batman-adv/hard-interface.c51
-rw-r--r--net/batman-adv/netlink.c2
-rw-r--r--net/batman-adv/originator.c4
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/routing.c11
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/translation-table.c183
-rw-r--r--net/batman-adv/types.h22
-rw-r--r--net/bluetooth/6lowpan.c14
-rw-r--r--net/bluetooth/hci_conn.c14
-rw-r--r--net/bluetooth/hci_core.c9
-rw-r--r--net/bluetooth/hci_request.c9
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/bluetooth/hidp/sock.c1
-rw-r--r--net/bluetooth/l2cap_core.c136
-rw-r--r--net/bluetooth/rfcomm/tty.c4
-rw-r--r--net/bluetooth/smp.c13
-rw-r--r--net/bridge/br_device.c6
-rw-r--r--net/bridge/br_if.c13
-rw-r--r--net/bridge/br_input.c23
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_multicast.c39
-rw-r--r--net/bridge/br_netfilter_hooks.c9
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_stp_bpdu.c3
-rw-r--r--net/bridge/br_vlan.c5
-rw-r--r--net/bridge/netfilter/ebtables.c40
-rw-r--r--net/caif/cfctrl.c50
-rw-r--r--net/can/af_can.c1
-rw-r--r--net/ceph/ceph_common.c13
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/ethtool.c86
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/flow_dissector.c48
-rw-r--r--net/core/neighbour.c39
-rw-r--r--net/core/net_namespace.c4
-rw-r--r--net/core/netclassid_cgroup.c47
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/pktgen.c11
-rw-r--r--net/core/rtnetlink.c23
-rw-r--r--net/core/skbuff.c19
-rw-r--r--net/core/sock.c13
-rw-r--r--net/core/stream.c16
-rw-r--r--net/core/sysctl_net_core.c73
-rw-r--r--net/core/utils.c20
-rw-r--r--net/dccp/feat.c7
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/decnet/dn_dev.c2
-rw-r--r--net/dsa/dsa2.c2
-rw-r--r--net/dsa/tag_brcm.c2
-rw-r--r--net/dsa/tag_qca.c3
-rw-r--r--net/ethernet/eth.c7
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/hsr/hsr_framereg.c10
-rw-r--r--net/hsr/hsr_netlink.c83
-rw-r--r--net/hsr/hsr_slave.c10
-rw-r--r--net/ieee802154/6lowpan/reassembly.c4
-rw-r--r--net/ieee802154/nl_policy.c6
-rw-r--r--net/ieee802154/socket.c3
-rw-r--r--net/ipv4/Kconfig1
-rw-r--r--net/ipv4/cipso_ipv4.c7
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c26
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_demux.c19
-rw-r--r--net/ipv4/icmp.c11
-rw-r--r--net/ipv4/igmp.c47
-rw-r--r--net/ipv4/inet_diag.c3
-rw-r--r--net/ipv4/inet_fragment.c293
-rw-r--r--net/ipv4/inet_hashtables.c18
-rw-r--r--net/ipv4/ip_fragment.c295
-rw-r--r--net/ipv4/ip_gre.c9
-rw-r--r--net/ipv4/ip_input.c7
-rw-r--r--net/ipv4/ip_options.c4
-rw-r--r--net/ipv4/ip_output.c15
-rw-r--r--net/ipv4/ip_tunnel.c18
-rw-r--r--net/ipv4/ip_vti.c40
-rw-r--r--net/ipv4/netfilter/arp_tables.c46
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/route.c63
-rw-r--r--net/ipv4/sysctl_net_ipv4.c16
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_bbr.c3
-rw-r--r--net/ipv4/tcp_dctcp.c36
-rw-r--r--net/ipv4/tcp_input.c70
-rw-r--r--net/ipv4/tcp_ipv4.c12
-rw-r--r--net/ipv4/tcp_output.c33
-rw-r--r--net/ipv4/tcp_timer.c11
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv4/xfrm4_policy.c24
-rw-r--r--net/ipv6/addrconf.c21
-rw-r--r--net/ipv6/inet6_hashtables.c3
-rw-r--r--net/ipv6/ip6_fib.c7
-rw-r--r--net/ipv6/ip6_flowlabel.c29
-rw-r--r--net/ipv6/ip6_input.c10
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_tunnel.c16
-rw-r--r--net/ipv6/ip6_vti.c33
-rw-r--r--net/ipv6/ip6mr.c11
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/mcast.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c281
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c3
-rw-r--r--net/ipv6/output_core.c30
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c27
-rw-r--r--net/ipv6/reassembly.c363
-rw-r--r--net/ipv6/route.c1
-rw-r--r--net/ipv6/sit.c7
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c4
-rw-r--r--net/iucv/af_iucv.c27
-rw-r--r--net/kcm/kcmsock.c16
-rw-r--r--net/key/af_key.c12
-rw-r--r--net/l2tp/l2tp_core.c9
-rw-r--r--net/l2tp/l2tp_core.h1
-rw-r--r--net/l2tp/l2tp_ip.c24
-rw-r--r--net/l2tp/l2tp_ip6.c24
-rw-r--r--net/l2tp/l2tp_ppp.c11
-rw-r--r--net/lapb/lapb_iface.c1
-rw-r--r--net/llc/af_llc.c34
-rw-r--r--net/llc/llc_c_ac.c8
-rw-r--r--net/llc/llc_conn.c67
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/llc/llc_if.c12
-rw-r--r--net/llc/llc_output.c2
-rw-r--r--net/llc/llc_s_ac.c12
-rw-r--r--net/llc/llc_sap.c23
-rw-r--r--net/llc/llc_station.c4
-rw-r--r--net/mac80211/cfg.c64
-rw-r--r--net/mac80211/debugfs_netdev.c11
-rw-r--r--net/mac80211/driver-ops.c13
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/ieee80211_i.h5
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c6
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mlme.c35
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c4
-rw-r--r--net/mac80211/rx.c15
-rw-r--r--net/mac80211/sta_info.c13
-rw-r--r--net/mac80211/tdls.c23
-rw-r--r--net/mac80211/tkip.c18
-rw-r--r--net/mac80211/tx.c20
-rw-r--r--net/mac80211/util.c18
-rw-r--r--net/mac80211/wpa.c7
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h4
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c6
-rw-r--r--net/netfilter/ipset/ip_set_core.c11
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c15
-rw-r--r--net/netfilter/nf_conntrack_core.c35
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c37
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_cthelper.c2
-rw-r--r--net/netfilter/nft_fwd_netdev.c8
-rw-r--r--net/netfilter/nft_set_rbtree.c7
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_bpf.c3
-rw-r--r--net/netfilter/xt_hashlimit.c10
-rw-r--r--net/netfilter/xt_physdev.c9
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/netrom/af_netrom.c4
-rw-r--r--net/nfc/hci/core.c19
-rw-r--r--net/nfc/llcp_sock.c14
-rw-r--r--net/nfc/nci/data.c2
-rw-r--r--net/nfc/nci/uart.c2
-rw-r--r--net/nfc/netlink.c11
-rw-r--r--net/openvswitch/actions.c6
-rw-r--r--net/openvswitch/conntrack.c12
-rw-r--r--net/openvswitch/datapath.c19
-rw-r--r--net/openvswitch/flow_netlink.c4
-rw-r--r--net/openvswitch/vport-internal_dev.c5
-rw-r--r--net/packet/af_packet.c108
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/qrtr/qrtr.c8
-rw-r--r--net/rds/ib.c6
-rw-r--r--net/rds/ib_fmr.c11
-rw-r--r--net/rds/ib_rdma.c13
-rw-r--r--net/rds/ib_stats.c2
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rfkill/core.c7
-rw-r--r--net/rose/rose_subr.c21
-rw-r--r--net/rxrpc/af_rxrpc.c4
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_object.c4
-rw-r--r--net/rxrpc/conn_client.c3
-rw-r--r--net/rxrpc/conn_object.c3
-rw-r--r--net/rxrpc/input.c3
-rw-r--r--net/rxrpc/output.c29
-rw-r--r--net/rxrpc/peer_event.c3
-rw-r--r--net/sched/act_api.c12
-rw-r--r--net/sched/act_ife.c3
-rw-r--r--net/sched/act_mirred.c6
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_route.c4
-rw-r--r--net/sched/cls_rsvp.h6
-rw-r--r--net/sched/cls_tcindex.c41
-rw-r--r--net/sched/ematch.c5
-rw-r--r--net/sched/sch_cbq.c27
-rw-r--r--net/sched/sch_codel.c6
-rw-r--r--net/sched/sch_dsmark.c2
-rw-r--r--net/sched/sch_fq.c3
-rw-r--r--net/sched/sch_fq_codel.c6
-rw-r--r--net/sched/sch_generic.c6
-rw-r--r--net/sched/sch_hhf.c10
-rw-r--r--net/sched/sch_mq.c3
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_netem.c22
-rw-r--r--net/sched/sch_prio.c12
-rw-r--r--net/sched/sch_sfb.c13
-rw-r--r--net/sched/sch_sfq.c14
-rw-r--r--net/sctp/associola.c1
-rw-r--r--net/sctp/endpointola.c9
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/ipv6.c22
-rw-r--r--net/sctp/protocol.c37
-rw-r--r--net/sctp/sm_sideeffect.c32
-rw-r--r--net/sctp/sm_statefuns.c27
-rw-r--r--net/sctp/socket.c92
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/socket.c10
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c1
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/cache.c3
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/sched.c109
-rw-r--r--net/sunrpc/xprtsock.c34
-rw-r--r--net/tipc/core.c59
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/name_distr.c3
-rw-r--r--net/tipc/netlink_compat.c57
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/subscr.c14
-rw-r--r--net/tipc/subscr.h5
-rw-r--r--net/tipc/sysctl.c8
-rw-r--r--net/tipc/udp_media.c8
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/vmw_vsock/af_vsock.c7
-rw-r--r--net/vmw_vsock/virtio_transport.c13
-rw-r--r--net/vmw_vsock/virtio_transport_common.c29
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/ethtool.c8
-rw-r--r--net/wireless/nl80211.c74
-rw-r--r--net/wireless/rdev-ops.h4
-rw-r--r--net/wireless/reg.c65
-rw-r--r--net/wireless/scan.c14
-rw-r--r--net/wireless/util.c48
-rw-r--r--net/wireless/wext-compat.c2
-rw-r--r--net/wireless/wext-core.c3
-rw-r--r--net/wireless/wext-sme.c8
-rw-r--r--net/x25/af_x25.c24
-rw-r--r--net/xfrm/Kconfig2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--net/xfrm/xfrm_user.c41
-rw-r--r--samples/bpf/bpf_load.c2
-rw-r--r--samples/bpf/trace_event_user.c4
-rw-r--r--samples/mei/mei-amt-version.c2
-rw-r--r--samples/pktgen/functions.sh17
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.modpost2
-rwxr-xr-xscripts/checkstack.pl2
-rw-r--r--scripts/coccinelle/api/stream_open.cocci363
-rwxr-xr-xscripts/decode_stacktrace.sh4
-rw-r--r--scripts/dtc/dtc-lexer.l1
-rw-r--r--scripts/dtc/dtc-lexer.lex.c_shipped1
-rw-r--r--scripts/gcc-plugins/gcc-common.h4
-rw-r--r--scripts/gdb/linux/symbols.py3
-rw-r--r--scripts/kallsyms.c5
-rw-r--r--scripts/kconfig/confdata.c2
-rw-r--r--scripts/kconfig/expr.c7
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c3
-rw-r--r--scripts/kconfig/nconf.c2
-rw-r--r--scripts/kconfig/nconf.gui.c3
-rw-r--r--scripts/mod/file2alias.c141
-rw-r--r--scripts/mod/modpost.c12
-rwxr-xr-xscripts/namespace.pl13
-rw-r--r--scripts/recordmcount.c17
-rw-r--r--scripts/recordmcount.h3
-rwxr-xr-xscripts/setlocalversion12
-rw-r--r--security/apparmor/policy_unpack.c2
-rw-r--r--security/device_cgroup.c2
-rw-r--r--security/integrity/ima/ima_crypto.c5
-rw-r--r--security/keys/key.c3
-rw-r--r--security/keys/keyctl.c4
-rw-r--r--security/keys/request_key_auth.c6
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c49
-rw-r--r--security/selinux/ss/policydb.c6
-rw-r--r--security/smack/smack_access.c4
-rw-r--r--security/smack/smack_lsm.c7
-rw-r--r--sound/aoa/codecs/onyx.c4
-rw-r--r--sound/core/compress_offload.c62
-rw-r--r--sound/core/hrtimer.c1
-rw-r--r--sound/core/info.c12
-rw-r--r--sound/core/init.c18
-rw-r--r--sound/core/oss/linear.c2
-rw-r--r--sound/core/oss/mulaw.c2
-rw-r--r--sound/core/oss/pcm_oss.c43
-rw-r--r--sound/core/oss/pcm_plugin.c36
-rw-r--r--sound/core/oss/route.c2
-rw-r--r--sound/core/pcm_lib.c10
-rw-r--r--sound/core/pcm_native.c27
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_ioctl.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c1
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c7
-rw-r--r--sound/core/seq/seq_clientmgr.c34
-rw-r--r--sound/core/seq/seq_fifo.c17
-rw-r--r--sound/core/seq/seq_fifo.h2
-rw-r--r--sound/core/seq/seq_ports.c15
-rw-r--r--sound/core/seq/seq_ports.h5
-rw-r--r--sound/core/seq/seq_queue.c29
-rw-r--r--sound/core/seq/seq_system.c18
-rw-r--r--sound/core/seq/seq_timer.c27
-rw-r--r--sound/core/seq/seq_timer.h3
-rw-r--r--sound/core/seq/seq_virmidi.c1
-rw-r--r--sound/core/timer.c155
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/opl3/opl3_voice.h2
-rw-r--r--sound/firewire/amdtp-am824.c2
-rw-r--r--sound/firewire/bebob/bebob_focusrite.c3
-rw-r--r--sound/firewire/bebob/bebob_stream.c3
-rw-r--r--sound/firewire/isight.c10
-rw-r--r--sound/firewire/oxfw/oxfw.c3
-rw-r--r--sound/firewire/packets-buffer.c2
-rw-r--r--sound/firewire/tascam/tascam-pcm.c3
-rw-r--r--sound/firewire/tascam/tascam-stream.c42
-rw-r--r--sound/hda/hdmi_chmap.c2
-rw-r--r--sound/i2c/cs8427.c2
-rw-r--r--sound/i2c/other/ak4xxx-adda.c7
-rw-r--r--sound/isa/cs423x/cs4236.c3
-rw-r--r--sound/isa/sb/sb8.c4
-rw-r--r--sound/pci/echoaudio/echoaudio.c5
-rw-r--r--sound/pci/hda/hda_auto_parser.c4
-rw-r--r--sound/pci/hda/hda_beep.c6
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_codec.c3
-rw-r--r--sound/pci/hda/hda_controller.c9
-rw-r--r--sound/pci/hda/hda_controller.h9
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c24
-rw-r--r--sound/pci/hda/hda_generic.h2
-rw-r--r--sound/pci/hda/hda_intel.c44
-rw-r--r--sound/pci/hda/hda_sysfs.c4
-rw-r--r--sound/pci/hda/patch_analog.c1
-rw-r--r--sound/pci/hda/patch_ca0132.c9
-rw-r--r--sound/pci/hda/patch_conexant.c17
-rw-r--r--sound/pci/hda/patch_hdmi.c11
-rw-r--r--sound/pci/hda/patch_realtek.c33
-rw-r--r--sound/pci/hda/patch_sigmatel.c20
-rw-r--r--sound/pci/ice1712/ice1724.c9
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c4
-rw-r--r--sound/pci/intel8x0m.c20
-rw-r--r--sound/sh/aica.c4
-rw-r--r--sound/soc/atmel/Kconfig2
-rw-r--r--sound/soc/codecs/cs4265.c2
-rw-r--r--sound/soc/codecs/cs4270.c1
-rw-r--r--sound/soc/codecs/cs42xx8.c1
-rw-r--r--sound/soc/codecs/cs4349.c1
-rw-r--r--sound/soc/codecs/es8328.c2
-rw-r--r--sound/soc/codecs/hdac_hdmi.c6
-rw-r--r--sound/soc/codecs/hdmi-codec.c6
-rw-r--r--sound/soc/codecs/max98090.c28
-rw-r--r--sound/soc/codecs/nau8810.c4
-rw-r--r--sound/soc/codecs/pcm512x.c8
-rw-r--r--sound/soc/codecs/rt5677-spi.c35
-rw-r--r--sound/soc/codecs/rt5677.c1
-rw-r--r--sound/soc/codecs/sgtl5000.c249
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c2
-rw-r--r--sound/soc/codecs/wm8737.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c3
-rw-r--r--sound/soc/davinci/davinci-mcasp.c58
-rw-r--r--sound/soc/fsl/Kconfig9
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c4
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c1
-rw-r--r--sound/soc/fsl/fsl_asrc.c4
-rw-r--r--sound/soc/fsl/fsl_esai.c19
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c5
-rw-r--r--sound/soc/fsl/fsl_utils.c1
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c4
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c2
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c2
-rw-r--r--sound/soc/intel/common/sst-firmware.c8
-rw-r--r--sound/soc/intel/common/sst-ipc.c2
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c2
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c8
-rw-r--r--sound/soc/qcom/apq8016_sbc.c24
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c2
-rw-r--r--sound/soc/sh/rcar/core.c1
-rw-r--r--sound/soc/soc-dapm.c18
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c6
-rw-r--r--sound/soc/soc-jack.c3
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-pcm.c133
-rw-r--r--sound/soc/soc-topology.c2
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c4
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c17
-rw-r--r--sound/sound_core.c3
-rw-r--r--sound/usb/endpoint.c3
-rw-r--r--sound/usb/line6/driver.c62
-rw-r--r--sound/usb/line6/midibuf.c2
-rw-r--r--sound/usb/line6/pcm.c19
-rw-r--r--sound/usb/line6/podhd.c23
-rw-r--r--sound/usb/line6/toneport.c24
-rw-r--r--sound/usb/mixer.c42
-rw-r--r--sound/usb/mixer_maps.c28
-rw-r--r--sound/usb/mixer_quirks.c4
-rw-r--r--sound/usb/pcm.c1
-rw-r--r--sound/usb/quirks-table.h9
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c2
-rw-r--r--tools/accounting/getdelays.c2
-rw-r--r--tools/gpio/Build1
-rw-r--r--tools/gpio/Makefile10
-rw-r--r--tools/hv/hv_kvp_daemon.c4
-rw-r--r--tools/hv/hv_vss_daemon.c2
-rw-r--r--tools/iio/iio_utils.c4
-rw-r--r--tools/include/linux/bitops.h7
-rw-r--r--tools/include/linux/bits.h26
-rw-r--r--tools/include/linux/string.h8
-rw-r--r--tools/lib/api/fs/fs.c4
-rw-r--r--tools/lib/string.c7
-rw-r--r--tools/lib/traceevent/Makefile6
-rw-r--r--tools/lib/traceevent/event-parse.c7
-rw-r--r--tools/lib/traceevent/event-plugin.c2
-rw-r--r--tools/lib/traceevent/parse-filter.c9
-rw-r--r--tools/objtool/Makefile5
-rw-r--r--tools/objtool/arch/x86/lib/x86-opcode-map.txt20
-rw-r--r--tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk4
-rw-r--r--tools/objtool/check.c18
-rw-r--r--tools/objtool/elf.c2
-rw-r--r--tools/perf/Documentation/perf-config.txt2
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c127
-rw-r--r--tools/perf/arch/s390/util/machine.c23
-rw-r--r--tools/perf/bench/numa.c10
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-kmem.c1
-rw-r--r--tools/perf/builtin-probe.c10
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/builtin-top.c5
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/jevents.c11
-rw-r--r--tools/perf/tests/evsel-tp-sched.c7
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c2
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c4
-rw-r--r--tools/perf/tests/task-exit.c1
-rw-r--r--tools/perf/ui/browsers/hists.c1
-rw-r--r--tools/perf/ui/tui/helpline.c2
-rw-r--r--tools/perf/util/build-id.c1
-rw-r--r--tools/perf/util/config.c3
-rw-r--r--tools/perf/util/data-convert-bt.c2
-rw-r--r--tools/perf/util/dwarf-aux.c80
-rw-r--r--tools/perf/util/dwarf-aux.h3
-rw-r--r--tools/perf/util/evsel.c9
-rw-r--r--tools/perf/util/header.c15
-rw-r--r--tools/perf/util/hist.c6
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c51
-rw-r--r--tools/perf/util/jitdump.c6
-rw-r--r--tools/perf/util/llvm-utils.c6
-rw-r--r--tools/perf/util/machine.c7
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c5
-rw-r--r--tools/perf/util/parse-events.c1
-rw-r--r--tools/perf/util/perf_regs.h2
-rw-r--r--tools/perf/util/probe-finder.c80
-rw-r--r--tools/perf/util/stat.c17
-rw-r--r--tools/perf/util/stat.h1
-rw-r--r--tools/perf/util/strbuf.c1
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol.c21
-rw-r--r--tools/perf/util/symbol.h2
-rw-r--r--tools/perf/util/thread.c12
-rw-r--r--tools/perf/util/util.h1
-rw-r--r--tools/power/acpi/Makefile.config2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c1
-rw-r--r--tools/power/x86/turbostat/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.c5
-rw-r--r--tools/scripts/Makefile.include4
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc3
-rw-r--r--tools/testing/selftests/kvm/config3
-rw-r--r--tools/testing/selftests/net/reuseport_dualstack.c3
-rwxr-xr-xtools/testing/selftests/net/run_netsocktests2
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_icmp_related.sh283
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh6
-rw-r--r--tools/testing/selftests/rseq/settings1
-rw-r--r--tools/testing/selftests/timers/adjtick.c1
-rw-r--r--tools/testing/selftests/timers/leapcrash.c1
-rw-r--r--tools/testing/selftests/timers/mqueue-lat.c1
-rw-r--r--tools/testing/selftests/timers/nanosleep.c1
-rw-r--r--tools/testing/selftests/timers/nsleep-lat.c1
-rw-r--r--tools/testing/selftests/timers/raw_skew.c1
-rw-r--r--tools/testing/selftests/timers/set-tai.c1
-rw-r--r--tools/testing/selftests/timers/set-tz.c2
-rw-r--r--tools/testing/selftests/timers/threadtest.c1
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c2
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c8
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c8
-rw-r--r--tools/usb/usbip/src/usbip_network.c40
-rw-r--r--tools/usb/usbip/src/usbip_network.h12
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c1
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c18
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c5
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c5
-rw-r--r--virt/kvm/arm/vgic/vgic.c7
-rw-r--r--virt/kvm/coalesced_mmio.c17
-rw-r--r--virt/kvm/kvm_main.c173
3475 files changed, 38753 insertions, 20540 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 6bd45346ac7e..3f8701e8fa24 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -4,7 +4,7 @@ KernelVersion: 3.10
Contact: Samuel Ortiz <sameo@linux.intel.com>
linux-mei@linux.intel.com
Description: Stores the same MODALIAS value emitted by uevent
- Format: mei:<mei device name>:<device uuid>:
+ Format: mei:<mei device name>:<device uuid>:<protocol version>
What: /sys/bus/mei/devices/.../name
Date: May 2015
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 069e8d52c991..b41046b5713b 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -357,6 +357,9 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
+ /sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ /sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
@@ -369,8 +372,7 @@ Description: Information about CPU vulnerabilities
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
- Details about the l1tf file can be found in
- Documentation/admin-guide/l1tf.rst
+ See also: Documentation/hw-vuln/index.rst
What: /sys/devices/system/cpu/smt
/sys/devices/system/cpu/smt/active
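The three new entries sit alongside the existing per-vulnerability files and are read like any other sysfs attribute. A minimal Python sketch, assuming a Linux host that exposes this directory (the helper name is illustrative):

    import os

    VULN_DIR = "/sys/devices/system/cpu/vulnerabilities"

    def read_vulnerabilities(path=VULN_DIR):
        """Map each vulnerability file name to its reported status string."""
        status = {}
        for name in sorted(os.listdir(path)):
            with open(os.path.join(path, name)) as f:
                status[name] = f.read().strip()
        return status

    if __name__ == "__main__":
        for name, state in read_vulnerabilities().items():
            print("%-18s %s" % (name, state))

On a system running this kernel it prints one status line per vulnerability file, including the mds, tsx_async_abort and itlb_multihit entries added here next to the older spectre and l1tf ones.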
diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
index 525452726d31..b9e060c5b61e 100644
--- a/Documentation/arm/kernel_mode_neon.txt
+++ b/Documentation/arm/kernel_mode_neon.txt
@@ -6,7 +6,7 @@ TL;DR summary
* Use only NEON instructions, or VFP instructions that don't rely on support
code
* Isolate your NEON code in a separate compilation unit, and compile it with
- '-mfpu=neon -mfloat-abi=softfp'
+ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
* Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
NEON code
* Don't sleep in your NEON code, and be aware that it will be executed with
@@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
Therefore, the recommended and only supported way of using NEON/VFP in the
kernel is by adhering to the following rules:
* isolate the NEON code in a separate compilation unit and compile it with
- '-mfpu=neon -mfloat-abi=softfp';
+ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
* issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
into the unit containing the NEON code from a compilation unit which is *not*
built with the GCC flag '-mfpu=neon' set.
diff --git a/Documentation/conf.py b/Documentation/conf.py
index d769cd89a9f7..da7b54388eff 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -37,7 +37,7 @@ from load_config import loadConfig
extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include', 'cdomain']
# The name of the math extension changed on Sphinx 1.4
-if major == 1 and minor > 3:
+if (major == 1 and minor > 3) or (major > 1):
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
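The widened check matters because Sphinx 2.x and later still ship sphinx.ext.imgmath, while the old "major == 1" guard would silently fall back to the long-removed pngmath extension for them. A small Python sketch with hypothetical version tuples shows how the two predicates differ:

    # Old and new guards from Documentation/conf.py, compared on sample versions.
    def old_check(major, minor):
        return major == 1 and minor > 3

    def new_check(major, minor):
        return (major == 1 and minor > 3) or (major > 1)

    for major, minor in [(1, 3), (1, 4), (1, 8), (2, 0), (3, 1)]:
        # imgmath should be selected for every release from 1.4 onwards.
        print((major, minor), old_check(major, minor), new_check(major, minor))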
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
index ee3723beb701..33b38716b77f 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: Should be one of the following:
- "microchip,mcp2510" for MCP2510.
- "microchip,mcp2515" for MCP2515.
+ - "microchip,mcp25625" for MCP25625.
- reg: SPI chip select.
- clocks: The clock feeding the CAN controller.
- interrupt-parent: The parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
index df873d1f3b7c..2aaae210317b 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
@@ -110,6 +110,13 @@ PROPERTIES
Usage: required
Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
+- fsl,erratum-a050385
+ Usage: optional
+ Value type: boolean
+ Definition: A boolean property. Indicates the presence of
+ erratum A050385, under which DMA transactions that are
+ split can result in a FMan lock.
+
=============================================================================
FMan MURAM Node
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
index be789685a1c2..18b892d010d8 100644
--- a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -27,4 +27,4 @@ and valid to enable charging:
- "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
- "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
- resistor, the other values are in ohm.
+ resistor, the other values are in kOhm.
diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index 6087defd9f93..d37fabe17bd1 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -8,6 +8,6 @@ Required properties:
Example:
serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x400>;
+ reg = <0x12000 0x200>;
interrupts = <43>;
};
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index bdd025ceb763..85ed3450099a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -596,3 +596,10 @@ in your dentry operations instead.
[mandatory]
->rename() has an added flags argument. Any flags not handled by the
filesystem should result in EINVAL being returned.
+--
+[mandatory]
+
+ [should've been added in 2016] stale comment in finish_open()
+ notwithstanding, failure exits in ->atomic_open() instances should
+ *NOT* fput() the file, no matter what. Everything is handled by the
+ caller.
diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt
index c8656dd029a9..958fff945304 100644
--- a/Documentation/hid/uhid.txt
+++ b/Documentation/hid/uhid.txt
@@ -160,7 +160,7 @@ them but you should handle them according to your needs.
UHID_OUTPUT:
This is sent if the HID device driver wants to send raw data to the I/O
device on the interrupt channel. You should read the payload and forward it to
- the device. The payload is of type "struct uhid_data_req".
+ the device. The payload is of type "struct uhid_output_req".
This may be received even though you haven't received UHID_OPEN, yet.
UHID_GET_REPORT:
diff --git a/Documentation/hw-vuln/index.rst b/Documentation/hw-vuln/index.rst
new file mode 100644
index 000000000000..24f53c501366
--- /dev/null
+++ b/Documentation/hw-vuln/index.rst
@@ -0,0 +1,15 @@
+========================
+Hardware vulnerabilities
+========================
+
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+ :maxdepth: 1
+
+ l1tf
+ mds
+ tsx_async_abort
+ multihit.rst
diff --git a/Documentation/l1tf.rst b/Documentation/hw-vuln/l1tf.rst
index bae52b845de0..31653a9f0e1b 100644
--- a/Documentation/l1tf.rst
+++ b/Documentation/hw-vuln/l1tf.rst
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
off Disables hypervisor mitigations and doesn't emit any
warnings.
+ It also drops the swap size and available RAM limit restrictions
+ on both hypervisor and bare metal.
+
============ =============================================================
The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -442,6 +445,7 @@ The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
module parameter is ignored and writes to the sysfs file are rejected.
+.. _mitigation_selection:
Mitigation selection guide
--------------------------
@@ -553,7 +557,7 @@ When nested virtualization is in use, three operating systems are involved:
the bare metal hypervisor, the nested hypervisor and the nested virtual
machine. VMENTER operations from the nested hypervisor into the nested
guest will always be processed by the bare metal hypervisor. If KVM is the
-bare metal hypervisor it wiil:
+bare metal hypervisor it will:
- Flush the L1D cache on every switch from the nested hypervisor to the
nested virtual machine, so that the nested hypervisor's secrets are not
@@ -576,7 +580,8 @@ Default mitigations
The kernel default mitigations for vulnerable processors are:
- PTE inversion to protect against malicious user space. This is done
- unconditionally and cannot be controlled.
+ unconditionally and cannot be controlled. The swap storage is limited
+ to ~16TB.
- L1D conditional flushing on VMENTER when EPT is enabled for
a guest.
diff --git a/Documentation/hw-vuln/mds.rst b/Documentation/hw-vuln/mds.rst
new file mode 100644
index 000000000000..fbbd1719afb9
--- /dev/null
+++ b/Documentation/hw-vuln/mds.rst
@@ -0,0 +1,311 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+ - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+ ============== ===== ===================================================
+ CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling
+ CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling
+ CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling
+ CVE-2019-11091 MDSUM Microarchitectural Data Sampling Uncacheable Memory
+ ============== ===== ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in the
+buffer can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget which
+in turn allows the value to be inferred via a cache side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads,
+cross-Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious,
+non-privileged user space applications running on hosts or guests. Malicious
+guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation-based vulnerabilities, the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence, the attacks are purely sampling based, but as demonstrated with
+the TLBleed attack, samples can be postprocessed successfully.
+
+Web-Browsers
+^^^^^^^^^^^^
+
+ It's unclear whether attacks through Web-Browsers are possible at
+ all. Exploitation through JavaScript is considered very unlikely,
+ but other widely used web technologies like WebAssembly could possibly be
+ abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+ .. list-table::
+
+ * - 'Not affected'
+ - The processor is not vulnerable
+ * - 'Vulnerable'
+ - The processor is vulnerable, but no mitigation enabled
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The processor is vulnerable but microcode is not updated.
+
+ The mitigation is enabled on a best effort basis. See :ref:`vmwerv`
+ * - 'Mitigation: Clear CPU buffers'
+ - The processor is vulnerable and the CPU buffer clearing mitigation is
+ enabled.
+
+If the processor is vulnerable then the following information is appended
+to the above information:
+
+ ======================== ============================================
+ 'SMT vulnerable'         SMT is enabled
+ 'SMT mitigated'          SMT is enabled and mitigated
+ 'SMT disabled'           SMT is disabled
+ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown
+ ======================== ============================================
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the processor is vulnerable but the availability of the microcode-based
+ mitigation mechanism is not advertised via CPUID, the kernel selects a best
+ effort mitigation mode. This mode invokes the mitigation instructions
+ without a guarantee that they clear the CPU buffers.
+
+ This is done to address virtualization scenarios where the host has the
+ microcode update applied, but the hypervisor is not yet updated to expose
+ the CPUID to the guest. If the host has updated microcode, the protection
+ takes effect; otherwise a few CPU cycles are wasted.
+
+ The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+-------------------------
+
+The kernel detects the affected CPUs and the presence of the microcode
+which is required.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+ The mitigation for MDS clears the affected CPU buffers on return to user
+ space and when entering a guest.
+
+ If SMT is enabled it also clears the buffers on idle entry when the CPU
+ is only affected by MSBDS and not any other MDS variant, because the
+ other variants cannot be protected against cross Hyper-Thread attacks.
+
+ For CPUs which are only affected by MSBDS, the user space, guest and idle
+ transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection for host to guest transition depends on the L1TF
+ vulnerability of the CPU:
+
+ - CPU is affected by L1TF:
+
+ If the L1D flush mitigation is enabled and up to date microcode is
+ available, the L1D flush mitigation automatically protects the
+ guest transition.
+
+ If the L1D flush mitigation is disabled, then the MDS mitigation is
+ invoked explicitly when the host MDS mitigation is enabled.
+
+ For details on L1TF and virtualization see:
+ :ref:`Documentation/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+ - CPU is not affected by L1TF:
+
+ CPU buffers are flushed before entering the guest when the host MDS
+ mitigation is enabled.
+
+ The resulting MDS protection matrix for the host to guest transition:
+
+ ============ ===== ============= ============ =================
+ L1TF         MDS   VMX-L1FLUSH   Host MDS     MDS-State
+
+ Don't care   No    Don't care    N/A          Not affected
+
+ Yes          Yes   Disabled      Off          Vulnerable
+
+ Yes          Yes   Disabled      Full         Mitigated
+
+ Yes          Yes   Enabled       Don't care   Mitigated
+
+ No           Yes   N/A           Off          Vulnerable
+
+ No           Yes   N/A           Full         Mitigated
+ ============ ===== ============= ============ =================
+
+ This only covers the host to guest transition, i.e. prevents leakage from
+ host to guest, but does not protect the guest internally. Guests need to
+ have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The XEON PHI processor family is affected by MSBDS which can be exploited
+ cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+ the use of MWAIT in user space (Ring 3), which opens a potential attack vector
+ for malicious user space. The exposure can be disabled on the kernel
+ command line with the 'ring3mwait=disable' command line option.
+
+ XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+ before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+ either, disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+ All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+ means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+ disable SMT for full protection. These are most of the affected CPUs; the
+ exception is XEON PHI, see :ref:`xeon_phi`.
+
+ Disabling SMT can have a significant performance impact, but the impact
+ depends on the type of workloads.
+
+ See the relevant chapter in the L1TF mitigation documentation for details:
+ :ref:`Documentation/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows control of the MDS mitigations at boot
+time with the option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full         If the CPU is vulnerable, enable all available mitigations
+              for the MDS vulnerability, CPU buffer clearing on exit to
+              userspace and when entering a VM. Idle transitions are
+              protected as well if SMT is enabled.
+
+              It does not automatically disable SMT.
+
+ full,nosmt   The same as mds=full, with SMT disabled on vulnerable
+              CPUs. This is the complete mitigation.
+
+ off          Disables MDS mitigations completely.
+
+ ============ =============================================================
+
+Not specifying this option is equivalent to "mds=full". For processors
+that are affected by both TAA (TSX Asynchronous Abort) and MDS,
+specifying just "mds=off" without an accompanying "tsx_async_abort=off"
+will have no effect as the same mitigation is used for both
+vulnerabilities.
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+ If all userspace applications are from a trusted source and do not
+ execute untrusted code which is supplied externally, then the mitigation
+ can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The same considerations as above versus trusted user space apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection depends on the state of the L1TF mitigations.
+ See :ref:`virt_mechanism`.
+
+ If the MDS mitigation is enabled and SMT is disabled, guest to host and
+ guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - Enable CPU buffer clearing
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted code. The same rationale as
+ for L1TF applies.
+ See :ref:`Documentation/hw-vuln/l1tf.rst <default_mitigations>`.
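A minimal sketch, assuming a kernel that exposes the mds sysfs file described above and the usual "mitigation; SMT state" output layout (the helper name is illustrative):

    MDS_SYSFS = "/sys/devices/system/cpu/vulnerabilities/mds"

    def mds_state(path=MDS_SYSFS):
        """Return (mitigation, smt_info) parsed from the mds sysfs file."""
        with open(path) as f:
            value = f.read().strip()
        # On affected CPUs the SMT information is appended after a semicolon,
        # e.g. "Mitigation: Clear CPU buffers; SMT vulnerable".
        if ";" in value:
            mitigation, smt = value.split(";", 1)
            return mitigation.strip(), smt.strip()
        return value, None

    if __name__ == "__main__":
        mitigation, smt = mds_state()
        print("MDS:", mitigation, "| SMT:", smt or "not reported")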
diff --git a/Documentation/hw-vuln/multihit.rst b/Documentation/hw-vuln/multihit.rst
new file mode 100644
index 000000000000..ba9988d8bce5
--- /dev/null
+++ b/Documentation/hw-vuln/multihit.rst
@@ -0,0 +1,163 @@
+iTLB multihit
+=============
+
+iTLB multihit is an erratum where some processors may incur a machine check
+error, possibly resulting in an unrecoverable CPU lockup, when an
+instruction fetch hits multiple entries in the instruction TLB. This can
+occur when the page size is changed along with either the physical address
+or cache type. A malicious guest running on a virtualized system can
+exploit this erratum to perform a denial of service attack.
+
+
+Affected processors
+-------------------
+
+Variations of this erratum are present on most Intel Core and Xeon processor
+models. The erratum is not present on:
+
+ - non-Intel processors
+
+ - Some Atoms (Airmont, Bonnell, Goldmont, GoldmontPlus, Saltwell, Silvermont)
+
+ - Intel processors that have the PSCHANGE_MC_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+
+Related CVEs
+------------
+
+The following CVE entry is related to this issue:
+
+ ============== =================================================
+ CVE-2018-12207 Machine Check Error Avoidance on Page Size Change
+ ============== =================================================
+
+
+Problem
+-------
+
+Privileged software, including the OS and virtual machine managers (VMMs), is in
+charge of memory management. A key component of memory management is control
+of the page tables. Modern processors use virtual memory, a technique that creates
+the illusion of a very large address space. This virtual space is split
+into pages of a given size. Page tables translate virtual addresses to physical
+addresses.
+
+To reduce latency when performing a virtual to physical address translation,
+processors include a structure, called TLB, that caches recent translations.
+There are separate TLBs for instruction (iTLB) and data (dTLB).
+
+Under this erratum, instructions are fetched from a linear address translated
+using a 4 KB translation cached in the iTLB. Privileged software then modifies
+the paging structure so that the same linear address is mapped using a large
+page size (2 MB, 4 MB, 1 GB) with a different physical address or memory type.
+After the page structure modification, but before the software invalidates any
+iTLB entries for the linear address, a code fetch on that linear address may
+cause a machine-check error, which can result in a system hang or shutdown.
+
+
+Attack scenarios
+----------------
+
+Attacks against the iTLB multihit erratum can be mounted from malicious
+guests in a virtualized system.
+
+
+iTLB multihit system information
+--------------------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current iTLB
+multihit status of the system: whether the system is vulnerable and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/itlb_multihit
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - Not affected
+ - The processor is not vulnerable.
+ * - KVM: Mitigation: Split huge pages
+ - Software changes mitigate this issue.
+ * - KVM: Vulnerable
+ - The processor is vulnerable, but no mitigation enabled
+
+
+Enumeration of the erratum
+--------------------------------
+
+A new bit (PSCHANGE_MC_NO) has been allocated in the IA32_ARCH_CAPABILITIES MSR
+and will be set on CPUs which are mitigated against this issue.
+
+ ======================================= =========== ================================
+ IA32_ARCH_CAPABILITIES MSR              Not present Possibly vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '0'         Likely vulnerable, check model
+ IA32_ARCH_CAPABILITIES[PSCHANGE_MC_NO]  '1'         Not vulnerable
+ ======================================= =========== ================================
+
+
+Mitigation mechanism
+-------------------------
+
+This erratum can be mitigated by restricting the use of large page sizes to
+non-executable pages. This forces all iTLB entries to be 4K, and removes
+the possibility of multiple hits.
+
+In order to mitigate the vulnerability, KVM initially marks all huge pages
+as non-executable. If the guest attempts to execute in one of those pages,
+the page is broken down into 4K pages, which are then marked executable.
+
+If EPT is disabled or not available on the host, KVM is in control of TLB
+flushes and the problematic situation cannot happen. However, the shadow
+EPT paging mechanism used by nested virtualization is vulnerable, because
+the nested guest can trigger multiple iTLB hits by modifying its own
+(non-nested) page tables. For simplicity, KVM will make large pages
+non-executable in all shadow paging modes.
+
+Mitigation control on the kernel command line and KVM module parameter
+-----------------------------------------------------------------------
+
+The KVM hypervisor mitigation mechanism for marking huge pages as
+non-executable can be controlled with a module parameter "nx_huge_pages=".
+The kernel command line allows control of the iTLB multihit mitigations at
+boot time with the option "kvm.nx_huge_pages=".
+
+The valid arguments for these options are:
+
+ ========== ==================================================================
+ force      Mitigation is enabled. In this case, the mitigation implements
+            non-executable huge pages in Linux kernel KVM module. All huge
+            pages in the EPT are marked as non-executable.
+            If a guest attempts to execute in one of those pages, the page is
+            broken down into 4K pages, which are then marked executable.
+
+ off        Mitigation is disabled.
+
+ auto       Enable mitigation only if the platform is affected and the kernel
+            was not booted with the "mitigations=off" command line parameter.
+            This is the default option.
+ ========== ==================================================================
+
+
+Mitigation selection guide
+--------------------------
+
+1. No virtualization in use
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The system is protected by the kernel unconditionally and no further
+ action is required.
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the guest comes from a trusted source, you may assume that the guest will
+ not attempt to maliciously exploit these errata and no further action is
+ required.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+   If the guest comes from an untrusted source, the host kernel will need to
+   apply the iTLB multihit mitigation via the kernel command line or the kvm
+   module parameter.
diff --git a/Documentation/hw-vuln/tsx_async_abort.rst b/Documentation/hw-vuln/tsx_async_abort.rst
new file mode 100644
index 000000000000..af6865b822d2
--- /dev/null
+++ b/Documentation/hw-vuln/tsx_async_abort.rst
@@ -0,0 +1,279 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TAA - TSX Asynchronous Abort
+======================================
+
+TAA is a hardware vulnerability that allows unprivileged speculative access to
+data which is available in various CPU internal buffers by using asynchronous
+aborts within an Intel TSX transactional region.
+
+Affected processors
+-------------------
+
+This vulnerability only affects Intel processors that support Intel
+Transactional Synchronization Extensions (TSX) when the TAA_NO bit (bit 8)
+is 0 in the IA32_ARCH_CAPABILITIES MSR. On processors where the MDS_NO bit
+(bit 5) is 0 in the IA32_ARCH_CAPABILITIES MSR, the existing MDS mitigations
+also mitigate against TAA.
+
+Whether a processor is affected or not can be read out from the TAA
+vulnerability file in sysfs. See :ref:`tsx_async_abort_sys_info`.
+
+Related CVEs
+------------
+
+The following CVE entry is related to this TAA issue:
+
+ ============== ===== ===================================================
+ CVE-2019-11135 TAA TSX Asynchronous Abort (TAA) condition on some
+ microprocessors utilizing speculative execution may
+ allow an authenticated user to potentially enable
+ information disclosure via a side channel with
+ local access.
+ ============== ===== ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write
+data into temporary microarchitectural structures (buffers). The data in
+those buffers can be forwarded to load operations as an optimization.
+
+Intel TSX is an extension to the x86 instruction set architecture that adds
+hardware transactional memory support to improve performance of multi-threaded
+software. TSX lets the processor expose and exploit concurrency hidden in an
+application by dynamically avoiding unnecessary synchronization.
+
+TSX supports atomic memory transactions that are either committed (success) or
+aborted. During an abort, operations that happened within the transactional region
+are rolled back. An asynchronous abort takes place, among other reasons, when a
+different thread accesses a cache line that is also used within the transactional
+region and that access might lead to a data race.
+
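+For readers who have not used TSX, a transactional region is typically opened
+and closed in user space with the RTM intrinsics. The sketch below is purely
+illustrative (it assumes a TSX-capable CPU and a compiler invoked with -mrtm)
+and is unrelated to the attack itself::
+
+    #include <immintrin.h>
+    #include <stdio.h>
+
+    static int counter;
+
+    /* Increment 'counter' inside an RTM transaction and fall back to a
+     * non-transactional path if the transaction aborts. Real code would
+     * typically take a lock on the fallback path. */
+    static void increment(void)
+    {
+            unsigned int status = _xbegin();
+
+            if (status == _XBEGIN_STARTED) {
+                    counter++;              /* transactional region */
+                    _xend();                /* commit */
+            } else {
+                    counter++;              /* fallback after an abort */
+            }
+    }
+
+    int main(void)
+    {
+            increment();
+            printf("counter = %d\n", counter);
+            return 0;
+    }
+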
+Immediately after an uncompleted asynchronous abort, certain speculatively
+executed loads may read data from those internal buffers and pass it to dependent
+operations. This can be then used to infer the value via a cache side channel
+attack.
+
+Because the buffers are potentially shared between Hyper-Threads, cross
+Hyper-Thread attacks are possible.
+
+The victim of a malicious actor does not need to make use of TSX. Only the
+attacker needs to begin a TSX transaction and raise an asynchronous abort
+which in turn potentially leaks data stored in the buffers.
+
+More detailed technical information is available in the TAA specific x86
+architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the TAA vulnerability can be implemented from unprivileged
+applications running on hosts or guests.
+
+As for MDS, the attacker has no control over the memory addresses that can
+be leaked. Only the victim is responsible for bringing data to the CPU. As
+a result, the malicious actor has to sample as much data as possible and
+then postprocess it to try to infer any useful information from it.
+
+A potential attacker only has read access to the data. Also, there is no direct
+privilege escalation by using this technique.
+
+
+.. _tsx_async_abort_sys_info:
+
+TAA system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current TAA status
+of mitigated systems. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+
+The possible values in this file are:
+
+.. list-table::
+
+ * - 'Vulnerable'
+ - The CPU is affected by this vulnerability and the microcode and kernel mitigation are not applied.
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The system tries to clear the buffers but the microcode might not support the operation.
+ * - 'Mitigation: Clear CPU buffers'
+ - The microcode has been updated to clear the buffers. TSX is still enabled.
+ * - 'Mitigation: TSX disabled'
+ - TSX is disabled.
+ * - 'Not affected'
+ - The CPU is not affected by this issue.
+
+.. _ucode_needed:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If the processor is vulnerable, but the availability of the microcode-based
+mitigation mechanism is not advertised via CPUID, the kernel selects a best
+effort mitigation mode. This mode invokes the mitigation instructions
+without a guarantee that they clear the CPU buffers.
+
+This is done to address virtualization scenarios where the host has the
+microcode update applied, but the hypervisor is not yet updated to expose the
+CPUID to the guest. If the host has updated microcode the protection takes
+effect; otherwise a few CPU cycles are wasted pointlessly.
+
+The state in the tsx_async_abort sysfs file reflects this situation
+accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the microcode which is
+required. If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default.
+
+
+The mitigation can be controlled at boot time via a kernel command line option.
+See :ref:`taa_mitigation_control_command_line`.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Affected systems where the host has the TAA microcode and TAA is mitigated by
+having disabled TSX are not vulnerable regardless of the status of the VMs.
+
+In all other cases, if the host either does not have the TAA microcode or
+the kernel is not mitigated, the system might be vulnerable.
+
+
+.. _taa_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the TAA mitigations at boot time with
+the option "tsx_async_abort=". The valid arguments for this option are:
+
+ ============ =============================================================
+ off This option disables the TAA mitigation on affected platforms.
+ If the system has TSX enabled (see next parameter) and the CPU
+ is affected, the system is vulnerable.
+
+ full TAA mitigation is enabled. If TSX is enabled, on an affected
+ system it will clear CPU buffers on ring transitions. On
+ systems which are MDS-affected and deploy MDS mitigation,
+ TAA is also mitigated. Specifying this option on those
+ systems will have no effect.
+
+ full,nosmt The same as tsx_async_abort=full, with SMT disabled on
+ vulnerable CPUs that have TSX enabled. This is the complete
+ mitigation. When TSX is disabled, SMT is not disabled because
+ CPU is not vulnerable to cross-thread TAA attacks.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx_async_abort=full". For
+processors that are affected by both TAA and MDS, specifying just
+"tsx_async_abort=off" without an accompanying "mds=off" will have no
+effect as the same mitigation is used for both vulnerabilities.
+
+The kernel command line also allows controlling the TSX feature using the
+parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
+to control the TSX feature and the enumeration of the TSX feature bits (RTM
+and HLE) in CPUID.
+
+The valid options are:
+
+ ============ =============================================================
+ off Disables TSX on the system.
+
+ Note that this option takes effect only on newer CPUs which are
+ not vulnerable to MDS, i.e., have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1
+ and which get the new IA32_TSX_CTRL MSR through a microcode
+ update. This new MSR allows for the reliable deactivation of
+ the TSX functionality.
+
+ on Enables TSX.
+
+ Although there are mitigations for all known security
+ vulnerabilities, TSX has been known to be an accelerator for
+ several previous speculation-related CVEs, and so there may be
+ unknown security risks associated with leaving it enabled.
+
+ auto Disables TSX if X86_BUG_TAA is present, otherwise enables TSX
+ on the system.
+ ============ =============================================================
+
+Not specifying this option is equivalent to "tsx=off".
+
+The following combinations of the "tsx_async_abort" and "tsx" options are
+possible. For affected platforms, tsx=auto is equivalent to tsx=off and the
+result will be:
+
+ ========= ========================== =========================================
+ tsx=on    tsx_async_abort=full       The system will use VERW to clear CPU
+                                      buffers. Cross-thread attacks are still
+                                      possible on SMT machines.
+ tsx=on    tsx_async_abort=full,nosmt As above, cross-thread attacks on SMT are
+                                      mitigated.
+ tsx=on    tsx_async_abort=off        The system is vulnerable.
+ tsx=off   tsx_async_abort=full       TSX might be disabled if the microcode
+                                      provides a TSX control MSR. If so, the
+                                      system is not vulnerable.
+ tsx=off   tsx_async_abort=full,nosmt Ditto
+ tsx=off   tsx_async_abort=off        Ditto
+ ========= ========================== =========================================
+
+
+For unaffected platforms "tsx=on" and "tsx_async_abort=full" do not clear CPU
+buffers. For platforms without TSX control (MSR_IA32_ARCH_CAPABILITIES.MDS_NO=0)
+the "tsx" command line argument has no effect.
+
+For the affected platforms, the table below indicates the mitigation status for
+the combinations of the CPUID bit MD_CLEAR and the IA32_ARCH_CAPABILITIES MSR
+bits MDS_NO and TSX_CTRL_MSR.
+
+ ======= ========= ============= ========================================
+ MDS_NO MD_CLEAR TSX_CTRL_MSR Status
+ ======= ========= ============= ========================================
+ 0 0 0 Vulnerable (needs microcode)
+ 0 1 0 MDS and TAA mitigated via VERW
+ 1 1 0 MDS fixed, TAA vulnerable if TSX enabled
+ because MD_CLEAR has no meaning and
+ VERW is not guaranteed to clear buffers
+ 1 X 1 MDS fixed, TAA can be mitigated by
+ VERW or TSX_CTRL_MSR
+ ======= ========= ============= ========================================
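+
+Expressed as code, the table above reduces to the following decision logic.
+This is only a restatement of the table for clarity, not kernel code::
+
+    /* Inputs are the MDS_NO and TSX_CTRL_MSR bits from
+     * IA32_ARCH_CAPABILITIES and the MD_CLEAR CPUID bit. */
+    static const char *taa_status(int mds_no, int md_clear, int tsx_ctrl_msr)
+    {
+            if (!mds_no)
+                    return md_clear ? "MDS and TAA mitigated via VERW"
+                                    : "Vulnerable (needs microcode)";
+            if (tsx_ctrl_msr)
+                    return "MDS fixed, TAA mitigated by VERW or TSX_CTRL_MSR";
+            return "MDS fixed, TAA vulnerable if TSX enabled";
+    }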
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If all user space applications are from a trusted source and do not execute
+untrusted code which is supplied externally, then the mitigation can be
+disabled. The same applies to virtualized environments with trusted guests.
+
+
+2. Untrusted userspace and guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there are untrusted applications or guests on the system, enabling TSX
+might allow a malicious actor to leak data from the host or from other
+processes running on the same physical core.
+
+If the microcode is available and TSX is disabled on the host, attacks
+are prevented in a virtualized environment as well, even if the VMs do not
+explicitly enable the mitigation.
+
+
+.. _taa_default_mitigations:
+
+Default mitigations
+-------------------
+
+The kernel's default action for vulnerable processors is:
+
+ - Deploy TSX disable mitigation (tsx_async_abort=full tsx=off).
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 213399aac757..f95c58dbbbc3 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -12,7 +12,6 @@ Contents:
:maxdepth: 2
kernel-documentation
- l1tf
development-process/index
dev-tools/tools
driver-api/index
@@ -20,6 +19,24 @@ Contents:
gpu/index
80211/index
+This section describes CPU vulnerabilities and their mitigations.
+
+.. toctree::
+ :maxdepth: 1
+
+ hw-vuln/index
+
+Architecture-specific documentation
+-----------------------------------
+
+These books provide programming details about architecture-specific
+implementation.
+
+.. toctree::
+ :maxdepth: 2
+
+ x86/index
+
Indices and tables
==================
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c708a50b060e..e05d65d6fcb6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -335,6 +335,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
dynamic table installation which will install SSDT
tables to /sys/firmware/acpi/tables/dynamic.
+ acpi_no_watchdog [HW,ACPI,WDT]
+ Ignore the ACPI-based watchdog interface (WDAT) and let
+ a native driver control the watchdog device instead.
+
acpi_rsdp= [ACPI,EFI,KEXEC]
Pass the RSDP address to the kernel, mostly used
on machines running EFI runtime service to boot the
@@ -1965,6 +1969,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kmemcheck=2 (one-shot mode)
Default: 2 (one-shot mode)
+ kpti= [ARM64] Control page table isolation of user
+ and kernel address spaces.
+ Default: enabled on cores which need mitigation.
+ 0: force disabled
+ 1: force enabled
+
kstack=N [X86] Print N words from the kernel stack
in oops dumps.
@@ -1975,6 +1985,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
KVM MMU at runtime.
Default is 0 (off)
+ kvm.nx_huge_pages=
+ [KVM] Controls the software workaround for the
+ X86_BUG_ITLB_MULTIHIT bug.
+ force : Always deploy workaround.
+ off : Never deploy workaround.
+ auto : Deploy workaround based on the presence of
+ X86_BUG_ITLB_MULTIHIT.
+
+ Default is 'auto'.
+
+ If the software workaround is enabled for the host,
+			guests need not enable it for nested guests.
+
+ kvm.nx_huge_pages_recovery_ratio=
+ [KVM] Controls how many 4KiB pages are periodically zapped
+ back to huge pages. 0 disables the recovery, otherwise if
+ the value is N KVM will zap 1/Nth of the 4KiB pages every
+ minute. The default is 60.
+
kvm-amd.nested= [KVM,AMD] Allow nested virtualization in KVM/SVM.
Default is 1 (enabled)
@@ -2076,10 +2105,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
off
Disables hypervisor mitigations and doesn't
emit any warnings.
+ It also drops the swap size and available
+ RAM limit restriction on both hypervisor and
+ bare metal.
Default is 'flush'.
- For details see: Documentation/admin-guide/l1tf.rst
+ For details see: Documentation/hw-vuln/l1tf.rst
l2cr= [PPC]
@@ -2322,6 +2354,38 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <first>,<last>
Specifies range of consoles to be captured by the MDA.
+ mds= [X86,INTEL]
+ Control mitigation for the Micro-architectural Data
+ Sampling (MDS) vulnerability.
+
+ Certain CPUs are vulnerable to an exploit against CPU
+ internal buffers which can forward information to a
+ disclosure gadget under certain conditions.
+
+ In vulnerable processors, the speculatively
+ forwarded data can be used in a cache side channel
+ attack, to access data to which the attacker does
+ not have direct access.
+
+ This parameter controls the MDS mitigation. The
+ options are:
+
+ full - Enable MDS mitigation on vulnerable CPUs
+ full,nosmt - Enable MDS mitigation and disable
+ SMT on vulnerable CPUs
+ off - Unconditionally disable MDS mitigation
+
+ On TAA-affected machines, mds=off can be prevented by
+ an active TAA mitigation as both vulnerabilities are
+ mitigated with the same mechanism so in order to disable
+ this mitigation, you need to specify tsx_async_abort=off
+ too.
+
+ Not specifying this option is equivalent to
+ mds=full.
+
+ For details see: Documentation/hw-vuln/mds.rst
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
@@ -2444,6 +2508,47 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
in the "bleeding edge" mini2440 support kernel at
http://repo.or.cz/w/linux-2.6/mini2440.git
+ mitigations=
+ [X86] Control optional mitigations for CPU
+ vulnerabilities. This is a set of curated,
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
+ off
+ Disable all optional CPU mitigations. This
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86]
+ nospectre_v1 [X86]
+ nospectre_v2 [X86]
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86]
+ l1tf=off [X86]
+ mds=off [X86]
+ tsx_async_abort=off [X86]
+ kvm.nx_huge_pages=off [X86]
+
+ Exceptions:
+ This does not have any effect on
+ kvm.nx_huge_pages when
+ kvm.nx_huge_pages=force.
+
+ auto (default)
+ Mitigate all CPU vulnerabilities, but leave SMT
+ enabled, even if it's vulnerable. This is for
+ users who don't want to be surprised by SMT
+ getting disabled across kernel upgrades, or who
+ have other ways of avoiding SMT-based attacks.
+ Equivalent to: (default behavior)
+
+ auto,nosmt
+ Mitigate all CPU vulnerabilities, disabling SMT
+ if needed. This is for users who always want to
+ be fully mitigated, even if it means losing SMT.
+ Equivalent to: l1tf=flush,nosmt [X86]
+ mds=full,nosmt [X86]
+ tsx_async_abort=full,nosmt [X86]
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -2765,7 +2870,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
- nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
+ (bounds check bypass). With this option data leaks are
+ possible in the system.
+
+ nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
to spectre_v2=off.
@@ -3763,6 +3872,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
+ rdrand= [X86]
+ force - Override the decision by the kernel to hide the
+ advertisement of RDRAND support (this affects
+ certain AMD processors because of buggy BIOS
+ support, specifically around the suspend/resume
+ path).
+
reboot= [KNL]
Format (x86 or x86_64):
[w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
@@ -4026,9 +4142,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
spectre_v2= [X86] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
+ The default operation protects the kernel from
+ user space attacks.
- on - unconditionally enable
- off - unconditionally disable
+ on - unconditionally enable, implies
+ spectre_v2_user=on
+ off - unconditionally disable, implies
+ spectre_v2_user=off
auto - kernel detects whether your CPU model is
vulnerable
@@ -4038,6 +4158,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CONFIG_RETPOLINE configuration option, and the
compiler with which the kernel was built.
+ Selecting 'on' will also enable the mitigation
+ against user space to user space task attacks.
+
+ Selecting 'off' will disable both the kernel and
+ the user space protections.
+
Specific mitigations can also be selected manually:
retpoline - replace indirect branches
@@ -4047,6 +4173,48 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Not specifying this option is equivalent to
spectre_v2=auto.
+ spectre_v2_user=
+ [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability between
+ user space tasks
+
+ on - Unconditionally enable mitigations. Is
+ enforced by spectre_v2=on
+
+ off - Unconditionally disable mitigations. Is
+ enforced by spectre_v2=off
+
+ prctl - Indirect branch speculation is enabled,
+ but mitigation can be enabled via prctl
+ per thread. The mitigation control state
+ is inherited on fork.
+
+ prctl,ibpb
+ - Like "prctl" above, but only STIBP is
+ controlled per thread. IBPB is issued
+ always when switching between different user
+ space processes.
+
+ seccomp
+ - Same as "prctl" above, but all seccomp
+ threads will enable the mitigation unless
+ they explicitly opt out.
+
+ seccomp,ibpb
+ - Like "seccomp" above, but only STIBP is
+ controlled per thread. IBPB is issued
+ always when switching between different
+ user space processes.
+
+ auto - Kernel selects the mitigation depending on
+ the available CPU features and vulnerability.
+
+ Default mitigation:
+ If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+ Not specifying this option is equivalent to
+ spectre_v2_user=auto.
+
spec_store_bypass_disable=
[HW] Control Speculative Store Bypass (SSB) Disable mitigation
(Speculative Store Bypass vulnerability)
@@ -4391,6 +4559,76 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
platforms where RDTSC is slow and this accounting
can add overhead.
+ tsx= [X86] Control Transactional Synchronization
+ Extensions (TSX) feature in Intel processors that
+ support TSX control.
+
+ This parameter controls the TSX feature. The options are:
+
+ on - Enable TSX on the system. Although there are
+ mitigations for all known security vulnerabilities,
+ TSX has been known to be an accelerator for
+ several previous speculation-related CVEs, and
+ so there may be unknown security risks associated
+ with leaving it enabled.
+
+ off - Disable TSX on the system. (Note that this
+ option takes effect only on newer CPUs which are
+ not vulnerable to MDS, i.e., have
+ MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and which get
+ the new IA32_TSX_CTRL MSR through a microcode
+ update. This new MSR allows for the reliable
+ deactivation of the TSX functionality.)
+
+ auto - Disable TSX if X86_BUG_TAA is present,
+ otherwise enable TSX on the system.
+
+ Not specifying this option is equivalent to tsx=off.
+
+ See Documentation/hw-vuln/tsx_async_abort.rst
+ for more details.
+
+ tsx_async_abort= [X86,INTEL] Control mitigation for the TSX Async
+ Abort (TAA) vulnerability.
+
+ Similar to Micro-architectural Data Sampling (MDS)
+ certain CPUs that support Transactional
+ Synchronization Extensions (TSX) are vulnerable to an
+ exploit against CPU internal buffers which can forward
+ information to a disclosure gadget under certain
+ conditions.
+
+ In vulnerable processors, the speculatively forwarded
+ data can be used in a cache side channel attack, to
+ access data to which the attacker does not have direct
+ access.
+
+ This parameter controls the TAA mitigation. The
+ options are:
+
+ full - Enable TAA mitigation on vulnerable CPUs
+ if TSX is enabled.
+
+ full,nosmt - Enable TAA mitigation and disable SMT on
+ vulnerable CPUs. If TSX is disabled, SMT
+ is not disabled because CPU is not
+ vulnerable to cross-thread TAA attacks.
+ off - Unconditionally disable TAA mitigation
+
+ On MDS-affected machines, tsx_async_abort=off can be
+ prevented by an active MDS mitigation as both vulnerabilities
+ are mitigated with the same mechanism so in order to disable
+ this mitigation, you need to specify mds=off too.
+
+ Not specifying this option is equivalent to
+ tsx_async_abort=full. On CPUs which are MDS affected
+ and deploy MDS mitigation, TAA mitigation is not
+ required and doesn't provide any additional
+ mitigation.
+
+ For details see:
+ Documentation/hw-vuln/tsx_async_abort.rst
+
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
Format:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index dbdc4130e149..49935d5bb5c6 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -230,6 +230,14 @@ tcp_base_mss - INTEGER
Path MTU discovery (MTU probing). If MTU probing is enabled,
this is the initial MSS used by the connection.
+tcp_min_snd_mss - INTEGER
+ TCP SYN and SYNACK messages usually advertise an ADVMSS option,
+ as described in RFC 1122 and RFC 6691.
+ If this ADVMSS option is smaller than tcp_min_snd_mss,
+ it is silently capped to tcp_min_snd_mss.
+
+ Default : 48 (at least 8 bytes of payload per segment)
+
tcp_congestion_control - STRING
Set the congestion control algorithm to be used for new
connections. The algorithm "reno" is always available, but
@@ -405,6 +413,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
+ Possible values: 0 - 86400 (1 day)
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
new file mode 100644
index 000000000000..908d348ff777
--- /dev/null
+++ b/Documentation/siphash.txt
@@ -0,0 +1,175 @@
+ SipHash - a short input PRF
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+SipHash is a cryptographically secure PRF -- a keyed hash function -- that
+performs very well for short inputs, hence the name. It was designed by
+cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
+as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
+and so forth.
+
+SipHash takes a secret key filled with randomly generated numbers and either
+an input buffer or several input integers. It spits out an integer that is
+indistinguishable from random. You may then use that integer as part of secure
+sequence numbers, secure cookies, or mask it off for use in a hash table.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+siphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+
+And:
+
+u64 siphash_1u64(u64, const siphash_key_t *key);
+u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+u64 siphash_1u32(u32, const siphash_key_t *key);
+u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+
+If you pass the generic siphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
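+
+For instance, hashing a pair of u64 values can be written either way; with a
+constant 16-byte length the generic call below is folded to the same optimized
+code path as the explicit siphash_2u64() call (a sketch, assuming a key set up
+as in section 1):
+
+u64 hash_pair(u64 a, u64 b, const siphash_key_t *key)
+{
+	u64 combined[2] = { a, b };
+
+	/* Constant sizeof() lets this fold to the 2u64 variant. */
+	return siphash(combined, sizeof(combined), key);
+}
+
+u64 hash_pair_direct(u64 a, u64 b, const siphash_key_t *key)
+{
+	/* Calls the optimized variant directly. */
+	return siphash_2u64(a, b, key);
+}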
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+ DECLARE_HASHTABLE(hashtable, 8);
+ siphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+ get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+ return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Security
+
+SipHash has a very high security margin, with its 128-bit key. So long as the
+key is kept secret, it is impossible for an attacker to guess the outputs of
+the function, even when able to observe many outputs, since brute forcing the
+2^128 possible keys is infeasible.
+
+Linux implements the "2-4" variant of SipHash.
+
+5. Struct-passing Pitfalls
+
+Oftentimes the XuY functions will not be large enough, and instead you'll
+want to pass a pre-filled struct to siphash. When doing this, it's important
+to always ensure the struct has no padding holes. The easiest way to do this
+is to simply arrange the members of the struct in descending order of size,
+and to use offsetofend() instead of sizeof() for getting the size. For
+performance reasons, if possible, it's probably a good thing to align the
+struct to the right boundary. Here's an example:
+
+const struct {
+ struct in6_addr saddr;
+ u32 counter;
+ u16 dport;
+} __aligned(SIPHASH_ALIGNMENT) combined = {
+ .saddr = *(struct in6_addr *)saddr,
+ .counter = counter,
+ .dport = dport
+};
+u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+6. Resources
+
+Read the SipHash paper if you're interested in learning more:
+https://131002.net/siphash/siphash.pdf
+
+
+~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+HalfSipHash - SipHash's insecure younger cousin
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+On the off-chance that SipHash is not fast enough for your needs, you might be
+able to justify using HalfSipHash, a terrifying but potentially useful
+possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
+even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
+instead of SipHash's 128-bit key. However, this may appeal to some
+high-performance `jhash` users.
+
+Danger!
+
+Do not ever use HalfSipHash except as a hashtable key function, and only
+then when you can be absolutely certain that the outputs will never be
+transmitted out of the kernel. This is only remotely useful over `jhash` as a
+means of mitigating hashtable flooding denial of service attacks.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+hsiphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+
+And:
+
+u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+
+If you pass the generic hsiphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+ DECLARE_HASHTABLE(hashtable, 8);
+ hsiphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+ get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+ return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Performance
+
+HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
+this will not be a problem, as the hashtable lookup isn't the bottleneck. And
+in general, this is probably a good sacrifice to make for the security and DoS
+resistance of HalfSipHash.
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
index 32f3d55c54b7..c4dbe6f7cdae 100644
--- a/Documentation/spec_ctrl.txt
+++ b/Documentation/spec_ctrl.txt
@@ -92,3 +92,12 @@ Speculation misfeature controls
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes
+ (Mitigate Spectre V2 style attacks against user processes)
+
+ Invocations:
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
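+
+  As an illustration, a task can opt itself out of indirect branch speculation
+  with the prctl interface shown above. This is a sketch only; it assumes a
+  kernel providing the speculation control prctl and <linux/prctl.h> defining
+  the PR_SPEC_* constants:
+
+    #include <linux/prctl.h>
+    #include <stdio.h>
+    #include <sys/prctl.h>
+
+    int main(void)
+    {
+            /* Query the current indirect branch speculation state. */
+            int state = prctl(PR_GET_SPECULATION_CTRL,
+                              PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+
+            if (state < 0) {
+                    perror("PR_GET_SPECULATION_CTRL");
+                    return 1;
+            }
+
+            /* Disable indirect branch speculation for this task. */
+            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+                      PR_SPEC_DISABLE, 0, 0) < 0) {
+                    perror("PR_SET_SPECULATION_CTRL");
+                    return 1;
+            }
+            return 0;
+    }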
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index f0480f7ea740..4d5e3b0cab3f 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,6 +54,14 @@ Values :
1 - enable JIT hardening for unprivileged users only
2 - enable JIT hardening for all users
+bpf_jit_limit
+-------------
+
+This enforces a global limit for memory allocations to the BPF JIT
+compiler in order to reject unprivileged JIT requests once it has
+been surpassed. bpf_jit_limit contains the value of the global limit
+in bytes.
+
dev_weight
--------------
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 0a94ffe17ab6..b13e031beaa6 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -365,11 +365,15 @@ autosuspend the interface's device. When the usage counter is = 0
then the interface is considered to be idle, and the kernel may
autosuspend the device.
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface. As a corollary, drivers must not call
-any of the usb_autopm_* functions after their disconnect() routine has
-returned.
+Drivers must be careful to balance their overall changes to the usage
+counter. Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again. On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
Drivers using the async routines are responsible for their own
synchronization and mutual exclusion.
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
deleted file mode 100644
index aee715af7db7..000000000000
--- a/Documentation/usb/rio.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-Copyright (C) 1999, 2000 Bruce Tenison
-Portions Copyright (C) 1999, 2000 David Nelson
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-CHANGES
-
-- Initial Revision
-
-
-OVERVIEW
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.
-Before I explain how to use this to access the Rio500 please be warned:
-
-W A R N I N G:
---------------
-
-Please note that this software is still under development. The authors
-are in no way responsible for any damage that may occur, no matter how
-inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore
-down to nothing and appear to have corrupted the flash memory. My RIO
-needed to be replaced as a result. Diamond tech support is aware of the
-problem. Do NOT allow your batteries to wear down to nothing before
-changing them. It appears RIO 500 firmware does not handle low battery
-power well at all.
-
-On systems with OHCI controllers, the kernel OHCI code appears to have
-power on problems with some chipsets. If you are having problems
-connecting to your RIO 500, try turning it on first and then plugging it
-into the USB cable.
-
-Contact information:
---------------------
-
- The main page for the project is hosted at sourceforge.net in the following
- URL: <http://rio500.sourceforge.net>. You can also go to the project's
- sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
- There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors:
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use
-with some important information regarding the communication with the Rio.
-
-ADDITIONAL INFORMATION and Userspace tools
-
-http://rio500.sourceforge.net/
-
-
-REQUIREMENTS
-
-A host with a USB port. Ideally, either a UHCI (Intel) or OHCI
-(Compaq and others) hardware port should work.
-
-A Linux development kernel (2.3.x) with USB support enabled or a
-backported version to linux-2.2.x. See http://www.linux-usb.org for
-more information on accomplishing this.
-
-A Linux kernel with RIO 500 support enabled.
-
-'lspci' which is only needed to determine the type of USB hardware
-available in your machine.
-
-CONFIGURATION
-
-Using `lspci -v`, determine the type of USB hardware available.
-
- If you see something like:
-
- USB Controller: ......
- Flags: .....
- I/O ports at ....
-
- Then you have a UHCI based controller.
-
- If you see something like:
-
- USB Controller: .....
- Flags: ....
- Memory at .....
-
- Then you have a OHCI based controller.
-
-Using `make menuconfig` or your preferred method for configuring the
-kernel, select 'Support for USB', 'OHCI/UHCI' depending on your
-hardware (determined from the steps above), 'USB Diamond Rio500 support', and
-'Preliminary USB device filesystem'. Compile and install the modules
-(you may need to execute `depmod -a` to update the module
-dependencies).
-
-Add a device for the USB rio500:
- `mknod /dev/usb/rio500 c 180 64`
-
-Set appropriate permissions for /dev/usb/rio500 (don't forget about
-group and world permissions). Both read and write permissions are
-required for proper operation.
-
-Load the appropriate modules (if compiled as modules):
-
- OHCI:
- modprobe usbcore
- modprobe usb-ohci
- modprobe rio500
-
- UHCI:
- modprobe usbcore
- modprobe usb-uhci (or uhci)
- modprobe rio500
-
-That's it. The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-BUGS
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
-
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3ff58a8ffabb..d1908e50b506 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
- VM ioctls: These query and set attributes that affect an entire virtual
machine, for example memory layout. In addition a VM ioctl is used to
- create virtual cpus (vcpus).
+ create virtual cpus (vcpus) and devices.
Only run VM ioctls from the same process (address space) that was used
to create the VM.
@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
+ - device ioctls: These query and set attributes that control the operation
+ of a single device.
+
+ device ioctls must be issued from the same process (address space) that
+ was used to create the VM.
2. File descriptors
-------------------
@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource. Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device. For vcpus, this includes the important
+task of actually running guest code.
In general file descriptors can be migrated among processes by means
of fork() and the SCM_RIGHTS facility of unix domain socket. These
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index e5dd9f4d6100..46ef3680c8ab 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -13,8 +13,8 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
-else is a leaf: no other lock is taken inside the critical sections.
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
2: Exception
------------
@@ -142,7 +142,7 @@ See the comments in spte_has_volatile_bits() and mmu_spte_update().
------------
Name: kvm_lock
-Type: spinlock_t
+Type: mutex
Arch: any
Protects: - vm_list
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
new file mode 100644
index 000000000000..33c5c3142e20
--- /dev/null
+++ b/Documentation/x86/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "X86 architecture specific documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'x86.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
new file mode 100644
index 000000000000..0780d55c5aa8
--- /dev/null
+++ b/Documentation/x86/index.rst
@@ -0,0 +1,9 @@
+==========================
+x86 architecture specifics
+==========================
+
+.. toctree::
+ :maxdepth: 1
+
+ mds
+ tsx_async_abort
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
new file mode 100644
index 000000000000..5d4330be200f
--- /dev/null
+++ b/Documentation/x86/mds.rst
@@ -0,0 +1,193 @@
+Microarchitectural Data Sampling (MDS) mitigation
+=================================================
+
+.. _mds:
+
+Overview
+--------
+
+Microarchitectural Data Sampling (MDS) is a family of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+ - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from
+memory that takes a fault or assist can leave data in a microarchitectural
+structure that may later be observed using one of the same methods used by
+MSBDS, MFBDS or MLPDS.
+
+Exposure assumptions
+--------------------
+
+It is assumed that attack code resides in user space or in a guest with one
+exception. The rationale behind this assumption is that the code construct
+needed for exploiting MDS requires:
+
+ - to control the load to trigger a fault or assist
+
+ - to have a disclosure gadget which exposes the speculatively accessed
+ data for consumption through a side channel.
+
+ - to control the pointer through which the disclosure gadget exposes the
+ data
+
+The existence of such a construct in the kernel cannot be excluded with
+100% certainty, but the complexity involved makes it extremely unlikely.
+
+There is one exception, which is untrusted BPF. The functionality of
+untrusted BPF is limited, but it needs to be thoroughly investigated
+whether it can be used to create such a construct.
+
+
+Mitigation strategy
+-------------------
+
+All variants have the same mitigation strategy at least for the single CPU
+thread case (SMT off): Force the CPU to clear the affected buffers.
+
+This is achieved by using the otherwise unused and obsolete VERW
+instruction in combination with a microcode update. The microcode clears
+the affected CPU buffers when the VERW instruction is executed.
+
+For virtualization there are two ways to achieve CPU buffer
+clearing. Either the modified VERW instruction or via the L1D Flush
+command. The latter is issued when L1TF mitigation is enabled so the extra
+VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to
+be issued.
+
+If the VERW instruction with the supplied segment selector argument is
+executed on a CPU without the microcode update there is no side effect
+other than a small number of pointlessly wasted CPU cycles.
+
+This does not protect against cross Hyper-Thread attacks except for MSBDS,
+which is only exploitable cross Hyper-Thread when one of the Hyper-Threads
+enters a C-state.
+
+The kernel provides a function to invoke the buffer clearing:
+
+ mds_clear_cpu_buffers()
+
+The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+(idle) transitions.
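+
+A sketch of how such a helper can be built on top of the VERW sequence
+described above is shown below. It is a kernel-context illustration of the
+mechanism and not necessarily identical to the actual implementation::
+
+    static inline void mds_clear_cpu_buffers(void)
+    {
+            /* Any valid, writable data segment selector can serve as the
+             * VERW operand; with the updated microcode the side effect of
+             * the instruction is to clear the affected CPU buffers. */
+            static const u16 ds = __KERNEL_DS;
+
+            asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+    }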
+
+As a special quirk to address virtualization scenarios where the host has
+the microcode updated, but the hypervisor does not (yet) expose the
+MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
+hope that it might actually clear the buffers. The state is reflected
+accordingly.
+
+According to current knowledge additional mitigations inside the kernel
+itself are not required because the necessary gadgets to expose the leaked
+data cannot be controlled in a way which allows exploitation from malicious
+user space or VM guests.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ======= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ mds=off is supplied on the kernel command line
+
+ full Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
+ ======= ============================================================
+
+If the CPU is affected and mds=off is not supplied on the kernel command
+line then the kernel selects the appropriate mitigation mode depending on
+the availability of the MD_CLEAR CPUID bit.
+
+Mitigation points
+-----------------
+
+1. Return to user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ When transitioning from kernel to user space the CPU buffers are flushed
+ on affected CPUs when the mitigation is not disabled on the kernel
+   command line. The mitigation is enabled through the static key
+ mds_user_clear.
+
+ The mitigation is invoked in prepare_exit_to_usermode() which covers
+ all but one of the kernel to user space transitions. The exception
+ is when we return from a Non Maskable Interrupt (NMI), which is
+ handled directly in do_nmi().
+
+ (The reason that NMI is special is that prepare_exit_to_usermode() can
+ enable IRQs. In NMI context, NMIs are blocked, and we don't want to
+ enable IRQs with NMIs blocked.)
+
+
+2. C-State transition
+^^^^^^^^^^^^^^^^^^^^^
+
+ When a CPU goes idle and enters a C-State the CPU buffers need to be
+ cleared on affected CPUs when SMT is active. This addresses the
+ repartitioning of the store buffer when one of the Hyper-Threads enters
+ a C-State.
+
+   When SMT is inactive, i.e. either the CPU does not support it or all
+   sibling threads are offline, CPU buffer clearing is not required.
+
+ The idle clearing is enabled on CPUs which are only affected by MSBDS
+ and not by any other MDS variant. The other MDS variants cannot be
+ protected against cross Hyper-Thread attacks because the Fill Buffer and
+ the Load Ports are shared. So on CPUs affected by other variants, the
+ idle clearing would be a window dressing exercise and is therefore not
+ activated.
+
+ The invocation is controlled by the static key mds_idle_clear which is
+ switched depending on the chosen mitigation mode and the SMT state of
+ the system.
+
+   The buffer clearing is only invoked before entering the C-State, to prevent
+   stale data from the idling CPU from spilling to the Hyper-Thread sibling
+   after the store buffer has been repartitioned and all entries have become
+   available to the non-idle sibling.
+
+   When coming out of idle the store buffer is partitioned again so that each
+   sibling has half of it available. The CPU coming back from idle could then
+   be speculatively exposed to the contents left behind by the sibling. The
+   buffers are flushed either on exit to user space or on VMENTER, so malicious
+   code in user space or in the guest cannot speculatively access them.
+
+   The mitigation is hooked into all variants of halt()/mwait(), but does
+   not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver
+   was superseded around 2010 by the intel_idle driver, which is preferred
+   on all affected CPUs that are expected to gain the MD_CLEAR functionality
+   in microcode. Aside from that, the IO-Port mechanism is a legacy interface
+   which is only used on older systems that are either not affected or no
+   longer receive microcode updates.
diff --git a/Documentation/x86/tsx_async_abort.rst b/Documentation/x86/tsx_async_abort.rst
new file mode 100644
index 000000000000..4a4336a89372
--- /dev/null
+++ b/Documentation/x86/tsx_async_abort.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+TSX Async Abort (TAA) mitigation
+================================
+
+.. _tsx_async_abort:
+
+Overview
+--------
+
+TSX Async Abort (TAA) is a side channel attack on internal buffers in some
+Intel processors similar to Microarchitectural Data Sampling (MDS). In this
+case certain loads may speculatively pass invalid data to dependent operations
+when an asynchronous abort condition is pending in a Transactional
+Synchronization Extensions (TSX) transaction. This includes loads with no
+fault or assist condition. Such loads may speculatively expose stale data from
+the same uarch data structures as in MDS, with the same scope of exposure, i.e.
+same-thread and cross-thread. This issue affects all current processors that
+support TSX.
+
+Mitigation strategy
+-------------------
+
+a) TSX disable - one of the mitigations is to disable TSX. A new MSR,
+IA32_TSX_CTRL, is available on future processors and on current processors
+after a microcode update, and can be used to disable TSX. In addition, it
+controls the enumeration of the TSX feature bits (RTM and HLE) in CPUID.
+
+b) Clear CPU buffers - similar to MDS, clearing the CPU buffers mitigates this
+vulnerability. More details on this approach can be found in
+:ref:`Documentation/hw-vuln/mds.rst <mds>`.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ============= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ tsx_async_abort=off is supplied on the kernel command line.
+
+ tsx disabled Mitigation is enabled. TSX feature is disabled by default at
+ bootup on processors that support TSX control.
+
+ verw Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ ucode needed Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
+ ============= ============================================================
+
+If the CPU is affected and the "tsx_async_abort" kernel command line parameter is
+not provided then the kernel selects an appropriate mitigation depending on the
+status of RTM and MD_CLEAR CPUID bits.
+
+The tables below indicate the impact of the tsx=on|off|auto cmdline options on
+the state of the TAA mitigation, the VERW behavior and the TSX feature for
+various combinations of MSR_IA32_ARCH_CAPABILITIES bits.
+
+1. "tsx=off"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=off
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Disabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+2. "tsx=on"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=on
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Enabled Yes None Same as MDS
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+3. "tsx=auto"
+
+========= ========= ============ ============ ============== =================== ======================
+MSR_IA32_ARCH_CAPABILITIES bits Result with cmdline tsx=auto
+---------------------------------- -------------------------------------------------------------------------
+TAA_NO MDS_NO TSX_CTRL_MSR TSX state VERW can clear TAA mitigation TAA mitigation
+ after bootup CPU buffers tsx_async_abort=off tsx_async_abort=full
+========= ========= ============ ============ ============== =================== ======================
+ 0 0 0 HW default Yes Same as MDS Same as MDS
+ 0 0 1 Invalid case Invalid case Invalid case Invalid case
+ 0 1 0 HW default No Need ucode update Need ucode update
+ 0 1 1 Disabled Yes TSX disabled TSX disabled
+ 1 X 1 Enabled X None needed None needed
+========= ========= ============ ============ ============== =================== ======================
+
+In the tables, TSX_CTRL_MSR is a new bit in MSR_IA32_ARCH_CAPABILITIES that
+indicates whether MSR_IA32_TSX_CTRL is supported.
+
+There are two control bits in IA32_TSX_CTRL MSR:
+
+ Bit 0: When set, it disables the Restricted Transactional Memory (RTM)
+        sub-feature of TSX (it forces all transactions to abort on the
+        XBEGIN instruction).
+
+ Bit 1: When set, it disables the enumeration of the RTM and HLE features
+        (i.e. it makes CPUID(EAX=7).EBX{bit4} and
+        CPUID(EAX=7).EBX{bit11} read as 0).
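A read-only probe of the MSRs and bits described above is sketched here for illustration (again, not part of the patch). It uses the standard /dev/cpu/0/msr interface, which requires root and the msr module; the MSR indexes (0x10a, 0x122) and bit positions are as recalled from msr-index.h and should be treated as assumptions to verify:

/* taa_msr_sketch.c -- read-only probe of the MSRs described above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define MSR_IA32_TSX_CTRL          0x122

static int rdmsr(int fd, uint32_t msr, uint64_t *val)
{
    /* /dev/cpu/N/msr returns 8 bytes at the offset equal to the MSR index. */
    return pread(fd, val, sizeof(*val), msr) == sizeof(*val) ? 0 : -1;
}

int main(void)
{
    uint64_t caps, tsx;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0 || rdmsr(fd, MSR_IA32_ARCH_CAPABILITIES, &caps))
        return 1;

    printf("MDS_NO=%llu TSX_CTRL_MSR=%llu TAA_NO=%llu\n",
           (unsigned long long)((caps >> 5) & 1),
           (unsigned long long)((caps >> 7) & 1),
           (unsigned long long)((caps >> 8) & 1));

    if (((caps >> 7) & 1) && !rdmsr(fd, MSR_IA32_TSX_CTRL, &tsx))
        printf("RTM_DISABLE=%llu CPUID_CLEAR=%llu\n",
               (unsigned long long)(tsx & 1),
               (unsigned long long)((tsx >> 1) & 1));

    close(fd);
    return 0;
}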
diff --git a/MAINTAINERS b/MAINTAINERS
index cd222f4af0cf..71e53bee940e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11087,6 +11087,13 @@ F: arch/arm/mach-s3c24xx/mach-bast.c
F: arch/arm/mach-s3c24xx/bast-ide.c
F: arch/arm/mach-s3c24xx/bast-irq.c
+SIPHASH PRF ROUTINES
+M: Jason A. Donenfeld <Jason@zx2c4.com>
+S: Maintained
+F: lib/siphash.c
+F: lib/test_siphash.c
+F: include/linux/siphash.h
+
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
M: Kevin Hilman <khilman@kernel.org>
@@ -12485,13 +12492,6 @@ W: http://www.linux-usb.org/usbnet
S: Maintained
F: drivers/net/usb/dm9601.c
-USB DIAMOND RIO500 DRIVER
-M: Cesar Miquel <miquel@df.uba.ar>
-L: rio500-users@lists.sourceforge.net
-W: http://rio500.sourceforge.net
-S: Maintained
-F: drivers/usb/misc/rio500*
-
USB EHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
index 90478086eff5..e7866bc2d817 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 166
+SUBLEVEL = 220
EXTRAVERSION =
NAME = Roaring Lionus
@@ -400,6 +400,7 @@ KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@@ -506,8 +507,8 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
-CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
@@ -515,6 +516,7 @@ ifneq ($(GCC_TOOLCHAIN),)
CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
CLANG_FLAGS += -no-integrated-as
+CLANG_FLAGS += -Werror=unknown-warning-option
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
endif
@@ -647,6 +649,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
@@ -655,8 +658,7 @@ KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
endif
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
@@ -719,7 +721,6 @@ ifeq ($(cc-name),clang)
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
# Quiet clang warning: comparison of unsigned expression < 0 is always false
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -833,6 +834,18 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
# enforce correct pointer usage
KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
+# Require designated initializers for all marked structures
+KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+
+# change __FILE__ to the relative path from the srctree
+KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+# ensure -fcf-protection is disabled when using retpoline as it is
+# incompatible with -mindirect-branch=thunk-extern
+ifdef CONFIG_RETPOLINE
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+endif
+
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
@@ -1508,9 +1521,6 @@ else # KBUILD_EXTMOD
# We are always building modules
KBUILD_MODULES := 1
-PHONY += crmodverdir
-crmodverdir:
- $(cmd_crmodverdir)
PHONY += $(objtree)/Module.symvers
$(objtree)/Module.symvers:
@@ -1522,7 +1532,7 @@ $(objtree)/Module.symvers:
module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
PHONY += $(module-dirs) modules
-$(module-dirs): crmodverdir $(objtree)/Module.symvers
+$(module-dirs): prepare $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
modules: $(module-dirs)
@@ -1563,7 +1573,8 @@ help:
# Dummies...
PHONY += prepare scripts
-prepare: ;
+prepare:
+ $(cmd_crmodverdir)
scripts: ;
endif # KBUILD_EXTMOD
@@ -1688,17 +1699,14 @@ endif
# Modules
/: prepare scripts FORCE
- $(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
Documentation/ samples/: headers_install
%/: prepare scripts FORCE
- $(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir)
%.ko: prepare scripts FORCE
- $(cmd_crmodverdir)
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
$(build)=$(build-dir) $(@:.ko=.o)
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c7a081c583b9..2de75779a247 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -23,7 +23,7 @@ config ARC
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
- select HAVE_FUTEX_CMPXCHG
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select HAVE_KRETPROBES
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index d6c1bbc98ac3..15698b3e490f 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -63,6 +63,7 @@
interrupt-names = "macirq";
phy-mode = "rgmii";
snps,pbl = < 32 >;
+ snps,multicast-filter-bins = <256>;
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index ea022d47896c..21ec82466d62 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
#define BUG() do { \
pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
- dump_stack(); \
+ barrier_before_unreachable(); \
+ __builtin_trap(); \
} while (0)
#define HAVE_ARCH_BUG
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index d819de1c5d10..3ea4112c8302 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif /* CONFIG_ARC_HAS_LLSC */
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), (unsigned long)(n)))
+#define cmpxchg(ptr, o, n) ({ \
+ (typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)); \
+})
/*
* atomic_cmpxchg is same as cmpxchg
@@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return __xchg_bad_pointer();
}
-#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
- sizeof(*(ptr))))
+#define xchg(ptr, with) ({ \
+ (typeof(*(ptr)))__xchg((unsigned long)(with), \
+ (ptr), \
+ sizeof(*(ptr))); \
+})
#endif /* CONFIG_ARC_PLAT_EZNPS */
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index b29f1a9fd6f7..07c8e1a6c56e 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -14,6 +14,8 @@
#ifdef __ASSEMBLY__
#define ASM_NL ` /* use '`' to mark new line in macro */
+#define __ALIGN .align 4
+#define __ALIGN_STR __stringify(__ALIGN)
/* annotation for data we want in DCCM - if enabled in .config */
.macro ARCFP_DATA nm
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 1f945d0f40da..208bf2c9e7b0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -107,6 +107,7 @@ ENTRY(stext)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
+ st r1, [@uboot_magic]
st r2, [@uboot_arg]
#endif
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 2ce24e74f879..a509b77ef80d 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -488,8 +488,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
/* loop thru all available h/w condition indexes */
for (j = 0; j < cc_bcr.c; j++) {
write_aux_reg(ARC_REG_CC_INDEX, j);
- cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
- cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
/* See if it has been mapped to a perf event_id */
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 9119bea503a7..9f96120eee6e 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -32,6 +32,7 @@ unsigned int intr_to_DE_cnt;
/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
+int __initdata uboot_magic;
char __initdata *uboot_arg;
const struct machine_desc *machine_desc;
@@ -400,6 +401,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
+/* We always pass 0 as magic from U-boot */
+#define UBOOT_MAGIC_VALUE 0
void __init handle_uboot_args(void)
{
@@ -415,6 +418,11 @@ void __init handle_uboot_args(void)
goto ignore_uboot_args;
}
+ if (uboot_magic != UBOOT_MAGIC_VALUE) {
+ pr_warn(IGNORE_ARGS "non zero uboot magic\n");
+ goto ignore_uboot_args;
+ }
+
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index c927aa84e652..cd6e3615e3d1 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -155,3 +155,12 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
insterror_is_error(address, regs);
}
+
+/*
+ * abort() call generated by older gcc for __builtin_trap()
+ */
+void abort(void)
+{
+ __asm__ __volatile__("trap_s 5\n");
+}
+EXPORT_SYMBOL(abort);
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 61fd1ce63c56..6bb9f8ea9291 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
MAX_DMA_ADDRESS);
}
-static void *unw_hdr_alloc(unsigned long sz)
-{
- return kmalloc(sz, GFP_KERNEL);
-}
-
static void init_unwind_table(struct unwind_table *table, const char *name,
const void *core_start, unsigned long core_size,
const void *init_start, unsigned long init_size,
@@ -370,6 +365,10 @@ ret_err:
}
#ifdef CONFIG_MODULES
+static void *unw_hdr_alloc(unsigned long sz)
+{
+ return kmalloc(sz, GFP_KERNEL);
+}
static struct unwind_table *last_table;
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index a4dc881da277..3c88ccbe01af 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -890,9 +890,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int pd0[mmu->ways];
unsigned long flags;
- int set;
+ int set, n_ways = mmu->ways;
+
+ n_ways = min(n_ways, 4);
+ BUG_ON(mmu->ways > 4);
local_irq_save(flags);
@@ -900,9 +902,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
for (set = 0; set < mmu->sets; set++) {
int is_valid, way;
+ unsigned int pd0[4];
/* read out all the ways of current set */
- for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+ for (way = 0, is_valid = 0; way < n_ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
@@ -916,14 +919,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
continue;
/* Scan the set for duplicate ways: needs a nested loop */
- for (way = 0; way < mmu->ways - 1; way++) {
+ for (way = 0; way < n_ways - 1; way++) {
int n;
if (!pd0[way])
continue;
- for (n = way + 1; n < mmu->ways; n++) {
+ for (n = way + 1; n < n_ways; n++) {
if (pd0[way] != pd0[n])
continue;
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
index 1d175cc6ad6d..86f844caa405 100644
--- a/arch/arc/plat-eznps/Kconfig
+++ b/arch/arc/plat-eznps/Kconfig
@@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform"
select ARC_HAS_COH_CACHES if SMP
select CPU_BIG_ENDIAN
- select CLKSRC_NPS
+ select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC
select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 92eca4f39c1e..23b0ff0f75a2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2031,7 +2031,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
- depends on !CPU_V7M
+ depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 6d449e5d5450..455ec5e5d208 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -994,14 +994,21 @@ choice
Say Y here if you want kernel low-level debugging support
on SOCFPGA(Cyclone 5 and Arria 5) based platforms.
- config DEBUG_SOCFPGA_UART1
+ config DEBUG_SOCFPGA_ARRIA10_UART1
depends on ARCH_SOCFPGA
- bool "Use SOCFPGA UART1 for low-level debug"
+ bool "Use SOCFPGA Arria10 UART1 for low-level debug"
select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on SOCFPGA(Arria 10) based platforms.
+ config DEBUG_SOCFPGA_CYCLONE5_UART1
+ depends on ARCH_SOCFPGA
+ bool "Use SOCFPGA Cyclone 5 UART1 for low-level debug"
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on SOCFPGA(Cyclone 5 and Arria 5) based platforms.
config DEBUG_SUN9I_UART0
bool "Kernel low-level debugging messages via sun9i UART0"
@@ -1347,22 +1354,22 @@ config DEBUG_OMAP2PLUS_UART
depends on ARCH_OMAP2PLUS
config DEBUG_IMX_UART_PORT
- int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \
- DEBUG_IMX25_UART || \
- DEBUG_IMX21_IMX27_UART || \
- DEBUG_IMX31_UART || \
- DEBUG_IMX35_UART || \
- DEBUG_IMX50_UART || \
- DEBUG_IMX51_UART || \
- DEBUG_IMX53_UART || \
- DEBUG_IMX6Q_UART || \
- DEBUG_IMX6SL_UART || \
- DEBUG_IMX6SLL_UART || \
- DEBUG_IMX6SX_UART || \
- DEBUG_IMX6UL_UART || \
- DEBUG_IMX7D_UART
+ int "i.MX Debug UART Port Selection"
+ depends on DEBUG_IMX1_UART || \
+ DEBUG_IMX25_UART || \
+ DEBUG_IMX21_IMX27_UART || \
+ DEBUG_IMX31_UART || \
+ DEBUG_IMX35_UART || \
+ DEBUG_IMX50_UART || \
+ DEBUG_IMX51_UART || \
+ DEBUG_IMX53_UART || \
+ DEBUG_IMX6Q_UART || \
+ DEBUG_IMX6SL_UART || \
+ DEBUG_IMX6SLL_UART || \
+ DEBUG_IMX6SX_UART || \
+ DEBUG_IMX6UL_UART || \
+ DEBUG_IMX7D_UART
default 1
- depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.
@@ -1543,7 +1550,8 @@ config DEBUG_UART_PHYS
default 0xfe800000 if ARCH_IOP32X
default 0xff690000 if DEBUG_RK32_UART2
default 0xffc02000 if DEBUG_SOCFPGA_UART0
- default 0xffc02100 if DEBUG_SOCFPGA_UART1
+ default 0xffc02100 if DEBUG_SOCFPGA_ARRIA10_UART1
+ default 0xffc03000 if DEBUG_SOCFPGA_CYCLONE5_UART1
default 0xffd82340 if ARCH_IOP13XX
default 0xffe40000 if DEBUG_RCAR_GEN1_SCIF0
default 0xffe42000 if DEBUG_RCAR_GEN1_SCIF2
@@ -1633,7 +1641,8 @@ config DEBUG_UART_VIRT
default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
default 0xfeb31000 if DEBUG_KEYSTONE_UART1
default 0xfec02000 if DEBUG_SOCFPGA_UART0
- default 0xfec02100 if DEBUG_SOCFPGA_UART1
+ default 0xfec02100 if DEBUG_SOCFPGA_ARRIA10_UART1
+ default 0xfec03000 if DEBUG_SOCFPGA_CYCLONE5_UART1
default 0xfec12000 if (DEBUG_MVEBU_UART0 || DEBUG_MVEBU_UART0_ALTERNATE) && ARCH_MVEBU
default 0xfec12100 if DEBUG_MVEBU_UART1_ALTERNATE
default 0xfec10000 if DEBUG_SIRFATLAS7_UART0
@@ -1681,9 +1690,9 @@ config DEBUG_UART_8250_WORD
depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
depends on DEBUG_UART_8250_SHIFT >= 2
default y if DEBUG_PICOXCELL_UART || \
- DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_UART1 || \
- DEBUG_KEYSTONE_UART0 || DEBUG_KEYSTONE_UART1 || \
- DEBUG_ALPINE_UART0 || \
+ DEBUG_SOCFPGA_UART0 || DEBUG_SOCFPGA_ARRIA10_UART1 || \
+ DEBUG_SOCFPGA_CYCLONE5_UART1 || DEBUG_KEYSTONE_UART0 || \
+ DEBUG_KEYSTONE_UART1 || DEBUG_ALPINE_UART0 || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
DEBUG_DAVINCI_DA8XX_UART2 || \
DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 3f7d1b74c5e0..a17ca8d78656 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,7 +17,8 @@
@ there.
.inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
#else
- W(mov) r0, r0
+ AR_CLASS( mov r0, r0 )
+ M_CLASS( nop.w )
#endif
.endm
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 2d7f2bb0d66a..a67ed746b0e3 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1383,7 +1383,21 @@ ENTRY(efi_stub_entry)
@ Preserve return value of efi_entry() in r4
mov r4, r0
- bl cache_clean_flush
+
+ @ our cache maintenance code relies on CP15 barrier instructions
+ @ but since we arrived here with the MMU and caches configured
+ @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+ @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+ @ the enable path will be executed on v7+ only.
+ mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
+ tst r1, #(1 << 5) @ CP15BEN bit set?
+ bne 0f
+ orr r1, r1, #(1 << 5) @ CP15 barrier instructions
+ mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
+ ARM( .inst 0xf57ff06f @ v7+ isb )
+ THUMB( isb )
+
+0: bl cache_clean_flush
bl cache_off
@ Set parameters for booting zImage according to boot protocol
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index 17ae0f3efac8..f3ddd4f599e3 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -1,10 +1,14 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
+#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index e82432c79f85..3f3ad09c7cd5 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -701,6 +701,7 @@
pinctrl-0 = <&cpsw_default>;
pinctrl-1 = <&cpsw_sleep>;
status = "okay";
+ slaves = <1>;
};
&davinci_mdio {
@@ -708,15 +709,14 @@
pinctrl-0 = <&davinci_mdio_default>;
pinctrl-1 = <&davinci_mdio_sleep>;
status = "okay";
-};
-&cpsw_emac0 {
- phy_id = <&davinci_mdio>, <0>;
- phy-mode = "rgmii-txid";
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
};
-&cpsw_emac1 {
- phy_id = <&davinci_mdio>, <1>;
+&cpsw_emac0 {
+ phy-handle = <&ethphy0>;
phy-mode = "rgmii-txid";
};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index c9c9a47446e8..56224aa5e83e 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -1117,6 +1117,8 @@
ti,hwmods = "dss_dispc";
clocks = <&disp_clk>;
clock-names = "fck";
+
+ max-memory-bandwidth = <230000000>;
};
rfbi: rfbi@4832a800 {
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 957840cc7b78..b55c094893c6 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -79,7 +79,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
panel-timing {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9d35c3f07cad..21918807c9f6 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -41,7 +41,7 @@
};
lcd0: display {
- compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
label = "lcd";
panel-timing {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 78bee26361f1..552de167f95f 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -27,6 +27,27 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ main_12v0: fixedregulator-main_12v0 {
+ /* main supply */
+ compatible = "regulator-fixed";
+ regulator-name = "main_12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ evm_5v0: fixedregulator-evm_5v0 {
+ /* Output of TPS54531D */
+ compatible = "regulator-fixed";
+ regulator-name = "evm_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vdd_3v3: fixedregulator-vdd_3v3 {
compatible = "regulator-fixed";
regulator-name = "vdd_3v3";
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index e2e9599596e2..05379b6c1c13 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -334,7 +334,7 @@
clock-names = "uartclk", "apb_pclk";
};
- ssp: ssp@1000d000 {
+ ssp: spi@1000d000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x1000d000 0x1000>;
clocks = <&sspclk>, <&pclk>;
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index c789564f2803..939c108c24a6 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -45,7 +45,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
@@ -343,7 +343,7 @@
clock-names = "apb_pclk";
};
- pb1176_ssp: ssp@1010b000 {
+ pb1176_ssp: spi@1010b000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x1010b000 0x1000>;
interrupt-parent = <&intc_dc1176>;
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 3944765ac4b0..95037c48182d 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -145,7 +145,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -153,7 +153,7 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
@@ -480,7 +480,7 @@
clock-names = "uartclk", "apb_pclk";
};
- ssp@1000d000 {
+ spi@1000d000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x1000d000 0x1000>;
interrupt-parent = <&intc_pb11mp>;
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index aeb49c4bd773..068293254fbb 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -43,7 +43,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -51,7 +51,7 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
@@ -318,7 +318,7 @@
clock-names = "uartclk", "apb_pclk";
};
- ssp: ssp@1000d000 {
+ ssp: spi@1000d000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x1000d000 0x1000>;
clocks = <&sspclk>, <&pclk>;
@@ -539,4 +539,3 @@
};
};
};
-
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index b3501ae2a3bd..4fba898b8f4f 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -546,7 +546,7 @@
};
};
- uart1 {
+ usart1 {
pinctrl_usart1: usart1-0 {
atmel,pins =
<AT91_PIOB 4 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PB4 periph A with pullup */
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 84df85ea6296..7efde03daadd 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -26,5 +26,5 @@
};
&hdmi {
- hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+ hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm/boot/dts/dove-cubox.dts b/arch/arm/boot/dts/dove-cubox.dts
index af3cb633135f..ee32315e3d3a 100644
--- a/arch/arm/boot/dts/dove-cubox.dts
+++ b/arch/arm/boot/dts/dove-cubox.dts
@@ -86,7 +86,7 @@
status = "okay";
clock-frequency = <100000>;
- si5351: clock-generator {
+ si5351: clock-generator@60 {
compatible = "silabs,si5351a-msop";
reg = <0x60>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 698d58cea20d..11342aeccb73 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -152,7 +152,7 @@
0xffffe000 MBUS_ID(0x03, 0x01) 0 0x0000800 /* CESA SRAM 2k */
0xfffff000 MBUS_ID(0x0d, 0x00) 0 0x0000800>; /* PMU SRAM 2k */
- spi0: spi-ctrl@10600 {
+ spi0: spi@10600 {
compatible = "marvell,orion-spi";
#address-cells = <1>;
#size-cells = <0>;
@@ -165,7 +165,7 @@
status = "disabled";
};
- i2c: i2c-ctrl@11000 {
+ i2c: i2c@11000 {
compatible = "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
#address-cells = <1>;
@@ -215,7 +215,7 @@
status = "disabled";
};
- spi1: spi-ctrl@14600 {
+ spi1: spi@14600 {
compatible = "marvell,orion-spi";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index a1a928064b53..204ba77e3e4d 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -123,6 +123,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0xc0000000>;
+ dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
ti,hwmods = "l3_main_1", "l3_main_2";
reg = <0x0 0x44000000 0x0 0x1000000>,
<0x0 0x45000000 0x0 0x1000>;
@@ -282,6 +283,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -319,6 +321,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 51dbd8cb91cb..99b3d2331971 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -345,7 +345,7 @@
};
hsotg: hsotg@12480000 {
- compatible = "snps,dwc2";
+ compatible = "samsung,s3c6400-hsotg", "snps,dwc2";
reg = <0x12480000 0x20000>;
interrupts = <0 141 0>;
clocks = <&cmu CLK_USBOTG>;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 6098dacd09f1..1b2709af2a42 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -170,6 +170,8 @@
reg = <0x66>;
interrupt-parent = <&gpx3>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s5m8767_irq>;
vinb1-supply = <&main_dc_reg>;
vinb2-supply = <&main_dc_reg>;
@@ -547,6 +549,13 @@
cap-sd-highspeed;
};
+&pinctrl_0 {
+ s5m8767_irq: s5m8767-irq {
+ samsung,pins = "gpx3-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+};
+
&rtc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos5250-snow-rev5.dts b/arch/arm/boot/dts/exynos5250-snow-rev5.dts
index 90560c316f64..cb986175b69b 100644
--- a/arch/arm/boot/dts/exynos5250-snow-rev5.dts
+++ b/arch/arm/boot/dts/exynos5250-snow-rev5.dts
@@ -23,6 +23,14 @@
samsung,model = "Snow-I2S-MAX98090";
samsung,audio-codec = <&max98090>;
+
+ cpu {
+ sound-dai = <&i2s0 0>;
+ };
+
+ codec {
+ sound-dai = <&max98090 0>, <&hdmi>;
+ };
};
};
@@ -34,6 +42,9 @@
interrupt-parent = <&gpx0>;
pinctrl-names = "default";
pinctrl-0 = <&max98090_irq>;
+ clocks = <&pmu_system_controller 0>;
+ clock-names = "mclk";
+ #sound-dai-cells = <1>;
};
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 9cc83c51c925..e664c33c3c64 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -110,6 +110,7 @@
regulator-name = "PVDD_APIO_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo3_reg: LDO3 {
@@ -148,6 +149,7 @@
regulator-name = "PVDD_ABB_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo9_reg: LDO9 {
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index ec4a00f1ce01..c9d379b1a166 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -302,6 +302,7 @@
regulator-name = "vdd_1v35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -323,6 +324,7 @@
regulator-name = "vdd_2v";
regulator-min-microvolt = <2000000>;
regulator-max-microvolt = <2000000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -333,6 +335,7 @@
regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -427,6 +430,7 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 01f466816fea..ae58b8d6f614 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -302,6 +302,7 @@
regulator-name = "vdd_1v35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -323,6 +324,7 @@
regulator-name = "vdd_2v";
regulator-min-microvolt = <2000000>;
regulator-max-microvolt = <2000000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -333,6 +335,7 @@
regulator-name = "vdd_1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
@@ -427,6 +430,7 @@
regulator-name = "vdd_ldo10";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
};
diff --git a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
index ba689fbd0e41..301cf8d45947 100644
--- a/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
+++ b/arch/arm/boot/dts/imx53-voipac-dmm-668.dtsi
@@ -17,12 +17,8 @@
memory@70000000 {
device_type = "memory";
- reg = <0x70000000 0x20000000>;
- };
-
- memory@b0000000 {
- device_type = "memory";
- reg = <0xb0000000 0x20000000>;
+ reg = <0x70000000 0x20000000>,
+ <0xb0000000 0x20000000>;
};
regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index e0280cac2484..fed72a5f3ffa 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -90,6 +90,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii";
+ phy-reset-duration = <10>; /* in msecs */
phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
phy-supply = <&vdd_eth_io_reg>;
status = "disabled";
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 8d195ba49548..7eff6466f6d5 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -997,7 +997,7 @@
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_SDMA>,
+ clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index ee3f9a0c0c32..06f64ba5a706 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -805,7 +805,7 @@
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_SDMA>,
- <&clks IMX6SL_CLK_SDMA>;
+ <&clks IMX6SL_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
iram = <&ocram>;
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 0a454d185260..4381470a6468 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -921,7 +921,7 @@
compatible = "fsl,imx6sx-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SX_CLK_SDMA>,
+ clocks = <&clks IMX6SX_CLK_IPG>,
<&clks IMX6SX_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index bfdfcd7ab680..dd948b18778b 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -454,7 +454,7 @@
pwm1: pwm@02080000 {
compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
reg = <0x02080000 0x4000>;
- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_PWM1>,
<&clks IMX6UL_CLK_PWM1>;
clock-names = "ipg", "per";
@@ -465,7 +465,7 @@
pwm2: pwm@02084000 {
compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
reg = <0x02084000 0x4000>;
- interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_PWM2>,
<&clks IMX6UL_CLK_PWM2>;
clock-names = "ipg", "per";
@@ -476,7 +476,7 @@
pwm3: pwm@02088000 {
compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
reg = <0x02088000 0x4000>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_PWM3>,
<&clks IMX6UL_CLK_PWM3>;
clock-names = "ipg", "per";
@@ -487,7 +487,7 @@
pwm4: pwm@0208c000 {
compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
reg = <0x0208c000 0x4000>;
- interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6UL_CLK_PWM4>,
<&clks IMX6UL_CLK_PWM4>;
clock-names = "ipg", "per";
@@ -823,7 +823,7 @@
"fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6UL_CLK_SDMA>,
+ clocks = <&clks IMX6UL_CLK_IPG>,
<&clks IMX6UL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 205130600853..72d1b8209f5e 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -43,7 +43,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";
@@ -69,7 +69,7 @@
<&clks IMX7D_ENET2_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 74a3938ffd46..71cbd97a1a8d 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -461,8 +461,8 @@
reg = <0x302d0000 0x10000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
- <&clks IMX7D_GPT1_ROOT_CLK>,
- <&clks IMX7D_GPT_3M_CLK>;
+ <&clks IMX7D_GPT1_ROOT_CLK>,
+ <&clks IMX7D_GPT_3M_CLK>;
clock-names = "ipg", "per", "osc_per";
};
@@ -470,7 +470,7 @@
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302e0000 0x10000>;
interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
<&clks IMX7D_GPT2_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
@@ -480,7 +480,7 @@
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x302f0000 0x10000>;
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
<&clks IMX7D_GPT3_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
@@ -490,7 +490,7 @@
compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
reg = <0x30300000 0x10000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_CLK_DUMMY>,
+ clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
<&clks IMX7D_GPT4_ROOT_CLK>;
clock-names = "ipg", "per";
status = "disabled";
@@ -1028,8 +1028,8 @@
compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_SDMA_CORE_CLK>,
- <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
+ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+ <&clks IMX7D_SDMA_CORE_CLK>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 876ed5f2922c..f82f193b8856 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -108,16 +108,21 @@
twl_audio: audio {
compatible = "ti,twl4030-audio";
codec {
+ ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
};
};
};
};
&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
clock-frequency = <400000>;
};
&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
clock-frequency = <400000>;
};
@@ -221,6 +226,7 @@
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4) /* gpmc_ncs6.gpio_57 */
>;
};
};
@@ -239,6 +245,18 @@
OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
>;
};
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
+ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
+ >;
+ };
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
+ >;
+ };
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 08f0a35dc0d1..20ee7ca8c653 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -117,10 +117,14 @@
};
&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
clock-frequency = <400000>;
};
&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
clock-frequency = <400000>;
at24@50 {
compatible = "atmel,24c64";
@@ -215,6 +219,18 @@
OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
>;
};
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
+ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
+ >;
+ };
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
+ >;
+ };
};
&uart2 {
@@ -250,3 +266,7 @@
&twl_gpio {
ti,use-leds;
};
+
+&twl_keypad {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
index b7bd3a110a8d..dd0bdf765599 100644
--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
+++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
@@ -49,8 +49,8 @@
sd_reg: regulator@2 {
compatible = "regulator-fixed";
regulator-name = "sd_reg";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
gpio = <&gpio 5 5 0>;
enable-active-high;
};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index b5841fab51c1..2802c9565b6c 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -139,11 +139,11 @@
};
clcd: clcd@31040000 {
- compatible = "arm,pl110", "arm,primecell";
+ compatible = "arm,pl111", "arm,primecell";
reg = <0x31040000 0x1000>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk LPC32XX_CLK_LCD>;
- clock-names = "apb_pclk";
+ clocks = <&clk LPC32XX_CLK_LCD>, <&clk LPC32XX_CLK_LCD>;
+ clock-names = "clcdclk", "apb_pclk";
status = "disabled";
};
@@ -179,7 +179,7 @@
* ssp0 and spi1 are shared pins;
* enable one in your board dts, as needed.
*/
- ssp0: ssp@20084000 {
+ ssp0: spi@20084000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x20084000 0x1000>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
@@ -199,7 +199,7 @@
* ssp1 and spi2 are shared pins;
* enable one in your board dts, as needed.
*/
- ssp1: ssp@2008c000 {
+ ssp1: spi@2008c000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x2008c000 0x1000>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
@@ -230,7 +230,7 @@
status = "disabled";
};
- i2s1: i2s@2009C000 {
+ i2s1: i2s@2009c000 {
compatible = "nxp,lpc3220-i2s";
reg = <0x2009C000 0x1000>;
};
@@ -273,7 +273,7 @@
status = "disabled";
};
- i2c1: i2c@400A0000 {
+ i2c1: i2c@400a0000 {
compatible = "nxp,pnx-i2c";
reg = <0x400A0000 0x100>;
interrupt-parent = <&sic1>;
@@ -284,7 +284,7 @@
clocks = <&clk LPC32XX_CLK_I2C1>;
};
- i2c2: i2c@400A8000 {
+ i2c2: i2c@400a8000 {
compatible = "nxp,pnx-i2c";
reg = <0x400A8000 0x100>;
interrupt-parent = <&sic1>;
@@ -295,7 +295,7 @@
clocks = <&clk LPC32XX_CLK_I2C2>;
};
- mpwm: mpwm@400E8000 {
+ mpwm: mpwm@400e8000 {
compatible = "nxp,lpc3220-motor-pwm";
reg = <0x400E8000 0x78>;
status = "disabled";
@@ -394,7 +394,7 @@
#gpio-cells = <3>; /* bank, pin, flags */
};
- timer4: timer@4002C000 {
+ timer4: timer@4002c000 {
compatible = "nxp,lpc3220-timer";
reg = <0x4002C000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -412,7 +412,7 @@
status = "disabled";
};
- watchdog: watchdog@4003C000 {
+ watchdog: watchdog@4003c000 {
compatible = "nxp,pnx4008-wdt";
reg = <0x4003C000 0x1000>;
clocks = <&clk LPC32XX_CLK_WDOG>;
@@ -451,7 +451,7 @@
status = "disabled";
};
- timer1: timer@4004C000 {
+ timer1: timer@4004c000 {
compatible = "nxp,lpc3220-timer";
reg = <0x4004C000 0x1000>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -462,7 +462,9 @@
key: key@40050000 {
compatible = "nxp,lpc3220-key";
reg = <0x40050000 0x1000>;
- interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk LPC32XX_CLK_KEY>;
+ interrupt-parent = <&sic1>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -475,14 +477,14 @@
status = "disabled";
};
- pwm1: pwm@4005C000 {
+ pwm1: pwm@4005c000 {
compatible = "nxp,lpc3220-pwm";
reg = <0x4005C000 0x4>;
clocks = <&clk LPC32XX_CLK_PWM1>;
status = "disabled";
};
- pwm2: pwm@4005C004 {
+ pwm2: pwm@4005c004 {
compatible = "nxp,lpc3220-pwm";
reg = <0x4005C004 0x4>;
clocks = <&clk LPC32XX_CLK_PWM2>;
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 44715c8ef756..72a3fc63d0ec 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -143,7 +143,7 @@
};
&enet0 {
- tbi-handle = <&tbi1>;
+ tbi-handle = <&tbi0>;
phy-handle = <&sgmii_phy2>;
phy-connection-type = "sgmii";
status = "okay";
@@ -222,6 +222,13 @@
sgmii_phy2: ethernet-phy@2 {
reg = <0x2>;
};
+ tbi0: tbi-phy@1f {
+ reg = <0x1f>;
+ device_type = "tbi-phy";
+ };
+};
+
+&mdio1 {
tbi1: tbi-phy@1f {
reg = <0x1f>;
device_type = "tbi-phy";
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 825f6eae3d1c..0de4ba698d1d 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -512,6 +512,15 @@
reg = <0x0 0x2d24000 0x0 0x4000>;
};
+ mdio1: mdio@2d64000 {
+ compatible = "gianfar";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2d64000 0x0 0x4000>,
+ <0x0 0x2d50030 0x0 0x4>;
+ };
+
ptp_clock@2d10e00 {
compatible = "fsl,etsec-ptp";
reg = <0x0 0x2d10e00 0x0 0xb0>;
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 47e5b63339d1..e95deed6a797 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -180,7 +180,7 @@
clocks = <&soc_clocks MMP2_CLK_GPIO>;
resets = <&soc_clocks MMP2_CLK_GPIO>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
ranges;
gcb0: gpio@d4019000 {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index b3a8b1f24499..719150693449 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -28,6 +28,7 @@
aliases {
display0 = &lcd;
+ display1 = &tv0;
};
gpio-keys {
@@ -70,7 +71,7 @@
#sound-dai-cells = <0>;
};
- spi_lcd {
+ spi_lcd: spi_lcd {
compatible = "spi-gpio";
#address-cells = <0x1>;
#size-cells = <0x0>;
@@ -122,7 +123,7 @@
};
tv0: connector {
- compatible = "svideo-connector";
+ compatible = "composite-video-connector";
label = "tv";
port {
@@ -134,7 +135,7 @@
tv_amp: opa362 {
compatible = "ti,opa362";
- enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio1 23 GPIO_ACTIVE_HIGH>; /* GPIO_23 to enable video out amplifier */
ports {
#address-cells = <1>;
@@ -273,6 +274,13 @@
OMAP3_CORE1_IOPAD(0x2134, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio112 */
>;
};
+
+ penirq_pins: pinmux_penirq_pins {
+ pinctrl-single,pins = <
+ /* here we could enable to wakeup the cpu from suspend by a pen touch */
+ OMAP3_CORE1_IOPAD(0x2194, PIN_INPUT_PULLUP | MUX_MODE4) /* gpio160 */
+ >;
+ };
};
&omap3_pmx_core2 {
@@ -410,10 +418,19 @@
tsc2007@48 {
compatible = "ti,tsc2007";
reg = <0x48>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&penirq_pins>;
interrupt-parent = <&gpio6>;
interrupts = <0 IRQ_TYPE_EDGE_FALLING>; /* GPIO_160 */
- gpios = <&gpio6 0 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* GPIO_160 */
ti,x-plate-ohms = <600>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <640>;
+ touchscreen-max-pressure = <1000>;
+ touchscreen-fuzz-x = <3>;
+ touchscreen-fuzz-y = <8>;
+ touchscreen-fuzz-pressure = <10>;
+ touchscreen-inverted-y;
};
/* RFID EEPROM */
@@ -519,6 +536,12 @@
regulator-max-microvolt = <3150000>;
};
+/* Needed to power the DPI pins */
+
+&vpll2 {
+ regulator-always-on;
+};
+
&dss {
pinctrl-names = "default";
pinctrl-0 = < &dss_dpi_pins >;
@@ -539,10 +562,14 @@
vdda-supply = <&vdac>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
port {
+ reg = <0>;
venc_out: endpoint {
remote-endpoint = <&opa_in>;
- ti,channels = <2>;
+ ti,channels = <1>;
ti,invert-polarity;
};
};
@@ -586,22 +613,22 @@
bootloaders@80000 {
label = "U-Boot";
- reg = <0x80000 0x1e0000>;
+ reg = <0x80000 0x1c0000>;
};
- bootloaders_env@260000 {
+ bootloaders_env@240000 {
label = "U-Boot Env";
- reg = <0x260000 0x20000>;
+ reg = <0x240000 0x40000>;
};
kernel@280000 {
label = "Kernel";
- reg = <0x280000 0x400000>;
+ reg = <0x280000 0x600000>;
};
- filesystem@680000 {
+ filesystem@880000 {
label = "File System";
- reg = <0x680000 0xf980000>;
+ reg = <0x880000 0>; /* 0 = MTDPART_SIZ_FULL */
};
};
};
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index 53e007abdc71..964240a0f4a9 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -221,6 +221,17 @@
gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* GPIO_164 */
};
+ /* wl1251 wifi+bt module */
+ wlan_en: fixed-regulator-wg7210_en {
+ compatible = "regulator-fixed";
+ regulator-name = "vwlan";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ startup-delay-us = <50000>;
+ enable-active-high;
+ gpio = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ };
+
/* wg7210 (wifi+bt module) 32k clock buffer */
wg7210_32k: fixed-regulator-wg7210_32k {
compatible = "regulator-fixed";
@@ -514,9 +525,30 @@
/*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/ /* GPIO_127 */
};
-/* mmc3 is probed using pdata-quirks to pass wl1251 card data */
&mmc3 {
- status = "disabled";
+ vmmc-supply = <&wlan_en>;
+
+ bus-width = <4>;
+ non-removable;
+ ti,non-removable;
+ cap-power-off-card;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlan: wifi@1 {
+ compatible = "ti,wl1251";
+
+ reg = <1>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */
+
+ ti,wl1251-has-eeprom;
+ };
};
/* bluetooth*/
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index dc80886b5329..e3dfba8b3efe 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -225,7 +225,7 @@
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
- cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 4caadb253249..e412373fe7bf 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -694,6 +694,11 @@
vbus-supply = <&smps10_out1_reg>;
};
+&dwc3 {
+ extcon = <&extcon_usb3>;
+ dr_mode = "otg";
+};
+
&mcspi1 {
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 1d1d8e90cd80..a76266f242a1 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -131,6 +131,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0 0xc0000000>;
+ dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
reg = <0 0x44000000 0 0x2000>,
<0 0x44800000 0 0x3000>,
diff --git a/arch/arm/boot/dts/orion5x-linkstation.dtsi b/arch/arm/boot/dts/orion5x-linkstation.dtsi
index ed456ab35fd8..c1bc8376d4eb 100644
--- a/arch/arm/boot/dts/orion5x-linkstation.dtsi
+++ b/arch/arm/boot/dts/orion5x-linkstation.dtsi
@@ -156,7 +156,7 @@
&i2c {
status = "okay";
- rtc {
+ rtc@32 {
compatible = "ricoh,rs5c372a";
reg = <0x32>;
};
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index 9e73dc6b3ed3..d629948000db 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -34,7 +34,7 @@
clocks = <&clks CLK_NONE>;
};
- pxa27x_ohci: usb@4c000000 {
+ usb0: usb@4c000000 {
compatible = "marvell,pxa-ohci";
reg = <0x4c000000 0x10000>;
interrupts = <3>;
@@ -70,7 +70,7 @@
clocks = <&clks CLK_PWM1>;
};
- pwri2c: i2c@40f000180 {
+ pwri2c: i2c@40f00180 {
compatible = "mrvl,pxa-i2c";
reg = <0x40f00180 0x24>;
interrupts = <6>;
diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi
index 3ff077ca4400..5a6f4ed92dac 100644
--- a/arch/arm/boot/dts/pxa2xx.dtsi
+++ b/arch/arm/boot/dts/pxa2xx.dtsi
@@ -117,13 +117,6 @@
status = "disabled";
};
- usb0: ohci@4c000000 {
- compatible = "marvell,pxa-ohci";
- reg = <0x4c000000 0x10000>;
- interrupts = <3>;
- status = "disabled";
- };
-
mmc0: mmc@41100000 {
compatible = "marvell,pxa-mmc";
reg = <0x41100000 0x1000>;
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index 9d6f3aacedb7..4aee15062690 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -187,7 +187,7 @@
status = "disabled";
};
- pxa3xx_ohci: usb@4c000000 {
+ usb0: usb@4c000000 {
compatible = "marvell,pxa-ohci";
reg = <0x4c000000 0x10000>;
interrupts = <3>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 4b7d97275c62..5ee84e3cb3e9 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -211,7 +211,7 @@
saw0: regulator@b089000 {
compatible = "qcom,saw2";
- reg = <0x02089000 0x1000>, <0x0b009000 0x1000>;
+ reg = <0x0b089000 0x1000>, <0x0b009000 0x1000>;
regulator;
};
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index b9bbcce69dfb..6c6d4893e92d 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -67,6 +67,14 @@
<0xf0000100 0x100>;
};
+ timer@f0000200 {
+ compatible = "arm,cortex-a9-global-timer";
+ reg = <0xf0000200 0x100>;
+ interrupts = <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ clocks = <&cpg_clocks R8A7779_CLK_ZS>;
+ };
+
timer@f0000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xf0000600 0x20>;
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index a935523a1eb8..147c73f68f1d 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -744,7 +744,7 @@
/* no rts / cts for uart2 */
};
- spi {
+ spi-pins {
spi_txd:spi-txd {
rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>;
};
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index bb1f01e037ba..c1c576875bc8 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -63,7 +63,7 @@
vcc_flash: flash-regulator {
compatible = "regulator-fixed";
- regulator-name = "vcc_sys";
+ regulator-name = "vcc_flash";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
startup-delay-us = <150>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
index f36f6f459225..365382ab9ebd 100644
--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
@@ -161,10 +161,6 @@
};
};
-&emmc {
- /delete-property/mmc-hs200-1_8v;
-};
-
&i2c2 {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index f72d616d1bf8..9647d9b6b299 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -125,10 +125,6 @@
power-supply = <&backlight_regulator>;
};
-&emmc {
- /delete-property/mmc-hs200-1_8v;
-};
-
&gpio_keys {
pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 17ec2e2d7a60..30f1384f619b 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -210,6 +210,7 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clock-frequency = <24000000>;
+ arm,no-tick-in-suspend;
};
timer: timer@ff810000 {
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
index f4afda3594f8..de04d8764b0f 100644
--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
@@ -167,6 +167,10 @@
};
};
+&clocks {
+ clocks = <&fin_pll>;
+};
+
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts
index ecf35ec466f7..7ade1a0686d2 100644
--- a/arch/arm/boot/dts/s3c6410-smdk6410.dts
+++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
@@ -71,6 +71,10 @@
};
};
+&clocks {
+ clocks = <&fin_pll>;
+};
+
&sdhci0 {
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 8a394f336003..ee65702f9645 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -517,7 +517,7 @@
#define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
#define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
#define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
+#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
#define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
#define PIN_PC10 74
#define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 4c84d333fc7e..33c0d2668934 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1109,49 +1109,49 @@
usart0_clk: usart0_clk {
#clock-cells = <0>;
reg = <12>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
usart1_clk: usart1_clk {
#clock-cells = <0>;
reg = <13>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
usart2_clk: usart2_clk {
#clock-cells = <0>;
reg = <14>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
usart3_clk: usart3_clk {
#clock-cells = <0>;
reg = <15>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
twi0_clk: twi0_clk {
reg = <18>;
#clock-cells = <0>;
- atmel,clk-output-range = <0 16625000>;
+ atmel,clk-output-range = <0 41500000>;
};
twi1_clk: twi1_clk {
#clock-cells = <0>;
reg = <19>;
- atmel,clk-output-range = <0 16625000>;
+ atmel,clk-output-range = <0 41500000>;
};
twi2_clk: twi2_clk {
#clock-cells = <0>;
reg = <20>;
- atmel,clk-output-range = <0 16625000>;
+ atmel,clk-output-range = <0 41500000>;
};
mci0_clk: mci0_clk {
@@ -1167,19 +1167,19 @@
spi0_clk: spi0_clk {
#clock-cells = <0>;
reg = <24>;
- atmel,clk-output-range = <0 133000000>;
+ atmel,clk-output-range = <0 166000000>;
};
spi1_clk: spi1_clk {
#clock-cells = <0>;
reg = <25>;
- atmel,clk-output-range = <0 133000000>;
+ atmel,clk-output-range = <0 166000000>;
};
tcb0_clk: tcb0_clk {
#clock-cells = <0>;
reg = <26>;
- atmel,clk-output-range = <0 133000000>;
+ atmel,clk-output-range = <0 166000000>;
};
pwm_clk: pwm_clk {
@@ -1190,7 +1190,7 @@
adc_clk: adc_clk {
#clock-cells = <0>;
reg = <29>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
dma0_clk: dma0_clk {
@@ -1221,13 +1221,13 @@
ssc0_clk: ssc0_clk {
#clock-cells = <0>;
reg = <38>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
ssc1_clk: ssc1_clk {
#clock-cells = <0>;
reg = <39>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
sha_clk: sha_clk {
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
index c5a3772741bf..0fac79f75c06 100644
--- a/arch/arm/boot/dts/sama5d3_can.dtsi
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
@@ -37,13 +37,13 @@
can0_clk: can0_clk {
#clock-cells = <0>;
reg = <40>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
can1_clk: can1_clk {
#clock-cells = <0>;
reg = <41>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
};
};
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index 801f9745e82f..b80dbc45a3c2 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -23,6 +23,7 @@
tcb1_clk: tcb1_clk {
#clock-cells = <0>;
reg = <27>;
+ atmel,clk-output-range = <0 166000000>;
};
};
};
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
index 2511d748867b..71818c7bfb67 100644
--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -42,13 +42,13 @@
uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
uart1_clk: uart1_clk {
#clock-cells = <0>;
reg = <17>;
- atmel,clk-output-range = <0 66000000>;
+ atmel,clk-output-range = <0 83000000>;
};
};
};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
index afea3645ada4..89d55894d916 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
@@ -88,7 +88,7 @@
status = "okay";
speed-mode = <0>;
- adxl345: adxl345@0 {
+ adxl345: adxl345@53 {
compatible = "adi,adxl345";
reg = <0x53>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index d309314f3a36..5f1769209526 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -188,7 +188,7 @@
<0xa0410100 0x100>;
};
- scu@a04100000 {
+ scu@a0410000 {
compatible = "arm,cortex-a9-scu";
reg = <0xa0410000 0x100>;
};
@@ -864,7 +864,7 @@
power-domains = <&pm_domains DOMAIN_VAPE>;
};
- ssp@80002000 {
+ spi@80002000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x80002000 0x1000>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
@@ -878,7 +878,7 @@
power-domains = <&pm_domains DOMAIN_VAPE>;
};
- ssp@80003000 {
+ spi@80003000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x80003000 0x1000>;
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
index 5c5cea232743..1ec193b0c506 100644
--- a/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
+++ b/arch/arm/boot/dts/ste-href-family-pinctrl.dtsi
@@ -607,16 +607,20 @@
mcde {
lcd_default_mode: lcd_default {
- default_mux {
+ default_mux1 {
/* Mux in VSI0 and all the data lines */
function = "lcd";
groups =
"lcdvsi0_a_1", /* VSI0 for LCD */
"lcd_d0_d7_a_1", /* Data lines */
"lcd_d8_d11_a_1", /* TV-out */
- "lcdaclk_b_1", /* Clock line for TV-out */
"lcdvsi1_a_1"; /* VSI1 for HDMI */
};
+ default_mux2 {
+ function = "lcda";
+ groups =
+ "lcdaclk_b_1"; /* Clock line for TV-out */
+ };
default_cfg1 {
pins =
"GPIO68_E1", /* VSI0 */
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index ece222d51717..cf8d03bc42c1 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -57,7 +57,7 @@
};
};
- ssp@80002000 {
+ spi@80002000 {
/*
* On the first generation boards, this SSP/SPI port was connected
* to the AB8500.
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 386eee6de232..272d36c3d223 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -386,7 +386,7 @@
pinctrl-1 = <&i2c3_sleep_mode>;
};
- ssp@80002000 {
+ spi@80002000 {
pinctrl-names = "default";
pinctrl-0 = <&ssp0_snowball_mode>;
};
diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts
index 2f5107ffeef0..ea6768b96a9d 100644
--- a/arch/arm/boot/dts/ste-u300.dts
+++ b/arch/arm/boot/dts/ste-u300.dts
@@ -441,7 +441,7 @@
dma-names = "rx";
};
- spi: ssp@c0006000 {
+ spi: spi@c0006000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0xc0006000 0x1000>;
interrupt-parent = <&vica>;
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index ce1960453a0b..3bfa79717dfa 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -174,7 +174,7 @@
};
pmu {
- compatible = "arm,cortex-a7-pmu", "arm,cortex-a15-pmu";
+ compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 94cf5a1c7172..db5d30598ad6 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -172,7 +172,7 @@
};
pmu {
- compatible = "arm,cortex-a7-pmu", "arm,cortex-a15-pmu";
+ compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index b4bfa5586c23..23d4c837b87a 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -521,10 +521,10 @@
gpio-keys {
compatible = "gpio-keys";
- power {
- label = "Power";
+ wakeup {
+ label = "Wakeup";
gpios = <&gpio TEGRA_GPIO(J, 7) GPIO_ACTIVE_LOW>;
- linux,code = <KEY_POWER>;
+ linux,code = <KEY_WAKEUP>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index 192b95177aac..826bdd0b8a25 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -147,14 +147,14 @@
/* Apalis MMC1 */
sdmmc3_clk_pa6 {
- nvidia,pins = "sdmmc3_clk_pa6",
- "sdmmc3_cmd_pa7";
+ nvidia,pins = "sdmmc3_clk_pa6";
nvidia,function = "sdmmc3";
nvidia,pull = <TEGRA_PIN_PULL_NONE>;
nvidia,tristate = <TEGRA_PIN_DISABLE>;
};
sdmmc3_dat0_pb7 {
- nvidia,pins = "sdmmc3_dat0_pb7",
+ nvidia,pins = "sdmmc3_cmd_pa7",
+ "sdmmc3_dat0_pb7",
"sdmmc3_dat1_pb6",
"sdmmc3_dat2_pb5",
"sdmmc3_dat3_pb4",
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 5030065cbdfe..ad30d2a51af1 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -823,7 +823,7 @@
nvidia,elastic-limit = <16>;
nvidia,term-range-adj = <6>;
nvidia,xcvr-setup = <51>;
- nvidia.xcvr-setup-use-fuses;
+ nvidia,xcvr-setup-use-fuses;
nvidia,xcvr-lsfslew = <1>;
nvidia,xcvr-lsrslew = <1>;
nvidia,xcvr-hsslew = <32>;
@@ -860,7 +860,7 @@
nvidia,elastic-limit = <16>;
nvidia,term-range-adj = <6>;
nvidia,xcvr-setup = <51>;
- nvidia.xcvr-setup-use-fuses;
+ nvidia,xcvr-setup-use-fuses;
nvidia,xcvr-lsfslew = <2>;
nvidia,xcvr-lsrslew = <2>;
nvidia,xcvr-hsslew = <32>;
@@ -896,7 +896,7 @@
nvidia,elastic-limit = <16>;
nvidia,term-range-adj = <6>;
nvidia,xcvr-setup = <51>;
- nvidia.xcvr-setup-use-fuses;
+ nvidia,xcvr-setup-use-fuses;
nvidia,xcvr-lsfslew = <2>;
nvidia,xcvr-lsrslew = <2>;
nvidia,xcvr-hsslew = <32>;
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 409e069b3a84..00d7d28e86f0 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -303,7 +303,7 @@
clock-names = "apb_pclk";
};
- ssp@101f4000 {
+ spi@101f4000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x101f4000 0x1000>;
interrupts = <11>;
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index a923524d1040..8617323eb273 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -379,7 +379,7 @@ static int __init nocache_trampoline(unsigned long _arg)
unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
phys_reset_t phys_reset;
- mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+ mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
setup_mm_for_reboot();
__mcpm_cpu_going_down(cpu, cluster);
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index d59009878312..067d73e3b28b 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -97,7 +97,6 @@ CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index c1470a00f55a..031d9d3549b9 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -207,7 +207,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index a016ecc0084b..178ee84dffa1 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -591,7 +591,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 60d3fecd7a22..dc873d23d603 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -354,7 +354,6 @@ CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index a1ede1966baf..7d9aa284cb6f 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -202,7 +202,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 5d934a0039d7..cb2486a526e6 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -265,6 +265,8 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+ if (err)
+ return err;
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -289,6 +291,8 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+ if (err)
+ return err;
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
index fac0533ea633..f64e8413ab9a 100644
--- a/arch/arm/crypto/sha256-armv4.pl
+++ b/arch/arm/crypto/sha256-armv4.pl
@@ -205,10 +205,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
index 555a1a8eec90..72c248081d27 100644
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ b/arch/arm/crypto/sha256-core.S_shipped
@@ -86,10 +86,11 @@ K256:
.global sha256_block_data_order
.type sha256_block_data_order,%function
sha256_block_data_order:
+.Lsha256_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
index a2b11a844357..5fe336420bcf 100644
--- a/arch/arm/crypto/sha512-armv4.pl
+++ b/arch/arm/crypto/sha512-armv4.pl
@@ -267,10 +267,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
index 3694c4d4ca2b..de9bd7f55242 100644
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ b/arch/arm/crypto/sha512-core.S_shipped
@@ -134,10 +134,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.global sha512_block_data_order
.type sha512_block_data_order,%function
sha512_block_data_order:
+.Lsha512_block_data_order:
#if __ARM_ARCH__<7
sub r3,pc,#8 @ sha512_block_data_order
#else
- adr r3,sha512_block_data_order
+ adr r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 513e03d138ea..8331cb0d3461 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -10,6 +10,8 @@
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+#else
+#define wfe() do { } while (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index b74b174ac9fc..b458e4122794 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -67,6 +67,8 @@
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 3d7351c844aa..2fd0a2619b0b 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,6 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
+/* number of IPIS _not_ including IPI_CPU_BACKTRACE */
#define NR_IPI 7
typedef struct {
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 8a1e8e995dae..08509183c7df 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -77,7 +77,11 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() smp_mb()
+#define cpu_relax() \
+ do { \
+ smp_mb(); \
+ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
+ } while (0)
#else
#define cpu_relax() barrier()
#endif
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 6c7182f32cef..e6c2f426f8c8 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -7,6 +7,7 @@ struct sleep_save_sp {
};
extern void cpu_resume(void);
+extern void cpu_resume_no_hyp(void);
extern void cpu_resume_arm(void);
extern int cpu_suspend(unsigned long, int (*)(unsigned long));
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0f6c6b873bc5..e05c31af48d1 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -379,6 +379,13 @@ do { \
#define __get_user_asm_byte(x, addr, err) \
__get_user_asm(x, addr, err, ldrb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrh)
+
+#else
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -397,6 +404,8 @@ do { \
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
#endif
@@ -472,6 +481,13 @@ do { \
#define __put_user_asm_byte(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, strb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -488,6 +504,8 @@ do { \
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __put_user_asm_word(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, str)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index d69adfb3d79e..178a2a960659 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -263,16 +263,15 @@ __sys_trace:
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
- b ret_slow_syscall
-__sys_trace_return:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+__sys_trace_return_nosave:
+ enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
-__sys_trace_return_nosave:
- enable_irq_notrace
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 15d073ae5da2..f5e5e3e19659 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -179,8 +179,8 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4
- cmp r7, #1
- bne 2f
+ teq r7, #0
+ beq 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
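The hyp-stub change above relaxes the GICv3 detection: ID_PFR1 bits [31:28] describe the GIC system-register interface, and any non-zero value now counts as "present" instead of only the value 1. A minimal C sketch of that field test, with illustrative names and a plain integer standing in for the coprocessor read:

#include <stdint.h>
#include <stdbool.h>

/* Mirror of "ubfx r7, r7, #28, #4" followed by "teq r7, #0; beq 2f". */
static bool gic_sysregs_present(uint32_t id_pfr1)
{
	uint32_t field = (id_pfr1 >> 28) & 0xf;	/* GIC CPU interface field */

	return field != 0;	/* any non-zero encoding means "available" */
}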
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index b18c1ea56bed..ef6b27fe1d2e 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -87,8 +87,11 @@ void machine_crash_nonpanic_core(void *unused)
set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
- while (1)
+
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
static void machine_kexec_mask_interrupts(void)
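Several hunks in this series (machine_kexec.c here, plus smp.c and prm_common.c further down) turn bare "while (1) cpu_relax();" spins into loops that also execute wfe, so a parked CPU sleeps until an event or interrupt arrives instead of busy-waiting at full power. A hedged, bare-metal ARM sketch of that parking loop; the helper names are illustrative and not the kernel's:

static inline void relax_sketch(void)
{
	__asm__ __volatile__("nop" ::: "memory");
}

static inline void wfe_sketch(void)
{
	/* Wait For Event: low-power stall until SEV, IRQ or debug event */
	__asm__ __volatile__("wfe" ::: "memory");
}

static void park_this_cpu(void)
{
	for (;;) {
		relax_sketch();
		wfe_sketch();
	}
}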
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 69bda1a5707e..1f665acaa6a9 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -15,7 +15,7 @@ struct patch {
unsigned int insn;
};
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
__acquires(&patch_lock)
@@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
return addr;
if (flags)
- spin_lock_irqsave(&patch_lock, *flags);
+ raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
@@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
clear_fixmap(fixmap);
if (flags)
- spin_unlock_irqrestore(&patch_lock, *flags);
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
}
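The patch.c and unwind.c hunks convert ordinary spinlocks to raw spinlocks because these paths must stay truly atomic even on preempt-rt kernels, where a regular spinlock may sleep. A minimal kernel-context sketch of the raw variant's locking pattern, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int example_state;

static void example_update(int v)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_state = v;	/* critical section never sleeps */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}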
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 0f6c1000582c..c8569390e7e7 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,6 +119,14 @@ ENDPROC(cpu_resume_after_mmu)
.text
.align
+#ifdef CONFIG_MCPM
+ .arm
+THUMB( .thumb )
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be) @ ensure we are in BE mode
+ b no_hyp
+#endif
+
#ifdef CONFIG_MMU
.arm
ENTRY(cpu_resume_arm)
@@ -134,6 +142,7 @@ ARM_BE8(setend be) @ ensure we are in BE mode
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
+no_hyp:
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
@@ -163,6 +172,9 @@ ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
+#endif
.align 2
_sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index bc83ec7ed53f..deea60f01d24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -75,6 +75,10 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ /*
+ * CPU_BACKTRACE is special and not included in NR_IPI
+ * or tracable with trace_ipi_*
+ */
IPI_CPU_BACKTRACE,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
@@ -602,8 +606,10 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
static DEFINE_PER_CPU(struct completion *, cpu_completion);
@@ -799,7 +805,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 0bee233fef9a..314cfb232a63 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 890439737374..bf6e45dec017 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -86,6 +86,8 @@ static bool __init cntvct_functional(void)
*/
np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (!np)
+ np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+ if (!np)
goto out_put;
if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
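The vdso.c hunk adds a fallback lookup: if no "arm,armv7-timer" node exists, try "arm,armv8-timer" before giving up. A short kernel-context sketch of that lookup-with-fallback pattern; the wrapper name is illustrative:

#include <linux/of.h>

static struct device_node *find_arch_timer_node(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");

	return np;	/* caller must of_node_put() a non-NULL result */
}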
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index a670c70f4def..dfc00a5bdc10 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -801,7 +801,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_init *init)
{
- unsigned int i;
+ unsigned int i, ret;
int phys_target = kvm_target_cpu();
if (init->target != phys_target)
@@ -836,9 +836,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
vcpu->arch.target = phys_target;
/* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
+ ret = kvm_reset_vcpu(vcpu);
+ if (ret) {
+ vcpu->arch.target = -1;
+ bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+ }
+ return ret;
+}
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
struct kvm_vcpu_init *init)
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 08443a15e6be..3caee91bca08 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
unsigned int len;
int mask;
+ /* Detect an already handled MMIO return */
+ if (unlikely(!vcpu->mmio_needed))
+ return 0;
+
+ vcpu->mmio_needed = 0;
+
if (!run->mmio.is_write) {
len = run->mmio.len;
if (len > sizeof(unsigned long))
@@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
+ vcpu->mmio_needed = 1;
if (!ret) {
/* We handled the access successfully in the kernel. */
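The mmio.c hunk introduces a one-shot guard: vcpu->mmio_needed is set when an MMIO exit is generated and cleared the first time the return is handled, so a spurious second call becomes a no-op. A standalone C sketch of that guard; the structure and field names are illustrative, not the KVM ones:

struct mmio_state_sketch {
	int pending;			/* set when an MMIO exit is generated */
	unsigned long data;
};

static int consume_mmio_return(struct mmio_state_sketch *s, unsigned long data)
{
	if (!s->pending)		/* already handled, or never requested */
		return 0;

	s->pending = 0;			/* consume the result exactly once */
	s->data = data;
	return 1;
}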
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index b3d268a79f05..bb0d5e21d60b 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -366,7 +366,8 @@ static void stage2_flush_memslot(struct kvm *kvm,
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
+ if (!stage2_pgd_none(*pgd))
+ stage2_flush_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 27f4d96258a2..b3ecffb76c3f 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -38,7 +38,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
- NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
+ NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
CFLAGS_xor-neon.o += $(NEON_FLAGS)
obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
endif
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 6709a8d33963..f1e34f16cfab 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -100,7 +100,7 @@ ENTRY(arm_copy_from_user)
ENDPROC(arm_copy_from_user)
- .pushsection .fixup,"ax"
+ .pushsection .text.fixup,"ax"
.align 0
copy_abort_preamble
ldmfd sp!, {r1, r2, r3}
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 746e7801dcdf..b2e4bc3a635e 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -42,6 +42,12 @@ _ASM_NOKPROBE(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
+#if __LINUX_ARM_ARCH__ >= 6
+
+2: TUSER(ldrh) r2, [r0]
+
+#else
+
#ifdef CONFIG_CPU_USE_DOMAINS
rb .req ip
2: ldrbt r2, [r0], #1
@@ -56,6 +62,9 @@ rb .req r0
#else
orr r2, rb, r2, lsl #8
#endif
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
mov r0, #0
ret lr
ENDPROC(__get_user_2)
@@ -145,7 +154,9 @@ _ASM_NOKPROBE(__get_user_bad8)
.pushsection __ex_table, "a"
.long 1b, __get_user_bad
.long 2b, __get_user_bad
+#if __LINUX_ARM_ARCH__ < 6
.long 3b, __get_user_bad
+#endif
.long 4b, __get_user_bad
.long 5b, __get_user_bad8
.long 6b, __get_user_bad8
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 38d660d3705f..515eeaa9975c 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -41,16 +41,13 @@ ENDPROC(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
- mov ip, r2, lsr #8
-#ifdef CONFIG_THUMB2_KERNEL
-#ifndef __ARMEB__
-2: TUSER(strb) r2, [r0]
-3: TUSER(strb) ip, [r0, #1]
+#if __LINUX_ARM_ARCH__ >= 6
+
+2: TUSER(strh) r2, [r0]
+
#else
-2: TUSER(strb) ip, [r0]
-3: TUSER(strb) r2, [r0, #1]
-#endif
-#else /* !CONFIG_THUMB2_KERNEL */
+
+ mov ip, r2, lsr #8
#ifndef __ARMEB__
2: TUSER(strb) r2, [r0], #1
3: TUSER(strb) ip, [r0]
@@ -58,7 +55,8 @@ ENTRY(__put_user_2)
2: TUSER(strb) ip, [r0], #1
3: TUSER(strb) r2, [r0]
#endif
-#endif /* CONFIG_THUMB2_KERNEL */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
mov r0, #0
ret lr
ENDPROC(__put_user_2)
@@ -91,7 +89,9 @@ ENDPROC(__put_user_bad)
.pushsection __ex_table, "a"
.long 1b, __put_user_bad
.long 2b, __put_user_bad
+#if __LINUX_ARM_ARCH__ < 6
.long 3b, __put_user_bad
+#endif
.long 4b, __put_user_bad
.long 5b, __put_user_bad
.long 6b, __put_user_bad
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
index 2c40aeab3eaa..c691b901092f 100644
--- a/arch/arm/lib/xor-neon.c
+++ b/arch/arm/lib/xor-neon.c
@@ -14,7 +14,7 @@
MODULE_LICENSE("GPL");
#ifndef __ARM_NEON__
-#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
#endif
/*
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 8e4539f69fdc..3bdf0d588238 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1479,6 +1479,8 @@ static __init void da850_evm_init(void)
if (ret)
pr_warn("%s: dsp/rproc registration failed: %d\n",
__func__, ret);
+
+ regulator_has_full_constraints();
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 9a22d40602aa..24779504f489 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -706,6 +706,9 @@ static struct platform_device da8xx_lcdc_device = {
.id = 0,
.num_resources = ARRAY_SIZE(da8xx_lcdc_resources),
.resource = da8xx_lcdc_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
};
int __init da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata)
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index ef3add999263..8db549c56914 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -864,8 +864,8 @@ static s8 dm365_queue_priority_mapping[][2] = {
};
static const struct dma_slave_map dm365_edma_map[] = {
- { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
- { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+ { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+ { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
{ "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
{ "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index cd350dee4df3..efcd400b2abb 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -37,6 +37,7 @@
#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
.text
+ .arch armv5te
/*
* Move DaVinci into deep sleep state
*
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index fd6da5419b51..2199c3adfd84 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -205,6 +205,7 @@ void __init exynos_firmware_init(void)
return;
addr = of_get_address(nd, 0, NULL, NULL);
+ of_node_put(nd);
if (!addr) {
pr_err("%s: No address specified.\n", __func__);
return;
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 3e1430a886b2..b406c12077b9 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -500,8 +500,27 @@ early_wakeup:
static void exynos5420_prepare_pm_resume(void)
{
+ unsigned int mpidr, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
WARN_ON(mcpm_cpu_powered_up());
+
+ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
+ /*
+ * When system is resumed on the LITTLE/KFC core (cluster 1),
+ * the DSCR is not properly updated until the power is turned
+ * on also for the cluster 0. Enable it for a while to
+ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
+ * block and avoid undefined instruction issue on CP14 reset.
+ */
+ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ pmu_raw_writel(0,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ }
}
static void exynos5420_pm_resume(void)
@@ -715,8 +734,10 @@ void __init exynos_pm_init(void)
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+ of_node_put(np);
return;
}
+ of_node_put(np);
pm_data = (const struct exynos_pm_data *) match->data;
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 5d43296bdbe4..1bb0b019bb15 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -116,6 +116,8 @@ obj-$(CONFIG_SOC_IMX7D) += suspend-imx7.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
obj-$(CONFIG_SOC_IMX7ULP) += suspend-imx7ulp.o
endif
+AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
AFLAGS_smp_wfe.o :=-Wa,-march=armv7-a
AFLAGS_ddr3_freq_imx7d.o :=-Wa,-march=armv7-a
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 17cde76138b4..c49ef5d0f19c 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -195,7 +195,6 @@ bool imx_gpc_usb_wakeup_enabled(void);
bool imx_gpc_enet_wakeup_enabled(void);
#ifdef CONFIG_SUSPEND
-void v7_cpu_resume(void);
void ca7_cpu_resume(void);
void imx53_suspend(void __iomem *ocram_vbase);
extern const u32 imx53_suspend_sz;
@@ -204,7 +203,6 @@ void imx6_suspend(void __iomem *ocram_vbase);
void imx7_suspend(void __iomem *ocram_vbase);
void imx7ulp_suspend(void __iomem *ocram_vbase);
#else
-static inline void v7_cpu_resume(void) {}
static inline void ca7_cpu_resume(void) {}
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
static const u32 imx53_suspend_sz;
@@ -215,6 +213,8 @@ static inline void imx7ulp_suspend(void __iomem *ocram_vbase) {}
#endif
void pm_shutdown_notify_m4(void);
+void v7_cpu_resume(void);
+
void imx6_pm_ccm_init(const char *ccm_compat);
void imx6q_pm_init(void);
void imx6dl_pm_init(void);
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 02d55ae7e0eb..2a2129c1cdb8 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,34 +16,24 @@
#include "cpuidle.h"
#include "hardware.h"
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- if (atomic_inc_return(&master) == num_online_cpus()) {
- /*
- * With this lock, we prevent other cpu to exit and enter
- * this function again and become the master.
- */
- if (!spin_trylock(&master_lock))
- goto idle;
+ spin_lock(&cpuidle_lock);
+ if (++num_idle_cpus == num_online_cpus())
imx6_set_lpm(WAIT_UNCLOCKED);
- if (atomic_read(&master) != num_online_cpus())
- imx6_set_lpm(WAIT_CLOCKED);
- cpu_do_idle();
- imx6_set_lpm(WAIT_CLOCKED);
- spin_unlock(&master_lock);
- goto done;
- }
+ spin_unlock(&cpuidle_lock);
-idle:
cpu_do_idle();
-done:
- atomic_dec(&master);
- imx6_set_lpm(WAIT_CLOCKED);
+ spin_lock(&cpuidle_lock);
+ if (num_idle_cpus-- == num_online_cpus())
+ imx6_set_lpm(WAIT_CLOCKED);
+ spin_unlock(&cpuidle_lock);
+
return index;
}
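The cpuidle-imx6q.c rework above drops the atomic "master" bookkeeping in favour of a counter under a spinlock: the last CPU to enter idle switches the SoC to WAIT_UNCLOCKED, and the first CPU to wake restores WAIT_CLOCKED. A hedged user-space C sketch of that counting pattern, with a pthread mutex standing in for the spinlock and printf for the LPM switch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t idle_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_idle;
static const int num_cpus = 4;		/* stand-in for num_online_cpus() */

static void enter_idle_sketch(void)
{
	pthread_mutex_lock(&idle_lock);
	if (++num_idle == num_cpus)	/* last CPU going idle */
		printf("enter WAIT_UNCLOCKED\n");
	pthread_mutex_unlock(&idle_lock);

	/* ... the cpu_do_idle() equivalent would run here ... */

	pthread_mutex_lock(&idle_lock);
	if (num_idle-- == num_cpus)	/* first CPU waking up */
		printf("back to WAIT_CLOCKED\n");
	pthread_mutex_unlock(&idle_lock);
}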
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 534e47ff037b..71f667753ef9 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -240,7 +240,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(0xf, 1);
+ imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
if (imx_get_soc_revision() >= IMX_CHIP_REVISION_1_2) {
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 7b1f7acddfd6..1611dff22c4c 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -1272,6 +1272,28 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata
IMX6Q_GPR1_GINT);
}
+static void imx6_pm_stby_poweroff(void)
+{
+ imx6_set_lpm(STOP_POWER_OFF);
+ imx6q_suspend_finish(0);
+
+ mdelay(1000);
+
+ pr_emerg("Unable to poweroff system\n");
+}
+
+static int imx6_pm_stby_poweroff_probe(void)
+{
+ if (pm_power_off) {
+ pr_warn("%s: pm_power_off already claimed %p %pf!\n",
+ __func__, pm_power_off, pm_power_off);
+ return -EBUSY;
+ }
+
+ pm_power_off = imx6_pm_stby_poweroff;
+ return 0;
+}
+
void __init imx6_pm_ccm_init(const char *ccm_compat)
{
struct device_node *np;
@@ -1288,6 +1310,9 @@ void __init imx6_pm_ccm_init(const char *ccm_compat)
val = readl_relaxed(ccm_base + CLPCR);
val &= ~BM_CLPCR_LPM;
writel_relaxed(val, ccm_base + CLPCR);
+
+ if (of_property_read_bool(np, "fsl,pmic-stby-poweroff"))
+ imx6_pm_stby_poweroff_probe();
}
void __init imx6q_pm_init(void)
diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
new file mode 100644
index 000000000000..5bd1ba7ef15b
--- /dev/null
+++ b/arch/arm/mach-imx/resume-imx6.S
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "hardware.h"
+
+/*
+ * The following code must assume it is running from physical address
+ * where absolute virtual addresses to the data section have to be
+ * turned into relative ones.
+ */
+
+ENTRY(v7_cpu_resume)
+ bl v7_invalidate_l1
+#ifdef CONFIG_CACHE_L2X0
+ bl l2c310_early_resume
+#endif
+ b cpu_resume
+ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index b8ad48fc85a9..ab8eee4875c0 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -735,20 +735,3 @@ dsm_resume_mmdc_done:
ret lr
ENDPROC(imx6_suspend)
-
-/*
- * The following code must assume it is running from physical address
- * where absolute virtual addresses to the data section have to be
- * turned into relative ones.
- */
-
-ENTRY(v7_cpu_resume)
- bl v7_invalidate_l1
- is_cortex_a7
- beq done
-#ifdef CONFIG_CACHE_L2X0
- bl l2c310_early_resume
-#endif
-done:
- b cpu_resume
-ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec2b1df..116feb6b261e 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c
index e4d709c8ed32..76d3083f1f63 100644
--- a/arch/arm/mach-ks8695/board-acs5k.c
+++ b/arch/arm/mach-ks8695/board-acs5k.c
@@ -92,7 +92,7 @@ static struct i2c_board_info acs5k_i2c_devs[] __initdata = {
},
};
-static void acs5k_i2c_init(void)
+static void __init acs5k_i2c_init(void)
{
/* The gpio interface */
platform_device_register(&acs5k_i2c_device);
diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c
index 52de382fc804..7e49dfda3d2f 100644
--- a/arch/arm/mach-omap1/id.c
+++ b/arch/arm/mach-omap1/id.c
@@ -200,10 +200,10 @@ void __init omap_check_revision(void)
printk(KERN_INFO "Unknown OMAP cpu type: 0x%02x\n", cpu_type);
}
- printk(KERN_INFO "OMAP%04x", omap_revision >> 16);
+ pr_info("OMAP%04x", omap_revision >> 16);
if ((omap_revision >> 8) & 0xff)
- printk(KERN_INFO "%x", (omap_revision >> 8) & 0xff);
- printk(KERN_INFO " revision %i handled as %02xxx id: %08x%08x\n",
+ pr_cont("%x", (omap_revision >> 8) & 0xff);
+ pr_cont(" revision %i handled as %02xxx id: %08x%08x\n",
die_rev, omap_revision & 0xff, system_serial_low,
system_serial_high);
}
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index cc6d9fa60924..9d942f022f2f 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -199,8 +199,8 @@ void __init omap2xxx_check_revision(void)
pr_info("%s", soc_name);
if ((omap_rev() >> 8) & 0x0f)
- pr_info("%s", soc_rev);
- pr_info("\n");
+ pr_cont("%s", soc_rev);
+ pr_cont("\n");
}
#define OMAP3_SHOW_FEATURE(feat) \
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index cf65ab8bb004..e5dcbda20129 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -131,6 +131,9 @@ static int __init omap4_sram_init(void)
struct device_node *np;
struct gen_pool *sram_pool;
+ if (!soc_is_omap44xx() && !soc_is_omap54xx())
+ return 0;
+
np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
if (!np)
pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index bfc74954540c..9421b78f869d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2588,7 +2588,7 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
*/
static int _setup_reset(struct omap_hwmod *oh)
{
- int r;
+ int r = 0;
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index e2d84aa7f595..fa1c6707877a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -939,7 +939,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP),
.sysc_fields = &omap_hwmod_sysc_type2,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 1ab7096af8e2..f850fc3a91e8 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -387,7 +387,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS,
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type2,
};
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 88676fe9b119..c3b3972c301a 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -308,108 +308,15 @@ static void __init omap3_logicpd_torpedo_init(void)
}
/* omap3pandora legacy devices */
-#define PANDORA_WIFI_IRQ_GPIO 21
-#define PANDORA_WIFI_NRESET_GPIO 23
static struct platform_device pandora_backlight = {
.name = "pandora-backlight",
.id = -1,
};
-static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
-};
-
-static struct regulator_init_data pandora_vmmc3 = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply),
- .consumer_supplies = pandora_vmmc3_supply,
-};
-
-static struct fixed_voltage_config pandora_vwlan = {
- .supply_name = "vwlan",
- .microvolts = 1800000, /* 1.8V */
- .gpio = PANDORA_WIFI_NRESET_GPIO,
- .startup_delay = 50000, /* 50ms */
- .enable_high = 1,
- .init_data = &pandora_vmmc3,
-};
-
-static struct platform_device pandora_vwlan_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &pandora_vwlan,
- },
-};
-
-static void pandora_wl1251_init_card(struct mmc_card *card)
-{
- /*
- * We have TI wl1251 attached to MMC3. Pass this information to
- * SDIO core because it can't be probed by normal methods.
- */
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 24000000;
- card->ocr = 0x80;
- }
-}
-
-static struct omap2_hsmmc_info pandora_mmc3[] = {
- {
- .mmc = 3,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .gpio_cd = -EINVAL,
- .gpio_wp = -EINVAL,
- .init_card = pandora_wl1251_init_card,
- },
- {} /* Terminator */
-};
-
-static void __init pandora_wl1251_init(void)
-{
- struct wl1251_platform_data pandora_wl1251_pdata;
- int ret;
-
- memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
-
- pandora_wl1251_pdata.power_gpio = -1;
-
- ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
- if (ret < 0)
- goto fail;
-
- pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
- if (pandora_wl1251_pdata.irq < 0)
- goto fail_irq;
-
- pandora_wl1251_pdata.use_eeprom = true;
- ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
- if (ret < 0)
- goto fail_irq;
-
- return;
-
-fail_irq:
- gpio_free(PANDORA_WIFI_IRQ_GPIO);
-fail:
- pr_err("wl1251 board initialisation failed\n");
-}
-
static void __init omap3_pandora_legacy_init(void)
{
platform_device_register(&pandora_backlight);
- platform_device_register(&pandora_vwlan_device);
- omap_hsmmc_init(pandora_mmc3);
- omap_hsmmc_late_init(pandora_mmc3);
- pandora_wl1251_init();
}
#endif /* CONFIG_ARCH_OMAP3 */
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 718981bb80cd..0aec48c1736b 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void)
* registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
* No return value.
*/
-static void __init omap3xxx_prm_enable_io_wakeup(void)
+static void omap3xxx_prm_enable_io_wakeup(void)
{
if (prm_features & PRM_HAS_IO_WAKEUP)
omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index f1ca9479491b..9e14604b9642 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -533,8 +533,10 @@ void omap_prm_reset_system(void)
prm_ll_data->reset_system();
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
/**
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index 6d3517dc4772..82aac38fa2cf 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
} while (1);
idma->state = ~DMA_ST_AB;
- disable_irq(irq);
+ disable_irq_nosync(irq);
return IRQ_HANDLED;
}
@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
+ idma->dma_addr = idma->dma.sg->dma_address;
+ idma->dma_len = idma->dma.sg->length;
+
iomd_writeb(DMA_CR_C, dma_base + CR);
idma->state = DMA_ST_AB;
}
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
index 66502e6207fe..fce7fecbd8fa 100644
--- a/arch/arm/mach-rpc/irq.c
+++ b/arch/arm/mach-rpc/irq.c
@@ -117,7 +117,7 @@ extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end;
void __init rpc_init_irq(void)
{
- unsigned int irq, clr, set = 0;
+ unsigned int irq, clr, set;
iomd_writeb(0, IOMD_IRQMASKA);
iomd_writeb(0, IOMD_IRQMASKB);
@@ -129,6 +129,7 @@ void __init rpc_init_irq(void)
for (irq = 0; irq < NR_IRQS; irq++) {
clr = IRQ_NOREQUEST;
+ set = 0;
if (irq <= 6 || (irq >= 9 && irq <= 15))
clr |= IRQ_NOPROBE;
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index e3070fdab80b..3fe4ae654047 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -56,16 +56,16 @@ ENTRY(tegra_resume)
cmp r6, #TEGRA20
beq 1f @ Yes
/* Clear the flow controller flags for this CPU. */
- cpu_to_csr_reg r1, r0
+ cpu_to_csr_reg r3, r0
mov32 r2, TEGRA_FLOW_CTRL_BASE
- ldr r1, [r2, r1]
+ ldr r1, [r2, r3]
/* Clear event & intr flag */
orr r1, r1, \
#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
movw r0, #0x3FFD @ enable, cluster_switch, immed, bitmaps
@ & ext flags for CPU power mgnt
bic r1, r1, r0
- str r1, [r2]
+ str r1, [r2, r3]
1:
mov32 r9, 0xc09
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index 16e5ff03383c..91b3f06e5425 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -382,6 +382,14 @@ _pll_m_c_x_done:
pll_locked r1, r0, CLK_RESET_PLLC_BASE
pll_locked r1, r0, CLK_RESET_PLLX_BASE
+ tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+ cmp r1, #TEGRA30
+ beq 1f
+ ldr r1, [r0, #CLK_RESET_PLLP_BASE]
+ bic r1, r1, #(1<<31) @ disable PllP bypass
+ str r1, [r0, #CLK_RESET_PLLP_BASE]
+1:
+
mov32 r7, TEGRA_TMRUS_BASE
ldr r1, [r7]
add r1, r1, #LOCK_DELAY
@@ -641,7 +649,10 @@ tegra30_switch_cpu_to_clk32k:
str r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
/* disable PLLP, PLLA, PLLC and PLLX */
+ tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+ cmp r1, #TEGRA30
ldr r0, [r5, #CLK_RESET_PLLP_BASE]
+ orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster
bic r0, r0, #(1 << 30)
str r0, [r5, #CLK_RESET_PLLP_BASE]
ldr r0, [r5, #CLK_RESET_PLLA_BASE]
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index fe488523694c..635b0d549487 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
static int __init ve_spc_clk_init(void)
{
- int cpu;
+ int cpu, cluster;
struct clk *clk;
+ bool init_opp_table[MAX_CLUSTERS] = { false };
if (!info)
return 0; /* Continue only if SPC is initialised */
@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
continue;
}
+ cluster = topology_physical_package_id(cpu_dev->id);
+ if (init_opp_table[cluster])
+ continue;
+
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu);
+ else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
+ topology_core_cpumask(cpu_dev->id)))
+ pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
+ else
+ init_opp_table[cluster] = true;
}
platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 7cd9865bdeb7..94929eb707f0 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
* 0x4: Jump by mov instruction
* 0x8: Jumping address
*/
- memcpy((__force void *)zero, &zynq_secondary_trampoline,
+ memcpy_toio(zero, &zynq_secondary_trampoline,
trampoline_size);
writel(address, zero + trampoline_size);
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 7d5f4c736a16..cd18eda014c2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -767,6 +767,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
return NULL;
}
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, unsigned long *inst)
+{
+ u32 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_arm(instr);
+
+ return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+ u16 instr = 0;
+ int fault;
+
+ if (user_mode(regs))
+ fault = get_user(instr, ip);
+ else
+ fault = probe_kernel_address(ip, instr);
+
+ *inst = __mem_to_opcode_thumb16(instr);
+
+ return fault;
+}
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -774,10 +804,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
unsigned long instr = 0, instrptr;
int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
unsigned int type;
- unsigned int fault;
u16 tinstr = 0;
int isize = 4;
int thumb2_32b = 0;
+ int fault;
if (interrupts_enabled(regs))
local_irq_enable();
@@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (thumb_mode(regs)) {
u16 *ptr = (u16 *)(instrptr & ~1);
- fault = probe_kernel_address(ptr, tinstr);
- tinstr = __mem_to_opcode_thumb16(tinstr);
+
+ fault = alignment_get_thumb(regs, ptr, &tinstr);
if (!fault) {
if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
IS_T32(tinstr)) {
/* Thumb-2 32-bit */
- u16 tinst2 = 0;
- fault = probe_kernel_address(ptr + 1, tinst2);
- tinst2 = __mem_to_opcode_thumb16(tinst2);
+ u16 tinst2;
+ fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
instr = __opcode_thumb32_compose(tinstr, tinst2);
thumb2_32b = 1;
} else {
@@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
}
} else {
- fault = probe_kernel_address((void *)instrptr, instr);
- instr = __mem_to_opcode_arm(instr);
+ fault = alignment_get_arm(regs, (void *)instrptr, &instr);
}
if (fault) {
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5ca207ada852..2539c8f9fb3f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -214,7 +214,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
@@ -284,7 +284,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
/*
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index afc1f84e763b..9bc272642d55 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -5,6 +5,7 @@
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
+#define FSR_CM (1 << 13)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1565d6b67163..0fe4a7025e46 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -192,6 +192,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
+ phys_addr_t addr = __pfn_to_phys(pfn);
+
+ if (__phys_to_pfn(addr) != pfn)
+ return 0;
+
return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
@@ -698,7 +703,8 @@ static void update_sections_early(struct section_perm perms[], int n)
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
- set_section_perms(perms, n, true, s->mm);
+ if (s->mm)
+ set_section_perms(perms, n, true, s->mm);
}
read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index f7c741358f37..7edc6c3f4bd9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1168,10 +1168,29 @@ void __init adjust_lowmem_bounds(void)
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ /*
+ * The first usable region must be PMD aligned. Mark its start
+ * as MEMBLOCK_NOMAP if it isn't
+ */
+ for_each_memblock(memory, reg) {
+ if (!memblock_is_nomap(reg)) {
+ if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+ phys_addr_t len;
+
+ len = round_up(reg->base, PMD_SIZE) - reg->base;
+ memblock_mark_nomap(reg->base, len);
+ }
+ break;
+ }
+ }
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
+ if (memblock_is_nomap(reg))
+ continue;
+
if (reg->base < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 93d0b6d0b63e..7fd448b23b94 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -72,8 +72,6 @@ struct jit_ctx {
#endif
};
-int bpf_jit_enable __read_mostly;
-
static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
unsigned int size)
{
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8de3b5b..d9612221e484 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 272f49b2c68f..bb29e6ebdc0d 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -605,7 +605,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@@ -666,7 +666,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index b92673efffff..97bd43c16cd8 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -230,18 +230,12 @@ static int pxa_ssp_probe(struct platform_device *pdev)
static int pxa_ssp_remove(struct platform_device *pdev)
{
- struct resource *res;
struct ssp_device *ssp;
ssp = platform_get_drvdata(pdev);
if (ssp == NULL)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- clk_put(ssp->clk);
-
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index e8229b9fee4a..3265b8f86069 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -258,7 +258,7 @@ config S3C_PM_DEBUG_LED_SMDK
config SAMSUNG_PM_CHECK
bool "S3C2410 PM Suspend Memory CRC"
- depends on PM
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
select CRC32
help
Enable the PM code's memory area checksum over sleep. This option
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 79214d5ff097..3af02d2a0b7f 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -18,9 +18,9 @@
#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h>
-#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/bug.h>
+#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>
@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
u64 cycle_now;
u64 nsec;
- cycle_now = arch_counter_get_cntvct();
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 79230bef86b9..c712588da1ce 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1080,6 +1080,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
endmenu
menu "Power management options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ee94597773fa..8d469aa5fc98 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -134,6 +134,7 @@ archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=$(boot)/dts
+ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
@@ -143,6 +144,7 @@ archclean:
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+endif
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 1f012c506434..cd3414898d10 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -16,7 +16,7 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.gz
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index e79f3defe002..c2ad4f97cef0 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -56,10 +56,10 @@
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 120 8>,
- <0 121 8>,
- <0 122 8>,
- <0 123 8>;
+ interrupts = <0 170 4>,
+ <0 171 4>,
+ <0 172 4>,
+ <0 173 4>;
interrupt-affinity = <&cpu0>,
<&cpu1>,
<&cpu2>,
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
index bd3adeac374f..2973a14523ea 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
@@ -106,7 +106,7 @@
clock-names = "uartclk", "apb_pclk";
};
- spi0: ssp@e1020000 {
+ spi0: spi@e1020000 {
status = "disabled";
compatible = "arm,pl022", "arm,primecell";
reg = <0 0xe1020000 0 0x1000>;
@@ -116,7 +116,7 @@
clock-names = "apb_pclk";
};
- spi1: ssp@e1030000 {
+ spi1: spi@e1030000 {
status = "disabled";
compatible = "arm,pl022", "arm,primecell";
reg = <0 0xe1030000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
index 25352ed943e6..00bcbf7688c7 100644
--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,10 +8,10 @@
*/
/* SoC fixed clocks */
- soc_uartclk: refclk7273800hz {
+ soc_uartclk: refclk7372800hz {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <7273800>;
+ clock-frequency = <7372800>;
clock-output-names = "juno:uartclk";
};
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index fbafa24cd533..5e0c5dc973e3 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -167,14 +167,14 @@
clock-names = "apb_pclk";
status="disabled";
};
- spi0: ssp@fe800000 {
+ spi0: spi@fe800000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x0 0xfe800000 0x1000>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_bus>;
clock-names = "apb_pclk";
};
- spi1: ssp@fe900000 {
+ spi1: spi@fe900000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x0 0xfe900000 0x1000>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
index e703e1149c75..f3b1ba6f7422 100644
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -167,14 +167,14 @@
clock-names = "apb_pclk";
status="disabled";
};
- spi0: ssp@fe800000 {
+ spi0: spi@fe800000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x0 0xfe800000 0x1000>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_bus>;
clock-names = "apb_pclk";
};
- spi1: ssp@fe900000 {
+ spi1: spi@fe900000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x0 0xfe900000 0x1000>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 68e6f88bdcfe..f2004b0955f1 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -96,7 +96,7 @@
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
- reg = <0x12000 0x400>;
+ reg = <0x12000 0x200>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 906fb836d241..d1e687b4911f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -281,6 +281,7 @@
status = "okay";
bus-width = <8>;
non-removable;
+ vqmmc-supply = <&vdd_1v8>;
};
clocks {
@@ -306,7 +307,8 @@
regulator-max-microvolt = <1320000>;
enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
regulator-ramp-delay = <80>;
- regulator-enable-ramp-delay = <1000>;
+ regulator-enable-ramp-delay = <2000>;
+ regulator-settling-time-us = <160>;
};
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index e5fc67bf46c2..a88afb6a9c0c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1583,7 +1583,7 @@
regulator-name = "VDD_HDMI_5V0";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
- gpio = <&exp1 12 GPIO_ACTIVE_LOW>;
+ gpio = <&exp1 12 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_5v0_sys>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 46045fe719da..87ef72bffd86 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1020,7 +1020,7 @@
compatible = "nvidia,tegra210-agic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x702f9000 0x2000>,
+ reg = <0x702f9000 0x1000>,
<0x702fa000 0x2000>;
interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clocks = <&tegra_car TEGRA210_CLK_APE>;
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 601be6127628..948efff7d830 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -355,6 +355,8 @@
l11 {
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <3337000>;
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
};
l12 {
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index ea319c055f5d..1b7b4684c35b 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -50,7 +50,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
/*
* Allow the asm code to perform the finalization if there is no
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 0ed9486f75dd..356ca9397a86 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -52,7 +52,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
/*
* Allow the asm code to perform the finalization if there is no
diff --git a/arch/arm64/crypto/sha256-core.S b/arch/arm64/crypto/sha256-core.S
deleted file mode 100644
index 3ce82cc860bc..000000000000
--- a/arch/arm64/crypto/sha256-core.S
+++ /dev/null
@@ -1,2061 +0,0 @@
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-//
-// Permission to use under GPLv2 terms is granted.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significanty faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement NEON
-// version of SHA256 for 64-bit processors. This is because performance
-// improvement on most wide-spread Cortex-A5x processors was observed
-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// 64-bit scalar version on *some* of the more recent processors. As
-// result 64-bit NEON version of SHA256 was added to provide best
-// all-round performance. For example it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x.
-// Which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha256_block_data_order
-.type sha256_block_data_order,%function
-.align 6
-sha256_block_data_order:
-#ifndef __KERNEL__
-# ifdef __ILP32__
- ldrsw x16,.LOPENSSL_armcap_P
-# else
- ldr x16,.LOPENSSL_armcap_P
-# endif
- adr x17,.LOPENSSL_armcap_P
- add x16,x16,x17
- ldr w16,[x16]
- tst w16,#ARMV8_SHA256
- b.ne .Lv8_entry
- tst w16,#ARMV7_NEON
- b.ne .Lneon_entry
-#endif
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*4
-
- ldp w20,w21,[x0] // load context
- ldp w22,w23,[x0,#2*4]
- ldp w24,w25,[x0,#4*4]
- add x2,x1,x2,lsl#6 // end of input
- ldp w26,w27,[x0,#6*4]
- adr x30,.LK256
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp w3,w4,[x1],#2*4
- ldr w19,[x30],#4 // *K++
- eor w28,w21,w22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev w3,w3 // 0
-#endif
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w6,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w3 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w4,w4 // 1
-#endif
- ldp w5,w6,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w7,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w4 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w5,w5 // 2
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w8,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w5 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w6,w6 // 3
-#endif
- ldp w7,w8,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w9,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w6 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w7,w7 // 4
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w10,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w7 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w10,ror#11 // Sigma1(e)
- ror w10,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w10,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w8,w8 // 5
-#endif
- ldp w9,w10,[x1],#2*4
- add w23,w23,w17 // h+=Sigma0(a)
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w11,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w8 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w11,ror#11 // Sigma1(e)
- ror w11,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w11,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w9,w9 // 6
-#endif
- add w22,w22,w17 // h+=Sigma0(a)
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w12,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w9 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w12,ror#11 // Sigma1(e)
- ror w12,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w12,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w10,w10 // 7
-#endif
- ldp w11,w12,[x1],#2*4
- add w21,w21,w17 // h+=Sigma0(a)
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- eor w13,w25,w25,ror#14
- and w17,w26,w25
- bic w28,w27,w25
- add w20,w20,w10 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w13,ror#11 // Sigma1(e)
- ror w13,w21,#2
- add w20,w20,w17 // h+=Ch(e,f,g)
- eor w17,w21,w21,ror#9
- add w20,w20,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w24,w24,w20 // d+=h
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w13,w17,ror#13 // Sigma0(a)
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w20,w20,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w11,w11 // 8
-#endif
- add w20,w20,w17 // h+=Sigma0(a)
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w14,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w11 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w14,ror#11 // Sigma1(e)
- ror w14,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w14,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w12,w12 // 9
-#endif
- ldp w13,w14,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w15,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w12 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w15,ror#11 // Sigma1(e)
- ror w15,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w15,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w13,w13 // 10
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w0,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w13 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w0,ror#11 // Sigma1(e)
- ror w0,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w0,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w14,w14 // 11
-#endif
- ldp w15,w0,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w6,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w14 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w15,w15 // 12
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w7,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w15 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w0,w0 // 13
-#endif
- ldp w1,w2,[x1]
- add w23,w23,w17 // h+=Sigma0(a)
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w8,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w0 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w1,w1 // 14
-#endif
- ldr w6,[sp,#12]
- add w22,w22,w17 // h+=Sigma0(a)
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w9,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w1 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w2,w2 // 15
-#endif
- ldr w7,[sp,#0]
- add w21,w21,w17 // h+=Sigma0(a)
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
-.Loop_16_xx:
- ldr w8,[sp,#4]
- str w11,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w10,w5,#7
- and w17,w25,w24
- ror w9,w2,#17
- bic w19,w26,w24
- ror w11,w20,#2
- add w27,w27,w3 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w10,w10,w5,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w11,w11,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w9,w9,w2,ror#19
- eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w11,w20,ror#22 // Sigma0(a)
- eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
- add w4,w4,w13
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w4,w4,w10
- add w27,w27,w17 // h+=Sigma0(a)
- add w4,w4,w9
- ldr w9,[sp,#8]
- str w12,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w11,w6,#7
- and w17,w24,w23
- ror w10,w3,#17
- bic w28,w25,w23
- ror w12,w27,#2
- add w26,w26,w4 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w11,w11,w6,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w12,w12,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w10,w10,w3,ror#19
- eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w12,w27,ror#22 // Sigma0(a)
- eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
- add w5,w5,w14
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w5,w5,w11
- add w26,w26,w17 // h+=Sigma0(a)
- add w5,w5,w10
- ldr w10,[sp,#12]
- str w13,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w12,w7,#7
- and w17,w23,w22
- ror w11,w4,#17
- bic w19,w24,w22
- ror w13,w26,#2
- add w25,w25,w5 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w12,w12,w7,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w13,w13,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w11,w11,w4,ror#19
- eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w13,w26,ror#22 // Sigma0(a)
- eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
- add w6,w6,w15
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w6,w6,w12
- add w25,w25,w17 // h+=Sigma0(a)
- add w6,w6,w11
- ldr w11,[sp,#0]
- str w14,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w13,w8,#7
- and w17,w22,w21
- ror w12,w5,#17
- bic w28,w23,w21
- ror w14,w25,#2
- add w24,w24,w6 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w13,w13,w8,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w14,w14,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w12,w12,w5,ror#19
- eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w14,w25,ror#22 // Sigma0(a)
- eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
- add w7,w7,w0
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w7,w7,w13
- add w24,w24,w17 // h+=Sigma0(a)
- add w7,w7,w12
- ldr w12,[sp,#4]
- str w15,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w14,w9,#7
- and w17,w21,w20
- ror w13,w6,#17
- bic w19,w22,w20
- ror w15,w24,#2
- add w23,w23,w7 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w14,w14,w9,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w15,w15,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w13,w13,w6,ror#19
- eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w15,w24,ror#22 // Sigma0(a)
- eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
- add w8,w8,w1
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w8,w8,w14
- add w23,w23,w17 // h+=Sigma0(a)
- add w8,w8,w13
- ldr w13,[sp,#8]
- str w0,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w15,w10,#7
- and w17,w20,w27
- ror w14,w7,#17
- bic w28,w21,w27
- ror w0,w23,#2
- add w22,w22,w8 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w15,w15,w10,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w0,w0,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w14,w14,w7,ror#19
- eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w0,w23,ror#22 // Sigma0(a)
- eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
- add w9,w9,w2
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w9,w9,w15
- add w22,w22,w17 // h+=Sigma0(a)
- add w9,w9,w14
- ldr w14,[sp,#12]
- str w1,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w0,w11,#7
- and w17,w27,w26
- ror w15,w8,#17
- bic w19,w20,w26
- ror w1,w22,#2
- add w21,w21,w9 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w0,w0,w11,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w1,w1,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w15,w15,w8,ror#19
- eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w1,w22,ror#22 // Sigma0(a)
- eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
- add w10,w10,w3
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w10,w10,w0
- add w21,w21,w17 // h+=Sigma0(a)
- add w10,w10,w15
- ldr w15,[sp,#0]
- str w2,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w1,w12,#7
- and w17,w26,w25
- ror w0,w9,#17
- bic w28,w27,w25
- ror w2,w21,#2
- add w20,w20,w10 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w1,w1,w12,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w2,w2,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w0,w0,w9,ror#19
- eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w2,w21,ror#22 // Sigma0(a)
- eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
- add w11,w11,w4
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w11,w11,w1
- add w20,w20,w17 // h+=Sigma0(a)
- add w11,w11,w0
- ldr w0,[sp,#4]
- str w3,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w2,w13,#7
- and w17,w25,w24
- ror w1,w10,#17
- bic w19,w26,w24
- ror w3,w20,#2
- add w27,w27,w11 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w2,w2,w13,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w3,w3,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w1,w1,w10,ror#19
- eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w3,w20,ror#22 // Sigma0(a)
- eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
- add w12,w12,w5
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w12,w12,w2
- add w27,w27,w17 // h+=Sigma0(a)
- add w12,w12,w1
- ldr w1,[sp,#8]
- str w4,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w3,w14,#7
- and w17,w24,w23
- ror w2,w11,#17
- bic w28,w25,w23
- ror w4,w27,#2
- add w26,w26,w12 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w3,w3,w14,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w4,w4,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w2,w2,w11,ror#19
- eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w4,w27,ror#22 // Sigma0(a)
- eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
- add w13,w13,w6
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w13,w13,w3
- add w26,w26,w17 // h+=Sigma0(a)
- add w13,w13,w2
- ldr w2,[sp,#12]
- str w5,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w4,w15,#7
- and w17,w23,w22
- ror w3,w12,#17
- bic w19,w24,w22
- ror w5,w26,#2
- add w25,w25,w13 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w4,w4,w15,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w5,w5,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w3,w3,w12,ror#19
- eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w5,w26,ror#22 // Sigma0(a)
- eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
- add w14,w14,w7
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w14,w14,w4
- add w25,w25,w17 // h+=Sigma0(a)
- add w14,w14,w3
- ldr w3,[sp,#0]
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w5,w0,#7
- and w17,w22,w21
- ror w4,w13,#17
- bic w28,w23,w21
- ror w6,w25,#2
- add w24,w24,w14 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w5,w5,w0,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w6,w6,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w4,w4,w13,ror#19
- eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w25,ror#22 // Sigma0(a)
- eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
- add w15,w15,w8
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w15,w15,w5
- add w24,w24,w17 // h+=Sigma0(a)
- add w15,w15,w4
- ldr w4,[sp,#4]
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w6,w1,#7
- and w17,w21,w20
- ror w5,w14,#17
- bic w19,w22,w20
- ror w7,w24,#2
- add w23,w23,w15 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w6,w6,w1,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w7,w7,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w5,w5,w14,ror#19
- eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w24,ror#22 // Sigma0(a)
- eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
- add w0,w0,w9
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w0,w0,w6
- add w23,w23,w17 // h+=Sigma0(a)
- add w0,w0,w5
- ldr w5,[sp,#8]
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w7,w2,#7
- and w17,w20,w27
- ror w6,w15,#17
- bic w28,w21,w27
- ror w8,w23,#2
- add w22,w22,w0 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w7,w7,w2,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w8,w8,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w6,w6,w15,ror#19
- eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w23,ror#22 // Sigma0(a)
- eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
- add w1,w1,w10
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w1,w1,w7
- add w22,w22,w17 // h+=Sigma0(a)
- add w1,w1,w6
- ldr w6,[sp,#12]
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w8,w3,#7
- and w17,w27,w26
- ror w7,w0,#17
- bic w19,w20,w26
- ror w9,w22,#2
- add w21,w21,w1 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w8,w8,w3,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w9,w9,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w7,w7,w0,ror#19
- eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w22,ror#22 // Sigma0(a)
- eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
- add w2,w2,w11
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w2,w2,w8
- add w21,w21,w17 // h+=Sigma0(a)
- add w2,w2,w7
- ldr w7,[sp,#0]
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
- cbnz w19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#260 // rewind
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#2*4]
- add x1,x1,#14*4 // advance input pointer
- ldp w7,w8,[x0,#4*4]
- add w20,w20,w3
- ldp w9,w10,[x0,#6*4]
- add w21,w21,w4
- add w22,w22,w5
- add w23,w23,w6
- stp w20,w21,[x0]
- add w24,w24,w7
- add w25,w25,w8
- stp w22,w23,[x0,#2*4]
- add w26,w26,w9
- add w27,w27,w10
- cmp x1,x2
- stp w24,w25,[x0,#4*4]
- stp w26,w27,[x0,#6*4]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*4
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha256_block_data_order,.-sha256_block_data_order
-
-.align 6
-.type .LK256,%object
-.LK256:
- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
- .long 0 //terminator
-.size .LK256,.-.LK256
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.type sha256_block_armv8,%function
-.align 6
-sha256_block_armv8:
-.Lv8_entry:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-
- ld1 {v0.4s,v1.4s},[x0]
- adr x3,.LK256
-
-.Loop_hw:
- ld1 {v4.16b-v7.16b},[x1],#64
- sub x2,x2,#1
- ld1 {v16.4s},[x3],#16
- rev32 v4.16b,v4.16b
- rev32 v5.16b,v5.16b
- rev32 v6.16b,v6.16b
- rev32 v7.16b,v7.16b
- orr v18.16b,v0.16b,v0.16b // offload
- orr v19.16b,v1.16b,v1.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- ld1 {v17.4s},[x3]
- add v16.4s,v16.4s,v6.4s
- sub x3,x3,#64*4-16 // rewind
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- add v17.4s,v17.4s,v7.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- add v0.4s,v0.4s,v18.4s
- add v1.4s,v1.4s,v19.4s
-
- cbnz x2,.Loop_hw
-
- st1 {v0.4s,v1.4s},[x0]
-
- ldr x29,[sp],#16
- ret
-.size sha256_block_armv8,.-sha256_block_armv8
-#endif
-#ifdef __KERNEL__
-.globl sha256_block_neon
-#endif
-.type sha256_block_neon,%function
-.align 4
-sha256_block_neon:
-.Lneon_entry:
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- sub sp,sp,#16*4
-
- adr x16,.LK256
- add x2,x1,x2,lsl#6 // len to point at the end of inp
-
- ld1 {v0.16b},[x1], #16
- ld1 {v1.16b},[x1], #16
- ld1 {v2.16b},[x1], #16
- ld1 {v3.16b},[x1], #16
- ld1 {v4.4s},[x16], #16
- ld1 {v5.4s},[x16], #16
- ld1 {v6.4s},[x16], #16
- ld1 {v7.4s},[x16], #16
- rev32 v0.16b,v0.16b // yes, even on
- rev32 v1.16b,v1.16b // big-endian
- rev32 v2.16b,v2.16b
- rev32 v3.16b,v3.16b
- mov x17,sp
- add v4.4s,v4.4s,v0.4s
- add v5.4s,v5.4s,v1.4s
- add v6.4s,v6.4s,v2.4s
- st1 {v4.4s-v5.4s},[x17], #32
- add v7.4s,v7.4s,v3.4s
- st1 {v6.4s-v7.4s},[x17]
- sub x17,x17,#32
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#8]
- ldp w7,w8,[x0,#16]
- ldp w9,w10,[x0,#24]
- ldr w12,[sp,#0]
- mov w13,wzr
- eor w14,w4,w5
- mov w15,wzr
- b .L_00_48
-
-.align 4
-.L_00_48:
- ext v4.16b,v0.16b,v1.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v2.16b,v3.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v3.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v0.4s,v0.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v0.4s,v0.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v0.4s,v0.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v0.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v0.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v0.4s,#15
- add w8,w8,w12
- ushr v17.4s,v0.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v0.4s,#13
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v0.4s,v0.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v0.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v1.16b,v2.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v3.16b,v0.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v0.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v1.4s,v1.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v1.4s,v1.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v1.4s,v1.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v1.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v1.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v1.4s,#15
- add w4,w4,w12
- ushr v17.4s,v1.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v1.4s,#13
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v1.4s,v1.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v1.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- ext v4.16b,v2.16b,v3.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v0.16b,v1.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v1.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v2.4s,v2.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v2.4s,v2.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v2.4s,v2.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v2.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v2.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v2.4s,#15
- add w8,w8,w12
- ushr v17.4s,v2.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v2.4s,#13
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v2.4s,v2.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v2.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v3.16b,v0.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v1.16b,v2.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v2.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v3.4s,v3.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v3.4s,v3.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v3.4s,v3.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v3.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v3.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v3.4s,#15
- add w4,w4,w12
- ushr v17.4s,v3.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v3.4s,#13
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v3.4s,v3.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v3.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[x16]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- cmp w12,#0 // check for K256 terminator
- ldr w12,[sp,#0]
- sub x17,x17,#64
- bne .L_00_48
-
- sub x16,x16,#256 // rewind x16
- cmp x1,x2
- mov x17, #64
- csel x17, x17, xzr, eq
- sub x1,x1,x17 // avoid SEGV
- mov x17,sp
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v0.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v0.16b,v0.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v0.4s
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v1.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v1.16b,v1.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v1.4s
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v2.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v2.16b,v2.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v2.4s
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v3.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v3.16b,v3.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v3.4s
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w3,w3,w15 // h+=Sigma0(a) from the past
- ldp w11,w12,[x0,#0]
- add w3,w3,w13 // h+=Maj(a,b,c) from the past
- ldp w13,w14,[x0,#8]
- add w3,w3,w11 // accumulate
- add w4,w4,w12
- ldp w11,w12,[x0,#16]
- add w5,w5,w13
- add w6,w6,w14
- ldp w13,w14,[x0,#24]
- add w7,w7,w11
- add w8,w8,w12
- ldr w12,[sp,#0]
- stp w3,w4,[x0,#0]
- add w9,w9,w13
- mov w13,wzr
- stp w5,w6,[x0,#8]
- add w10,w10,w14
- stp w7,w8,[x0,#16]
- eor w14,w4,w5
- stp w9,w10,[x0,#24]
- mov w15,wzr
- mov x17,sp
- b.ne .L_00_48
-
- ldr x29,[x29]
- add sp,sp,#16*4+16
- ret
-.size sha256_block_neon,.-sha256_block_neon
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/crypto/sha512-core.S b/arch/arm64/crypto/sha512-core.S
deleted file mode 100644
index bd0f59f06c9d..000000000000
--- a/arch/arm64/crypto/sha512-core.S
+++ /dev/null
@@ -1,1085 +0,0 @@
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-//
-// Permission to use under GPLv2 terms is granted.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significanty faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement NEON
-// version of SHA256 for 64-bit processors. This is because performance
-// improvement on most wide-spread Cortex-A5x processors was observed
-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// 64-bit scalar version on *some* of the more recent processors. As
-// result 64-bit NEON version of SHA256 was added to provide best
-// all-round performance. For example it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x.
-// Which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha512_block_data_order
-.type sha512_block_data_order,%function
-.align 6
-sha512_block_data_order:
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*8
-
- ldp x20,x21,[x0] // load context
- ldp x22,x23,[x0,#2*8]
- ldp x24,x25,[x0,#4*8]
- add x2,x1,x2,lsl#7 // end of input
- ldp x26,x27,[x0,#6*8]
- adr x30,.LK512
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp x3,x4,[x1],#2*8
- ldr x19,[x30],#8 // *K++
- eor x28,x21,x22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev x3,x3 // 0
-#endif
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x6,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x3 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x4,x4 // 1
-#endif
- ldp x5,x6,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x7,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x4 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x5,x5 // 2
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x8,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x5 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x6,x6 // 3
-#endif
- ldp x7,x8,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x9,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x6 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x7,x7 // 4
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x10,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x7 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x10,ror#18 // Sigma1(e)
- ror x10,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x10,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x8,x8 // 5
-#endif
- ldp x9,x10,[x1],#2*8
- add x23,x23,x17 // h+=Sigma0(a)
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x11,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x8 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x11,ror#18 // Sigma1(e)
- ror x11,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x11,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x9,x9 // 6
-#endif
- add x22,x22,x17 // h+=Sigma0(a)
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x12,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x9 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x12,ror#18 // Sigma1(e)
- ror x12,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x12,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x10,x10 // 7
-#endif
- ldp x11,x12,[x1],#2*8
- add x21,x21,x17 // h+=Sigma0(a)
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- eor x13,x25,x25,ror#23
- and x17,x26,x25
- bic x28,x27,x25
- add x20,x20,x10 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x13,ror#18 // Sigma1(e)
- ror x13,x21,#28
- add x20,x20,x17 // h+=Ch(e,f,g)
- eor x17,x21,x21,ror#5
- add x20,x20,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x24,x24,x20 // d+=h
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x13,x17,ror#34 // Sigma0(a)
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x20,x20,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x11,x11 // 8
-#endif
- add x20,x20,x17 // h+=Sigma0(a)
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x14,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x11 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x14,ror#18 // Sigma1(e)
- ror x14,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x14,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x12,x12 // 9
-#endif
- ldp x13,x14,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x15,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x12 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x15,ror#18 // Sigma1(e)
- ror x15,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x15,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x13,x13 // 10
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x0,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x13 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x0,ror#18 // Sigma1(e)
- ror x0,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x0,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x14,x14 // 11
-#endif
- ldp x15,x0,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x6,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x14 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x15,x15 // 12
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x7,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x15 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x0,x0 // 13
-#endif
- ldp x1,x2,[x1]
- add x23,x23,x17 // h+=Sigma0(a)
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x8,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x0 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x1,x1 // 14
-#endif
- ldr x6,[sp,#24]
- add x22,x22,x17 // h+=Sigma0(a)
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x9,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x1 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x2,x2 // 15
-#endif
- ldr x7,[sp,#0]
- add x21,x21,x17 // h+=Sigma0(a)
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
-.Loop_16_xx:
- ldr x8,[sp,#8]
- str x11,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x10,x5,#1
- and x17,x25,x24
- ror x9,x2,#19
- bic x19,x26,x24
- ror x11,x20,#28
- add x27,x27,x3 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x10,x10,x5,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x11,x11,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x9,x9,x2,ror#61
- eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x11,x20,ror#39 // Sigma0(a)
- eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
- add x4,x4,x13
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x4,x4,x10
- add x27,x27,x17 // h+=Sigma0(a)
- add x4,x4,x9
- ldr x9,[sp,#16]
- str x12,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x11,x6,#1
- and x17,x24,x23
- ror x10,x3,#19
- bic x28,x25,x23
- ror x12,x27,#28
- add x26,x26,x4 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x11,x11,x6,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x12,x12,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x10,x10,x3,ror#61
- eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x12,x27,ror#39 // Sigma0(a)
- eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
- add x5,x5,x14
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x5,x5,x11
- add x26,x26,x17 // h+=Sigma0(a)
- add x5,x5,x10
- ldr x10,[sp,#24]
- str x13,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x12,x7,#1
- and x17,x23,x22
- ror x11,x4,#19
- bic x19,x24,x22
- ror x13,x26,#28
- add x25,x25,x5 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x12,x12,x7,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x13,x13,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x11,x11,x4,ror#61
- eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x13,x26,ror#39 // Sigma0(a)
- eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
- add x6,x6,x15
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x6,x6,x12
- add x25,x25,x17 // h+=Sigma0(a)
- add x6,x6,x11
- ldr x11,[sp,#0]
- str x14,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x13,x8,#1
- and x17,x22,x21
- ror x12,x5,#19
- bic x28,x23,x21
- ror x14,x25,#28
- add x24,x24,x6 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x13,x13,x8,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x14,x14,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x12,x12,x5,ror#61
- eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x14,x25,ror#39 // Sigma0(a)
- eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
- add x7,x7,x0
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x7,x7,x13
- add x24,x24,x17 // h+=Sigma0(a)
- add x7,x7,x12
- ldr x12,[sp,#8]
- str x15,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x14,x9,#1
- and x17,x21,x20
- ror x13,x6,#19
- bic x19,x22,x20
- ror x15,x24,#28
- add x23,x23,x7 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x14,x14,x9,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x15,x15,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x13,x13,x6,ror#61
- eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x15,x24,ror#39 // Sigma0(a)
- eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
- add x8,x8,x1
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x8,x8,x14
- add x23,x23,x17 // h+=Sigma0(a)
- add x8,x8,x13
- ldr x13,[sp,#16]
- str x0,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x15,x10,#1
- and x17,x20,x27
- ror x14,x7,#19
- bic x28,x21,x27
- ror x0,x23,#28
- add x22,x22,x8 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x15,x15,x10,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x0,x0,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x14,x14,x7,ror#61
- eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x0,x23,ror#39 // Sigma0(a)
- eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
- add x9,x9,x2
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x9,x9,x15
- add x22,x22,x17 // h+=Sigma0(a)
- add x9,x9,x14
- ldr x14,[sp,#24]
- str x1,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x0,x11,#1
- and x17,x27,x26
- ror x15,x8,#19
- bic x19,x20,x26
- ror x1,x22,#28
- add x21,x21,x9 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x0,x0,x11,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x1,x1,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x15,x15,x8,ror#61
- eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x1,x22,ror#39 // Sigma0(a)
- eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
- add x10,x10,x3
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x10,x10,x0
- add x21,x21,x17 // h+=Sigma0(a)
- add x10,x10,x15
- ldr x15,[sp,#0]
- str x2,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x1,x12,#1
- and x17,x26,x25
- ror x0,x9,#19
- bic x28,x27,x25
- ror x2,x21,#28
- add x20,x20,x10 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x1,x1,x12,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x2,x2,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x0,x0,x9,ror#61
- eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x2,x21,ror#39 // Sigma0(a)
- eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
- add x11,x11,x4
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x11,x11,x1
- add x20,x20,x17 // h+=Sigma0(a)
- add x11,x11,x0
- ldr x0,[sp,#8]
- str x3,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x2,x13,#1
- and x17,x25,x24
- ror x1,x10,#19
- bic x19,x26,x24
- ror x3,x20,#28
- add x27,x27,x11 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x2,x2,x13,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x3,x3,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x1,x1,x10,ror#61
- eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x3,x20,ror#39 // Sigma0(a)
- eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
- add x12,x12,x5
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x12,x12,x2
- add x27,x27,x17 // h+=Sigma0(a)
- add x12,x12,x1
- ldr x1,[sp,#16]
- str x4,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x3,x14,#1
- and x17,x24,x23
- ror x2,x11,#19
- bic x28,x25,x23
- ror x4,x27,#28
- add x26,x26,x12 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x3,x3,x14,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x4,x4,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x2,x2,x11,ror#61
- eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x4,x27,ror#39 // Sigma0(a)
- eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
- add x13,x13,x6
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x13,x13,x3
- add x26,x26,x17 // h+=Sigma0(a)
- add x13,x13,x2
- ldr x2,[sp,#24]
- str x5,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x4,x15,#1
- and x17,x23,x22
- ror x3,x12,#19
- bic x19,x24,x22
- ror x5,x26,#28
- add x25,x25,x13 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x4,x4,x15,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x5,x5,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x3,x3,x12,ror#61
- eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x5,x26,ror#39 // Sigma0(a)
- eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
- add x14,x14,x7
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x14,x14,x4
- add x25,x25,x17 // h+=Sigma0(a)
- add x14,x14,x3
- ldr x3,[sp,#0]
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x5,x0,#1
- and x17,x22,x21
- ror x4,x13,#19
- bic x28,x23,x21
- ror x6,x25,#28
- add x24,x24,x14 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x5,x5,x0,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x6,x6,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x4,x4,x13,ror#61
- eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x25,ror#39 // Sigma0(a)
- eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
- add x15,x15,x8
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x15,x15,x5
- add x24,x24,x17 // h+=Sigma0(a)
- add x15,x15,x4
- ldr x4,[sp,#8]
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x6,x1,#1
- and x17,x21,x20
- ror x5,x14,#19
- bic x19,x22,x20
- ror x7,x24,#28
- add x23,x23,x15 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x6,x6,x1,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x7,x7,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x5,x5,x14,ror#61
- eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x24,ror#39 // Sigma0(a)
- eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
- add x0,x0,x9
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x0,x0,x6
- add x23,x23,x17 // h+=Sigma0(a)
- add x0,x0,x5
- ldr x5,[sp,#16]
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x7,x2,#1
- and x17,x20,x27
- ror x6,x15,#19
- bic x28,x21,x27
- ror x8,x23,#28
- add x22,x22,x0 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x7,x7,x2,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x8,x8,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x6,x6,x15,ror#61
- eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x23,ror#39 // Sigma0(a)
- eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
- add x1,x1,x10
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x1,x1,x7
- add x22,x22,x17 // h+=Sigma0(a)
- add x1,x1,x6
- ldr x6,[sp,#24]
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x8,x3,#1
- and x17,x27,x26
- ror x7,x0,#19
- bic x19,x20,x26
- ror x9,x22,#28
- add x21,x21,x1 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x8,x8,x3,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x9,x9,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x7,x7,x0,ror#61
- eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x22,ror#39 // Sigma0(a)
- eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
- add x2,x2,x11
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x2,x2,x8
- add x21,x21,x17 // h+=Sigma0(a)
- add x2,x2,x7
- ldr x7,[sp,#0]
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
- cbnz x19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#648 // rewind
-
- ldp x3,x4,[x0]
- ldp x5,x6,[x0,#2*8]
- add x1,x1,#14*8 // advance input pointer
- ldp x7,x8,[x0,#4*8]
- add x20,x20,x3
- ldp x9,x10,[x0,#6*8]
- add x21,x21,x4
- add x22,x22,x5
- add x23,x23,x6
- stp x20,x21,[x0]
- add x24,x24,x7
- add x25,x25,x8
- stp x22,x23,[x0,#2*8]
- add x26,x26,x9
- add x27,x27,x10
- cmp x1,x2
- stp x24,x25,[x0,#4*8]
- stp x26,x27,[x0,#6*8]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*8
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha512_block_data_order,.-sha512_block_data_order
-
-.align 6
-.type .LK512,%object
-.LK512:
- .quad 0x428a2f98d728ae22,0x7137449123ef65cd
- .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
- .quad 0x3956c25bf348b538,0x59f111f1b605d019
- .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
- .quad 0xd807aa98a3030242,0x12835b0145706fbe
- .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
- .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
- .quad 0x9bdc06a725c71235,0xc19bf174cf692694
- .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
- .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
- .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
- .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
- .quad 0x983e5152ee66dfab,0xa831c66d2db43210
- .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
- .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
- .quad 0x06ca6351e003826f,0x142929670a0e6e70
- .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
- .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
- .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
- .quad 0x81c2c92e47edaee6,0x92722c851482353b
- .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
- .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
- .quad 0xd192e819d6ef5218,0xd69906245565a910
- .quad 0xf40e35855771202a,0x106aa07032bbd1b8
- .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
- .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
- .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
- .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
- .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
- .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
- .quad 0x90befffa23631e28,0xa4506cebde82bde9
- .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
- .quad 0xca273eceea26619c,0xd186b8c721c0c207
- .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
- .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
- .quad 0x113f9804bef90dae,0x1b710b35131c471b
- .quad 0x28db77f523047d84,0x32caab7b40c72493
- .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
- .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
- .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
- .quad 0 // terminator
-.size .LK512,.-.LK512
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 7e842dcae450..b7205c254c0d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -29,13 +29,16 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
-#define ALTINSTR_ENTRY(feature,cb) \
+#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
- " .if " __stringify(cb) " == 0\n" \
" .word 663f - .\n" /* new instruction */ \
- " .else\n" \
+ " .hword " __stringify(feature) "\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* source len */ \
+ " .byte 664f-663f\n" /* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb) \
+ " .word 661b - .\n" /* label */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
- " .endif\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
@@ -56,15 +59,14 @@ void apply_alternatives(void *start, size_t length);
*
* Alternatives with callbacks do not generate replacement instructions.
*/
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(feature,cb) \
+ ALTINSTR_ENTRY(feature) \
".popsection\n" \
- " .if " __stringify(cb) " == 0\n" \
".pushsection .altinstr_replacement, \"a\"\n" \
"663:\n\t" \
newinstr "\n" \
@@ -72,17 +74,25 @@ void apply_alternatives(void *start, size_t length);
".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
- ".else\n\t" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
+ "661:\n\t" \
+ oldinstr "\n" \
+ "662:\n" \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY_CB(feature, cb) \
+ ".popsection\n" \
"663:\n\t" \
"664:\n\t" \
- ".endif\n" \
".endif\n"
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
- __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
#define ALTERNATIVE_CB(oldinstr, cb) \
- __ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+ __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
#else
#include <asm/assembler.h>
@@ -205,7 +215,7 @@ alternative_endif
.macro user_alt, label, oldinstr, newinstr, cond
9999: alternative_insn "\oldinstr", "\newinstr", \cond
- _ASM_EXTABLE 9999b, \label
+ _asm_extable 9999b, \label
.endm
/*
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 0f2e1ab5e166..9b2e2e2e728a 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -73,7 +73,7 @@ __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
#undef __XCHG_CASE
#define __XCHG_GEN(sfx) \
-static inline unsigned long __xchg##sfx(unsigned long x, \
+static __always_inline unsigned long __xchg##sfx(unsigned long x, \
volatile void *ptr, \
int size) \
{ \
@@ -115,7 +115,7 @@ __XCHG_GEN(_mb)
#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
#define __CMPXCHG_GEN(sfx) \
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
unsigned long old, \
unsigned long new, \
int size) \
@@ -248,7 +248,7 @@ __CMPWAIT_CASE( , , 8);
#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx) \
-static inline void __cmpwait##sfx(volatile void *ptr, \
+static __always_inline void __cmpwait##sfx(volatile void *ptr, \
unsigned long val, \
int size) \
{ \
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index eb8432bb82b8..b69e27152ea5 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -234,6 +234,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
}
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ 2048
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 15868eca58de..e7bef3d936d8 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -31,9 +31,10 @@
/* CPU feature register tracking */
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 65615820155e..65db124a44bf 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -52,7 +52,11 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define efi_is_64bit() (true)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 2a5090fb9113..86a43450f014 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -33,8 +33,8 @@
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
-"2: stlxr %w3, %w0, %2\n" \
-" cbnz %w3, 1b\n" \
+"2: stlxr %w0, %w3, %2\n" \
+" cbnz %w0, 1b\n" \
" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
@@ -59,23 +59,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %w0, %w4",
+ __futex_atomic_op("mov %w3, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %w0, %w1, %w4",
+ __futex_atomic_op("add %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %w0, %w1, %w4",
+ __futex_atomic_op("orr %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %w0, %w1, %w4",
+ __futex_atomic_op("and %w3, %w1, %w4",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %w0, %w1, %w4",
+ __futex_atomic_op("eor %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
default:
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 547519abc751..ff721659eb94 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -300,6 +300,11 @@ static inline bool __kvm_cpu_uses_extended_idmap(void)
return __cpu_uses_extended_idmap();
}
+/*
+ * Can't use pgd_populate here, because the extended idmap adds an extra level
+ * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
+ * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
+ */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index f705d96a76f2..5bc3de78306a 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -77,13 +77,12 @@
#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
-#define __P100 PAGE_EXECONLY
+#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC
@@ -92,7 +91,7 @@
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_EXECONLY
+#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 3a30a3994e4a..199f434f99a4 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -83,12 +83,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
@@ -104,8 +100,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
/*
* p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
*/
#define pte_access_permitted(pte, write) \
(pte_valid_user(pte) && (!(write) || pte_write(pte)))
@@ -357,6 +353,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pud(pfn,prot) (__pud(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
@@ -387,8 +384,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
@@ -413,6 +410,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
+static inline void pte_unmap(pte_t *pte) { }
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -421,7 +420,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5917147af0c4..9ee660013e5c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -49,7 +49,15 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#ifdef CONFIG_COMPAT
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
#define TASK_SIZE_32 UL(0x100000000)
+#else
+#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index bc812435bc76..d0beefeb6d25 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -40,7 +40,7 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
int sig, int code, const char *name);
struct mm_struct;
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void show_pte(unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..f89263c8e11a 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -38,6 +38,7 @@ struct vdso_data {
__u32 tz_minuteswest; /* Whacky timezone stuff */
__u32 tz_dsttime;
__u32 use_syscall;
+ __u32 hrtimer_res;
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 252a6d9c1da5..1a95d135def2 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -157,10 +157,14 @@ static int __init acpi_fadt_sanity_check(void)
*/
if (table->revision < 5 ||
(table->revision == 5 && fadt->minor_revision < 1)) {
- pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
+ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
table->revision, fadt->minor_revision);
- ret = -EINVAL;
- goto out;
+
+ if (!fadt->arm_boot_flags) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
}
if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c0ede237c14b..49989207989a 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -604,7 +604,7 @@ static struct undef_hook setend_hooks[] = {
},
{
/* Thumb mode */
- .instr_mask = 0x0000fff7,
+ .instr_mask = 0xfffffff7,
.instr_val = 0x0000b650,
.pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
.pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index bd239b1b7a68..95878bea27f9 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -92,7 +92,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 930e74d9fcbd..3b680a32886b 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -16,6 +16,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
+#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index e137ceaf5016..82b465207ed0 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
pr_err("%s: missing enable-method property\n",
dn->full_name);
}
+ of_node_put(dn);
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a3ab7dfad50a..8cf001baee21 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -148,10 +148,12 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
};
static const struct arm64_ftr_bits ftr_ctr[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0), /* CWG */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0), /* ERG */
ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -390,6 +392,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
@@ -783,6 +789,11 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
case MIDR_CAVIUM_THUNDERX2:
case MIDR_BRCM_VULCAN:
+ case MIDR_CORTEX_A53:
+ case MIDR_CORTEX_A55:
+ case MIDR_CORTEX_A57:
+ case MIDR_CORTEX_A72:
+ case MIDR_CORTEX_A73:
return false;
}
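
For context on the FTR_HIGHER_OR_ZERO_SAFE type introduced above for the CWG/ERG fields of CTR_EL0: a field value of 0 means the CPU does not report that granule, so the only safe system-wide value is 0; otherwise the larger of the two reported values is the safe one. A minimal, standalone C sketch of that combine rule (illustrative only, not part of the patch; the function name is made up):

#include <stdio.h>

/*
 * Combine one CTR_EL0 field (e.g. CWG or ERG) as reported by two CPUs.
 * 0 means "not reported", so if either side is 0 the safe value is 0;
 * otherwise the higher of the two values is safe.
 */
static long higher_or_zero_safe(long cur, long new)
{
	if (cur == 0 || new == 0)
		return 0;
	return new > cur ? new : cur;
}

int main(void)
{
	printf("%ld\n", higher_or_zero_safe(4, 0));	/* prints 0 */
	printf("%ld\n", higher_or_zero_safe(4, 6));	/* prints 6 */
	return 0;
}
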
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 73ae90ef434c..9f1adca3c346 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -132,6 +132,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
*/
static int clear_os_lock(unsigned int cpu)
{
+ write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index db6ff1944c41..aba534959377 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -650,7 +650,7 @@ ENTRY(__boot_cpu_mode)
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
- .long 0
+ .quad 0
.popsection
@@ -686,6 +686,7 @@ secondary_startup:
/*
* Common entry point for secondary CPUs.
*/
+ bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
bl __enable_mmu
ldr x8, =__secondary_switched
@@ -759,6 +760,31 @@ ENTRY(__enable_mmu)
ret
ENDPROC(__enable_mmu)
+ENTRY(__cpu_secondary_check52bitva)
+#ifdef CONFIG_ARM64_52BIT_VA
+ ldr_l x0, vabits_user
+ cmp x0, #52
+ b.ne 2f
+
+ mrs_s x0, SYS_ID_AA64MMFR2_EL1
+ and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz x0, 2f
+
+ adr_l x0, va52mismatch
+ mov w1, #1
+ strb w1, [x0]
+ dmb sy
+ dc ivac, x0 // Invalidate potentially stale cache line
+
+ update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x0, x1
+1: wfe
+ wfi
+ b 1b
+
+#endif
+2: ret
+ENDPROC(__cpu_secondary_check52bitva)
+
__no_granule_support:
/* Indicate that this CPU can't boot and is stuck in the kernel */
update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 76c9b51fa7f1..c4aec129ed20 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -247,8 +247,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
}
pte = pte_offset_kernel(pmd, dst_addr);
- set_pte(pte, __pte(virt_to_phys((void *)dst) |
- pgprot_val(PAGE_KERNEL_EXEC)));
+ set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
/*
* Load our new page tables. A strict BBM approach requires that we
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 0b9e5f6290f9..d168e52ee622 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -508,13 +508,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index c7fcb232fe47..d3e8c901274d 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -73,7 +73,11 @@
#ifdef CONFIG_EFI
-__efistub_stext_offset = stext - _text;
+/*
+ * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+ * https://github.com/ClangBuiltLinux/linux/issues/561
+ */
+__efistub_stext_offset = ABSOLUTE(stext - _text);
/*
* Prevent the symbol aliases below from being emitted into the kallsyms
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index e017a9493b92..72a660a74ff9 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -231,24 +231,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
{
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_brk_fn)
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
{
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
kgdb_handle_exception(1, SIGTRAP, 0, regs);
- return 0;
+ return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f316982ce00..4130f1f26852 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -33,10 +33,14 @@
void *module_alloc(unsigned long size)
{
void *p;
+ u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
+
+ if (IS_ENABLED(CONFIG_KASAN))
+ /* don't exceed the static module region - see below */
+ module_alloc_end = MODULES_END;
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ module_alloc_end, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d2b1b624ddc3..17f647103ed7 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
int retval;
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
/* return error if this is not our step */
retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
kprobe_handler(regs);
return DBG_HOOK_HANDLED;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 42816bebb1e0..e3713d6fb8e0 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -83,7 +83,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
static int cpu_psci_cpu_kill(unsigned int cpu)
{
- int err, i;
+ int err;
+ unsigned long start, end;
if (!psci_ops.affinity_info)
return 0;
@@ -93,16 +94,18 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
* while it is dying. So, try again a few times.
*/
- for (i = 0; i < 10; i++) {
+ start = jiffies;
+ end = start + msecs_to_jiffies(100);
+ do {
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
- pr_info("CPU%d killed.\n", cpu);
+ pr_info("CPU%d killed (polled %d ms)\n", cpu,
+ jiffies_to_msecs(jiffies - start));
return 0;
}
- msleep(10);
- pr_info("Retrying again to check for CPU kill\n");
- }
+ usleep_range(100, 1000);
+ } while (time_before(jiffies, end));
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
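
The hunk above replaces a fixed ten-iteration retry loop with a jiffies-based deadline. A generic sketch of that polling pattern, offered as an illustration rather than the patch itself (condition_met() is a hypothetical helper; jiffies, msecs_to_jiffies(), time_before() and usleep_range() are the standard kernel interfaces the hunk uses):

/*
 * Poll a condition for up to 100 ms, sleeping briefly between attempts,
 * instead of retrying a fixed number of times.
 */
static int poll_with_deadline(void)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	do {
		if (condition_met())		/* hypothetical condition */
			return 0;
		usleep_range(100, 1000);	/* back off for 100-1000 us */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}
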
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index cfd33f18f437..13b9c20a84b5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -136,6 +136,7 @@ static int boot_secondary(unsigned int cpu, struct task_struct *idle)
}
static DECLARE_COMPLETION(cpu_running);
+bool va52mismatch __ro_after_init;
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
@@ -164,10 +165,15 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
+
+ if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+
ret = -EIO;
}
} else {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ return ret;
}
secondary_data.stack = NULL;
@@ -895,11 +901,22 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+ unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+ return num_online_cpus() - this_cpu_online;
+}
+
void smp_send_stop(void)
{
unsigned long timeout;
- if (num_online_cpus() > 1) {
+ if (num_other_online_cpus()) {
cpumask_t mask;
cpumask_copy(&mask, cpu_online_mask);
@@ -913,10 +930,10 @@ void smp_send_stop(void)
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
- while (num_online_cpus() > 1 && timeout--)
+ while (num_other_online_cpus() && timeout--)
udelay(1);
- if (num_online_cpus() > 1)
+ if (num_other_online_cpus())
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
}
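
As a concrete case for num_other_online_cpus() above: if the CPU calling smp_send_stop() is not itself marked online (for example it panics while still being brought up), num_online_cpus() already excludes it. With one other CPU online, the old test num_online_cpus() > 1 evaluates to 1 > 1 and no stop IPI is sent; the new helper evaluates to 1 - 0 = 1, so the remaining CPU is stopped as intended.
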
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 28bef94cf792..5962badb3346 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -611,7 +611,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
- die("Oops - bad mode", regs, 0);
local_irq_disable();
panic("bad mode");
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 4bcfe01b5aad..c0f315ecfa7c 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -37,7 +37,7 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
/*
@@ -124,14 +124,14 @@ static int __init vdso_init(void)
int i;
struct page **vdso_pagelist;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
- vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+ vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -144,7 +144,7 @@ static int __init vdso_init(void)
/* Grab the vDSO code pages. */
for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
+ vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(vdso_start)) + i);
vdso_spec[0].pages = &vdso_pagelist[0];
vdso_spec[1].pages = &vdso_pagelist[1];
@@ -213,6 +213,9 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ /* Read without the seqlock held by clock_getres() */
+ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
+
if (!use_syscall) {
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 76320e920965..df829c4346fa 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -301,13 +301,14 @@ ENTRY(__kernel_clock_getres)
ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
b.ne 1f
- ldr x2, 5f
+ adr vdso_data, _vdso_data
+ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
b 2f
1:
cmp w0, #CLOCK_REALTIME_COARSE
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 4f
- ldr x2, 6f
+ ldr x2, 5f
2:
cbz w1, 3f
stp xzr, x2, [x1]
@@ -321,8 +322,6 @@ ENTRY(__kernel_clock_getres)
svc #0
ret
5:
- .quad CLOCK_REALTIME_RES
-6:
.quad CLOCK_COARSE_RES
.cfi_endproc
ENDPROC(__kernel_clock_getres)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index efbf610eaf4e..a814f32033b0 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -62,5 +62,7 @@ ENDPROC(__arch_clear_user)
.section .fixup,"ax"
.align 2
9: mov x0, x2 // return the original size
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ CONFIG_ARM64_PAN)
ret
.previous
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4fd67ea03bb0..580aca96c53c 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -80,5 +80,7 @@ ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ CONFIG_ARM64_PAN)
ret
.previous
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 841bf8f7fab7..d9ca6a4f33b3 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -81,5 +81,7 @@ ENDPROC(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ CONFIG_ARM64_PAN)
ret
.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7a7efe255034..e8bd40dc00cd 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -79,5 +79,7 @@ ENDPROC(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
+ CONFIG_ARM64_PAN)
ret
.previous
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ad49ae8f3967..f3d3f2e97add 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -79,18 +79,33 @@ static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
#endif
/*
- * Dump out the page tables associated with 'addr' in mm 'mm'.
+ * Dump out the page tables associated with 'addr' in the currently active mm.
*/
-void show_pte(struct mm_struct *mm, unsigned long addr)
+void show_pte(unsigned long addr)
{
+ struct mm_struct *mm;
pgd_t *pgd;
- if (!mm)
+ if (addr < TASK_SIZE) {
+ /* TTBR0 */
+ mm = current->active_mm;
+ if (mm == &init_mm) {
+ pr_alert("[%016lx] user address but active_mm is swapper\n",
+ addr);
+ return;
+ }
+ } else if (addr >= VA_START) {
+ /* TTBR1 */
mm = &init_mm;
+ } else {
+ pr_alert("[%016lx] address between user and kernel address ranges\n",
+ addr);
+ return;
+ }
pr_alert("pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
- pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
+ pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
do {
pud_t *pud;
@@ -176,8 +191,8 @@ static bool is_el1_instruction_abort(unsigned int esr)
/*
* The kernel tried to access some page that wasn't present.
*/
-static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int esr, struct pt_regs *regs)
+static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
{
/*
* Are we prepared to handle this kernel fault?
@@ -194,7 +209,7 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
(addr < PAGE_SIZE) ? "NULL pointer dereference" :
"paging request", addr);
- show_pte(mm, addr);
+ show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
do_exit(SIGKILL);
@@ -216,7 +231,6 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
- show_pte(tsk->mm, addr);
show_regs(regs);
}
@@ -232,7 +246,6 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->active_mm;
const struct fault_info *inf;
/*
@@ -243,7 +256,7 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
inf = esr_to_fault_info(esr);
__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
} else
- __do_kernel_fault(mm, addr, esr, regs);
+ __do_kernel_fault(addr, esr, regs);
}
#define VM_FAULT_BADMAP 0x010000
@@ -306,7 +319,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- unsigned long vm_flags = VM_READ | VM_WRITE;
+ unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (notify_page_fault(regs, esr))
@@ -454,7 +467,7 @@ retry:
return 0;
no_context:
- __do_kernel_fault(mm, addr, esr, regs);
+ __do_kernel_fault(addr, esr, regs);
return 0;
}
@@ -673,11 +686,12 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
-asmlinkage int __exception do_debug_exception(unsigned long addr,
+asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+ unsigned long pc = instruction_pointer(regs);
struct siginfo info;
int rv;
@@ -688,19 +702,19 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ if (user_mode(regs) && pc > TASK_SIZE)
arm64_apply_bp_hardening();
- if (!inf->fn(addr, esr, regs)) {
+ if (!inf->fn(addr_if_watchpoint, esr, regs)) {
rv = 1;
} else {
pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
- inf->name, esr, addr);
+ inf->name, esr, pc);
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
- info.si_addr = (void __user *)addr;
+ info.si_addr = (void __user *)pc;
arm64_notify_die("", regs, &info, 0);
rv = 0;
}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index fa6b2fad7a3d..5d3df68272f5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -272,7 +272,7 @@ void __init arm64_memblock_init(void)
* memory spans, randomize the linear region as well.
*/
if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
- range = range / ARM64_MEMSTART_ALIGN + 1;
+ range /= ARM64_MEMSTART_ALIGN;
memstart_addr -= ARM64_MEMSTART_ALIGN *
((range * memstart_offset_seed) >> 16);
}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 757009daa9ed..ff43da269fe8 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -153,7 +153,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
+ pfn_to_nid(virt_to_pfn(lm_alias(_text))));
/*
* vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0a56898f8410..60be5bc0984a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,8 +28,6 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -95,6 +93,17 @@ static phys_addr_t __init early_pgtable_alloc(void)
return phys;
}
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+ /*
+ * The following mapping attributes may be updated in live
+ * kernel mappings without the need for break-before-make.
+ */
+ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+ return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+}
+
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
pgprot_t prot,
@@ -115,8 +124,17 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
pte = pte_set_fixmap_offset(pmd, addr);
do {
+ pte_t old_pte = *pte;
+
set_pte(pte, pfn_pte(pfn, prot));
pfn++;
+
+ /*
+ * After the PTE entry has been populated once, we
+ * only allow updates to the permission attributes.
+ */
+ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_clear_fixmap();
@@ -146,27 +164,27 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd = pmd_set_fixmap_offset(pud, addr);
do {
+ pmd_t old_pmd = *pmd;
+
next = pmd_addr_end(addr, end);
+
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
allow_block_mappings) {
- pmd_t old_pmd =*pmd;
pmd_set_huge(pmd, phys, prot);
+
/*
- * Check for previous table entries created during
- * boot (__create_page_tables) and flush them.
+ * After the PMD entry has been populated once, we
+ * only allow updates to the permission attributes.
*/
- if (!pmd_none(old_pmd)) {
- flush_tlb_all();
- if (pmd_table(old_pmd)) {
- phys_addr_t table = pmd_page_paddr(old_pmd);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
- }
+ BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+ pmd_val(*pmd)));
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
prot, pgtable_alloc);
+
+ BUG_ON(pmd_val(old_pmd) != 0 &&
+ pmd_val(old_pmd) != pmd_val(*pmd));
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
@@ -204,33 +222,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
pud = pud_set_fixmap_offset(pgd, addr);
do {
+ pud_t old_pud = *pud;
+
next = pud_addr_end(addr, end);
/*
* For 4K granule only, attempt to put down a 1GB block
*/
if (use_1G_block(addr, next, phys) && allow_block_mappings) {
- pud_t old_pud = *pud;
pud_set_huge(pud, phys, prot);
/*
- * If we have an old value for a pud, it will
- * be pointing to a pmd table that we no longer
- * need (from swapper_pg_dir).
- *
- * Look up the old pmd table and free it.
+ * After the PUD entry has been populated once, we
+ * only allow updates to the permission attributes.
*/
- if (!pud_none(old_pud)) {
- flush_tlb_all();
- if (pud_table(old_pud)) {
- phys_addr_t table = pud_page_paddr(old_pud);
- if (!WARN_ON_ONCE(slab_is_available()))
- memblock_free(table, PAGE_SIZE);
- }
- }
+ BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+ pud_val(*pud)));
} else {
alloc_init_pmd(pud, addr, next, phys, prot,
pgtable_alloc, allow_block_mappings);
+
+ BUG_ON(pud_val(old_pud) != 0 &&
+ pud_val(old_pud) != pud_val(*pud));
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -396,6 +409,9 @@ void mark_rodata_ro(void)
section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
section_size, PAGE_KERNEL_RO);
+
+ /* flush the TLBs after updating live kernel mappings */
+ flush_tlb_all();
}
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
@@ -479,8 +495,8 @@ static void __init map_kernel(pgd_t *pgd)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+ pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+ lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
BUG();
@@ -595,7 +611,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
+ pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
@@ -765,26 +781,49 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
int __init arch_ioremap_pud_supported(void)
{
- /* only 4k granule supports level 1 block mappings */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+ /*
+ * Only 4k granule supports level 1 block mappings.
+ * SW table walks can't handle removal of intermediate entries.
+ */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
int __init arch_ioremap_pmd_supported(void)
{
- return 1;
+ /* See arch_ioremap_pud_supported() */
+ return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
+ pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
+
+ /* Only allow permission changes for now */
+ if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
+ pud_val(new_pud)))
+ return 0;
+
BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pud(pudp, new_pud);
return 1;
}
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
+ pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
+
+ /* Only allow permission changes for now */
+ if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
+ pmd_val(new_pmd)))
+ return 0;
+
BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pmd(pmdp, new_pmd);
return 1;
}
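
To make the pgattr_change_is_safe() rule above concrete: with the mask set to PTE_PXN | PTE_RDONLY | PTE_WRITE, two live mappings may differ only in those permission bits; any other differing bit would require break-before-make, and pud_set_huge()/pmd_set_huge() now refuse such changes. A standalone C sketch of the same check, with made-up bit positions (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Bit positions below are illustrative, not the real arm64 descriptor layout. */
#define PXN	(UINT64_C(1) << 0)
#define RDONLY	(UINT64_C(1) << 1)
#define WRITE	(UINT64_C(1) << 2)
#define TYPE	(UINT64_C(1) << 3)	/* stands in for any bit outside the mask */

static int change_is_safe(uint64_t old, uint64_t new)
{
	const uint64_t mask = PXN | RDONLY | WRITE;

	/* Same rule as the hunk: unset entries, or permission-only changes. */
	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	printf("%d\n", change_is_safe(WRITE, RDONLY));		/* 1: safe */
	printf("%d\n", change_is_safe(WRITE, WRITE | TYPE));	/* 0: needs break-before-make */
	return 0;
}
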
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 4b32168cf91a..b1e42bad69ac 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -424,7 +424,7 @@ static int __init dummy_numa_init(void)
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
- 0LLU, PFN_PHYS(max_pfn) - 1);
+ memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
for_each_memblock(memory, mblk) {
ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 18d96d349a8b..3b95e3126eeb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -64,17 +64,18 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
- mrs x5, cpacr_el1
- mrs x6, tcr_el1
- mrs x7, vbar_el1
- mrs x8, mdscr_el1
- mrs x9, oslsr_el1
- mrs x10, sctlr_el1
+ mrs x5, osdlr_el1
+ mrs x6, cpacr_el1
+ mrs x7, tcr_el1
+ mrs x8, vbar_el1
+ mrs x9, mdscr_el1
+ mrs x10, oslsr_el1
+ mrs x11, sctlr_el1
stp x2, x3, [x0]
- stp x4, xzr, [x0, #16]
- stp x5, x6, [x0, #32]
- stp x7, x8, [x0, #48]
- stp x9, x10, [x0, #64]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
ret
ENDPROC(cpu_do_suspend)
@@ -96,8 +97,8 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
- mrs x5, tcr_el1
- bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ mrs x7, tcr_el1
+ bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
@@ -115,6 +116,7 @@ ENTRY(cpu_do_resume)
/*
* Restore oslsr_el1 by writing oslar_el1
*/
+ msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
@@ -181,7 +183,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
dc cvac, cur_\()\type\()p // Ensure any existing dirty
dmb sy // lines are written back before
ldr \type, [cur_\()\type\()p] // loading the entry
- tbz \type, #0, next_\()\type // Skip invalid entries
+ tbz \type, #0, skip_\()\type // Skip invalid and
+ tbnz \type, #11, skip_\()\type // non-global entries
.endm
.macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -241,8 +244,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
do_pgd: __idmap_kpti_get_pgtable_ent pgd
tbnz pgd, #1, walk_puds
- __idmap_kpti_put_pgtable_ent_ng pgd
next_pgd:
+ __idmap_kpti_put_pgtable_ent_ng pgd
+skip_pgd:
add cur_pgdp, cur_pgdp, #8
cmp cur_pgdp, end_pgdp
b.ne do_pgd
@@ -259,6 +263,15 @@ next_pgd:
msr sctlr_el1, x18
isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
@@ -270,8 +283,9 @@ walk_puds:
add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
do_pud: __idmap_kpti_get_pgtable_ent pud
tbnz pud, #1, walk_pmds
- __idmap_kpti_put_pgtable_ent_ng pud
next_pud:
+ __idmap_kpti_put_pgtable_ent_ng pud
+skip_pud:
add cur_pudp, cur_pudp, 8
cmp cur_pudp, end_pudp
b.ne do_pud
@@ -290,8 +304,9 @@ walk_pmds:
add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
do_pmd: __idmap_kpti_get_pgtable_ent pmd
tbnz pmd, #1, walk_ptes
- __idmap_kpti_put_pgtable_ent_ng pmd
next_pmd:
+ __idmap_kpti_put_pgtable_ent_ng pmd
+skip_pmd:
add cur_pmdp, cur_pmdp, #8
cmp cur_pmdp, end_pmdp
b.ne do_pmd
@@ -309,7 +324,7 @@ walk_ptes:
add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte: __idmap_kpti_get_pgtable_ent pte
__idmap_kpti_put_pgtable_ent_ng pte
-next_pte:
+skip_pte:
add cur_ptep, cur_ptep, #8
cmp cur_ptep, end_ptep
b.ne do_pte
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b47a26f4290c..939c607b1376 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -30,8 +30,6 @@
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index e1c02ca230cb..073bba6f9f60 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -23,7 +23,7 @@ KBUILD_AFLAGS += $(aflags-y)
LDFLAGS += $(ldflags-y)
ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := h8300-unknown-linux-
+CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
endif
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index a62ba368b27d..1ae06190b68f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -103,7 +103,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -119,7 +119,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -136,7 +136,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
"1: %0 = memw_locked(%2);\n" \
" %1 = "#op "(%0,%3);\n" \
" memw_locked(%2,P3)=%1;\n" \
- " if !P3 jump 1b;\n" \
+ " if (!P3) jump 1b;\n" \
: "=&r" (output), "=&r" (val) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
@@ -185,7 +185,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
" }"
" memw_locked(%2, p3) = %1;"
" {"
- " if !p3 jump 1b;"
+ " if (!p3) jump 1b;"
" }"
"2:"
: "=&r" (__oldval), "=&r" (tmp)
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 2691a1857d20..634306cda006 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -52,7 +52,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -76,7 +76,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -102,7 +102,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
"1: R12 = memw_locked(R10);\n"
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
" memw_locked(R10,P1) = R12;\n"
- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
: "=&r" (oldval)
: "r" (addr), "r" (nr)
: "r10", "r11", "r12", "p0", "p1", "memory"
@@ -237,7 +237,7 @@ static inline int ffs(int x)
int r;
asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
- "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
: "=&r" (r)
: "r" (x)
: "p0");
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index a6e34e2acbba..db258424059f 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n" /* load into retval */
" memw_locked(%1,P0) = %2;\n" /* store into memory */
- " if !P0 jump 1b;\n"
+ " if (!P0) jump 1b;\n"
: "=&r" (retval)
: "r" (ptr), "r" (x)
: "memory", "p0"
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index c607b77c8215..12bd92f3ea41 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -15,7 +15,7 @@
/* For example: %1 = %4 */ \
insn \
"2: memw_locked(%3,p2) = %1;\n" \
- " if !p2 jump 1b;\n" \
+ " if (!p2) jump 1b;\n" \
" %1 = #0;\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
@@ -83,10 +83,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
"1: %1 = memw_locked(%3)\n"
" {\n"
" p2 = cmp.eq(%1,%4)\n"
- " if !p2.new jump:NT 3f\n"
+ " if (!p2.new) jump:NT 3f\n"
" }\n"
"2: memw_locked(%3,p2) = %5\n"
- " if !p2 jump 1b\n"
+ " if (!p2) jump 1b\n"
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index a1c55788c5d6..f61bb3185305 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -44,9 +44,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -60,7 +60,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
"1: R6 = memw_locked(%0);\n"
" R6 = add(R6,#-1);\n"
" memw_locked(%0,P3) = R6\n"
- " if !P3 jump 1b;\n"
+ " if (!P3) jump 1b;\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -75,7 +75,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" { %0 = P3 }\n"
"1:\n"
@@ -102,9 +102,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0)\n"
" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -118,7 +118,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1)\n"
" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
- " { if !P3 jump 1f; }\n"
+ " { if (!P3) jump 1f; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
@@ -141,9 +141,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: R6 = memw_locked(%0);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1b; R6 = #1; }\n"
+ " { if (!P3) jump 1b; R6 = #1; }\n"
" memw_locked(%0,P3) = R6;\n"
- " { if !P3 jump 1b; }\n"
+ " { if (!P3) jump 1b; }\n"
:
: "r" (&lock->lock)
: "memory", "r6", "p3"
@@ -163,7 +163,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
__asm__ __volatile__(
" R6 = memw_locked(%1);\n"
" P3 = cmp.eq(R6,#0);\n"
- " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
" memw_locked(%1,P3) = R6;\n"
" %0 = P3;\n"
"1:\n"
diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
index f94918b449a8..03a0e10ecdcc 100644
--- a/arch/hexagon/kernel/stacktrace.c
+++ b/arch/hexagon/kernel/stacktrace.c
@@ -23,8 +23,6 @@
#include <linux/thread_info.h>
#include <linux/module.h>
-register unsigned long current_frame_pointer asm("r30");
-
struct stackframe {
unsigned long fp;
unsigned long rets;
@@ -42,7 +40,7 @@ void save_stack_trace(struct stack_trace *trace)
low = (unsigned long)task_stack_page(current);
high = low + THREAD_SIZE;
- fp = current_frame_pointer;
+ fp = (unsigned long)__builtin_frame_address(0);
while (fp >= low && fp <= (high - sizeof(*frame))) {
frame = (struct stackframe *)fp;
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 67c6ccc14770..9f4a73ff7203 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -382,7 +382,7 @@ ret_from_fork:
R26.L = #LO(do_work_pending);
R0 = #VM_INT_DISABLE;
}
- if P0 jump check_work_pending
+ if (P0) jump check_work_pending
{
R0 = R25;
callr R24
diff --git a/arch/ia64/include/asm/bug.h b/arch/ia64/include/asm/bug.h
index 823616b5020b..19067821249f 100644
--- a/arch/ia64/include/asm/bug.h
+++ b/arch/ia64/include/asm/bug.h
@@ -3,7 +3,11 @@
#ifdef CONFIG_BUG
#define ia64_abort() __builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ barrier_before_unreachable(); \
+ ia64_abort(); \
+} while (0)
/* should this BUG be made generic? */
#define HAVE_ARCH_BUG
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index d1d945c6bd05..9fe114620b9d 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -912,8 +912,12 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
void
module_arch_cleanup (struct module *mod)
{
- if (mod->arch.init_unw_table)
+ if (mod->arch.init_unw_table) {
unw_remove_unwind_table(mod->arch.init_unw_table);
- if (mod->arch.core_unw_table)
+ mod->arch.init_unw_table = NULL;
+ }
+ if (mod->arch.core_unw_table) {
unw_remove_unwind_table(mod->arch.core_unw_table);
+ mod->arch.core_unw_table = NULL;
+ }
}
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index aa19b7ac8222..476c7b4be378 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
}
+EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
/*
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 2081b8cd5591..b9aee983e6f4 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -88,10 +88,19 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
struct ciabase *base = dev_id;
int mach_irq;
unsigned char ints;
+ unsigned long flags;
+ /* Interrupts get disabled while the timer irq flag is cleared and
+ * the timer interrupt serviced.
+ */
mach_irq = base->cia_irq;
+ local_irq_save(flags);
ints = cia_set_irq(base, CIA_ICR_ALL);
amiga_custom.intreq = base->int_mask;
+ if (ints & 1)
+ generic_handle_irq(mach_irq);
+ local_irq_restore(flags);
+ mach_irq++, ints >>= 1;
for (; ints; mach_irq++, ints >>= 1) {
if (ints & 1)
generic_handle_irq(mach_irq);
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 3d2b63bedf05..56f02ea2c248 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -142,7 +142,7 @@ struct mfptimerbase {
.name = "MFP Timer D"
};
-static irqreturn_t mfptimer_handler(int irq, void *dev_id)
+static irqreturn_t mfp_timer_d_handler(int irq, void *dev_id)
{
struct mfptimerbase *base = dev_id;
int mach_irq;
@@ -344,7 +344,7 @@ void __init atari_init_IRQ(void)
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
/* request timer D dispatch handler */
- if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
+ if (request_irq(IRQ_MFP_TIMD, mfp_timer_d_handler, IRQF_SHARED,
stmfp_base.name, &stmfp_base))
pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index c549b48174ec..972181c1fe4b 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -24,6 +24,18 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
+static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
+}
+
void __init
atari_sched_init(irq_handler_t timer_routine)
{
@@ -32,7 +44,8 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
- if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
+ if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, 0, "timer",
+ timer_routine))
pr_err("Couldn't register timer interrupt\n");
}
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 611d4d9ea2bd..3978d71d250b 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -45,11 +45,6 @@ extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
-
-static irq_handler_t tick_handler;
-
int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
{
@@ -159,12 +154,18 @@ irqreturn_t bvme6000_abort_int (int irq, void *dev_id)
static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
+ unsigned char msr;
+ local_irq_save(flags);
+ msr = rtc->msr & 0xc0;
rtc->msr = msr | 0x20; /* Ack the interrupt */
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- return tick_handler(irq, dev_id);
+ return IRQ_HANDLED;
}
/*
@@ -183,9 +184,8 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = 0; /* Ensure timer registers accessible */
- tick_handler = timer_routine;
- if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0,
- "timer", bvme6000_timer_int))
+ if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index 749543b425a4..03c83b8f9032 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -37,13 +37,19 @@
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
unsigned long tmp;
- irq_handler_t vector = dev_id;
+
+ local_irq_save(flags);
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
- return vector(irq, NULL);
+ return IRQ_HANDLED;
}
u32 hp300_gettimeoffset(void)
diff --git a/arch/m68k/include/asm/bug.h b/arch/m68k/include/asm/bug.h
index ef9a2e47352f..21ddbf925e22 100644
--- a/arch/m68k/include/asm/bug.h
+++ b/arch/m68k/include/asm/bug.h
@@ -7,16 +7,19 @@
#ifndef CONFIG_SUN3
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ barrier_before_unreachable(); \
__builtin_trap(); \
} while (0)
#else
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ barrier_before_unreachable(); \
panic("BUG!"); \
} while (0)
#endif
#else
#define BUG() do { \
+ barrier_before_unreachable(); \
__builtin_trap(); \
} while (0)
#endif
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index b3536a82a262..e002084af101 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -103,5 +103,5 @@ __init void process_uboot_commandline(char *commandp, int size)
}
parse_uboot_commandline(commandp, len);
- commandp[size - 1] = 0;
+ commandp[len - 1] = 0;
}
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 920ff63d4a81..a435aced6e43 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -54,16 +54,6 @@ static __u8 rbv_clear;
static int gIER,gIFR,gBufA,gBufB;
/*
- * Timer defs.
- */
-
-#define TICK_SIZE 10000
-#define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */
-#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
-#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
-
-
-/*
* On Macs with a genuine VIA chip there is no way to mask an individual slot
* interrupt. This limitation also seems to apply to VIA clone logic cores in
* Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
@@ -278,22 +268,6 @@ void __init via_init(void)
}
/*
- * Start the 100 Hz clock
- */
-
-void __init via_init_clock(irq_handler_t func)
-{
- via1[vACR] |= 0x40;
- via1[vT1LL] = MAC_CLOCK_LOW;
- via1[vT1LH] = MAC_CLOCK_HIGH;
- via1[vT1CL] = MAC_CLOCK_LOW;
- via1[vT1CH] = MAC_CLOCK_HIGH;
-
- if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
- pr_err("Couldn't register %s interrupt\n", "timer");
-}
-
-/*
* Debugging dump, used in various places to see what's going on.
*/
@@ -321,29 +295,6 @@ void via_debug_dump(void)
}
/*
- * This is always executed with interrupts disabled.
- *
- * TBI: get time offset between scheduling timer ticks
- */
-
-u32 mac_gettimeoffset(void)
-{
- unsigned long ticks, offset = 0;
-
- /* read VIA1 timer 2 current value */
- ticks = via1[vT1CL] | (via1[vT1CH] << 8);
- /* The probability of underflow is less than 2% */
- if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
- /* Check for pending timer interrupt in VIA1 IFR */
- if (via1[vIFR] & 0x40) offset = TICK_SIZE;
-
- ticks = MAC_CLOCK_TICK - ticks;
- ticks = ticks * 10000L / MAC_CLOCK_TICK;
-
- return (ticks + offset) * 1000;
-}
-
-/*
* Flush the L2 cache on Macs that have it by flipping
* the system into 24-bit mode for an instant.
*/
@@ -446,6 +397,8 @@ void via_nubus_irq_shutdown(int irq)
* via6522.c :-), disable/pending masks added.
*/
+#define VIA_TIMER_1_INT BIT(6)
+
void via1_irq(struct irq_desc *desc)
{
int irq_num;
@@ -455,6 +408,21 @@ void via1_irq(struct irq_desc *desc)
if (!events)
return;
+ irq_num = IRQ_MAC_TIMER_1;
+ irq_bit = VIA_TIMER_1_INT;
+ if (events & irq_bit) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ via1[vIFR] = irq_bit;
+ generic_handle_irq(irq_num);
+ local_irq_restore(flags);
+
+ events &= ~irq_bit;
+ if (!events)
+ return;
+ }
+
irq_num = VIA1_SOURCE_BASE;
irq_bit = 1;
do {
@@ -619,3 +587,56 @@ int via2_scsi_drq_pending(void)
return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
}
EXPORT_SYMBOL(via2_scsi_drq_pending);
+
+/* timer and clock source */
+
+#define VIA_CLOCK_FREQ 783360 /* VIA "phase 2" clock in Hz */
+#define VIA_TIMER_INTERVAL (1000000 / HZ) /* microseconds per jiffy */
+#define VIA_TIMER_CYCLES (VIA_CLOCK_FREQ / HZ) /* clock cycles per jiffy */
+
+#define VIA_TC (VIA_TIMER_CYCLES - 2) /* including 0 and -1 */
+#define VIA_TC_LOW (VIA_TC & 0xFF)
+#define VIA_TC_HIGH (VIA_TC >> 8)
+
+void __init via_init_clock(irq_handler_t timer_routine)
+{
+ if (request_irq(IRQ_MAC_TIMER_1, timer_routine, 0, "timer", NULL)) {
+ pr_err("Couldn't register %s interrupt\n", "timer");
+ return;
+ }
+
+ via1[vT1LL] = VIA_TC_LOW;
+ via1[vT1LH] = VIA_TC_HIGH;
+ via1[vT1CL] = VIA_TC_LOW;
+ via1[vT1CH] = VIA_TC_HIGH;
+ via1[vACR] |= 0x40;
+}
+
+u32 mac_gettimeoffset(void)
+{
+ unsigned long flags;
+ u8 count_high;
+ u16 count, offset = 0;
+
+ /*
+ * Timer counter wrap-around is detected with the timer interrupt flag
+ * but reading the counter low byte (vT1CL) would reset the flag.
+ * Also, accessing both counter registers is essentially a data race.
+ * These problems are avoided by ignoring the low byte. Clock accuracy
+ * is 256 times worse (error can reach 0.327 ms) but CPU overhead is
+ * reduced by avoiding slow VIA register accesses.
+ */
+
+ local_irq_save(flags);
+ count_high = via1[vT1CH];
+ if (count_high == 0xFF)
+ count_high = 0;
+ if (count_high > 0 && (via1[vIFR] & VIA_TIMER_1_INT))
+ offset = VIA_TIMER_CYCLES;
+ local_irq_restore(flags);
+
+ count = count_high << 8;
+ count = VIA_TIMER_CYCLES - count + offset;
+
+ return ((count * VIA_TIMER_INTERVAL) / VIA_TIMER_CYCLES) * 1000;
+}
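
A quick check of the numbers in the new via.c timer code above: assuming the usual HZ = 100 (implied by the 1000000 / HZ microsecond interval), VIA_TIMER_CYCLES = 783360 / 100 = 7833 clock cycles per 10 ms jiffy. Ignoring the low counter byte in mac_gettimeoffset() quantises the reading to 256 cycles, and 256 / 783360 Hz ≈ 0.327 ms, which matches the worst-case error quoted in the comment; the final expression then scales the cycle count to nanoseconds via (count * VIA_TIMER_INTERVAL / VIA_TIMER_CYCLES) * 1000.
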
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index c11d38dfad08..1a095443790b 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -46,11 +46,6 @@ extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme147_process_int() */
-
-irq_handler_t tick_handler;
-
int __init mvme147_parse_bootinfo(const struct bi_record *bi)
{
@@ -106,16 +101,23 @@ void __init config_mvme147(void)
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
- return tick_handler(irq, dev_id);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme147_sched_init (irq_handler_t timer_routine)
{
- tick_handler = timer_routine;
- if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
+ if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1",
+ timer_routine))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 58e240939d26..ac49fa7ec46b 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -51,11 +51,6 @@ extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme16x_process_int() */
-
-static irq_handler_t tick_handler;
-
unsigned short mvme16x_config;
EXPORT_SYMBOL(mvme16x_config);
@@ -354,8 +349,15 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
{
- *(volatile unsigned char *)0xfff4201b |= 8;
- return tick_handler(irq, dev_id);
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *(volatile unsigned char *)0xfff4201b |= 8;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme16x_sched_init (irq_handler_t timer_routine)
@@ -363,14 +365,13 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
int irq;
- tick_handler = timer_routine;
/* Using PCCchip2 or MC2 chip tick timer 1 */
*(volatile unsigned long *)0xfff42008 = 0;
*(volatile unsigned long *)0xfff42004 = 10000; /* 10ms */
*(volatile unsigned char *)0xfff42017 |= 3;
*(volatile unsigned char *)0xfff4201b = 0x16;
- if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0,
- "timer", mvme16x_timer_int))
+ if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
if (brdno == 0x0162 || brdno == 0x172)
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 513f9bb17b9c..60b51f5b9cfc 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -126,10 +126,10 @@ void q40_mksound(unsigned int hz, unsigned int ticks)
sound_ticks = ticks << 1;
}
-static irq_handler_t q40_timer_routine;
-
-static irqreturn_t q40_timer_int (int irq, void * dev)
+static irqreturn_t q40_timer_int(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+
ql_ticks = ql_ticks ? 0 : 1;
if (sound_ticks) {
unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
@@ -138,8 +138,13 @@ static irqreturn_t q40_timer_int (int irq, void * dev)
*DAC_RIGHT=sval;
}
- if (!ql_ticks)
- q40_timer_routine(irq, dev);
+ if (!ql_ticks) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+ }
return IRQ_HANDLED;
}
@@ -147,11 +152,9 @@ void q40_sched_init (irq_handler_t timer_routine)
{
int timer_irq;
- q40_timer_routine = timer_routine;
timer_irq = Q40_IRQ_FRAME;
- if (request_irq(timer_irq, q40_timer_int, 0,
- "timer", q40_timer_int))
+ if (request_irq(timer_irq, q40_timer_int, 0, "timer", timer_routine))
panic("Couldn't register timer int");
master_outb(-1, FRAME_CLEAR_REG);
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 6bbca30c9188..a5824abb4a39 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -61,8 +61,10 @@ static irqreturn_t sun3_int7(int irq, void *dev_id)
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
+ unsigned long flags;
unsigned int cnt;
+ local_irq_save(flags);
#ifdef CONFIG_SUN3
intersil_clear();
#endif
@@ -76,6 +78,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
cnt = kstat_irqs_cpu(irq, 0);
if (!(cnt % 20))
sun3_leds(led_pattern[cnt % 160 / 20]);
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
index 431d3c4306dd..a4f6a44d3418 100644
--- a/arch/m68k/sun3x/time.c
+++ b/arch/m68k/sun3x/time.c
@@ -77,15 +77,19 @@ u32 sun3x_gettimeoffset(void)
}
#if 0
-static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t sun3x_timer_tick(int irq, void *dev_id)
{
- void (*vector)(int, void *, struct pt_regs *) = dev_id;
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
- /* Clear the pending interrupt - pulse the enable line low */
- disable_irq(5);
- enable_irq(5);
+ local_irq_save(flags);
+ /* Clear the pending interrupt - pulse the enable line low */
+ disable_irq(5);
+ enable_irq(5);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- vector(irq, NULL, regs);
+ return IRQ_HANDLED;
}
#endif
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 740f2b82a182..491676a6cde5 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -75,19 +75,21 @@ archclean:
linux.bin linux.bin.gz linux.bin.ub: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
simpleImage.%: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
define archhelp
echo '* linux.bin - Create raw binary'
echo ' linux.bin.gz - Create compressed raw binary'
echo ' linux.bin.ub - Create U-Boot wrapped raw binary'
- echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
- echo ' - stripped elf with fdt blob'
- echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
- echo ' *_defconfig - Select default config from arch/microblaze/configs'
- echo ''
+ echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in'
+ echo ' simpleImage.<dt> : raw image'
+ echo ' simpleImage.<dt>.ub : raw image with U-Boot header'
+ echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)'
+ echo ' simpleImage.<dt>.strip : stripped ELF'
echo ' Targets with <dt> embed a device tree blob inside the image'
echo ' These targets support board with firmware that does not'
echo ' support passing a device tree directly. Replace <dt> with the'
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 0f3fe6a151dc..22bed08ec7f2 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -8,15 +8,12 @@ OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary
$(obj)/linux.bin: vmlinux FORCE
$(call if_changed,objcopy)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.ub: $(obj)/linux.bin FORCE
$(call if_changed,uimage)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
$(call if_changed,gzip)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
@@ -34,6 +31,5 @@ $(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,objcopy)
$(call if_changed,uimage)
$(call if_changed,strip,.strip)
- @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index 0bde47e4fa69..dcba53803fa5 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -92,7 +92,8 @@ static inline void __disable_dcache_nomsr(void)
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
int align = ~(cache_line_length - 1); \
- end = min(start + cache_size, end); \
+ if (start < UINT_MAX - cache_size) \
+ end = min(start + cache_size, end); \
start &= align; \
} while (0)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bb9940c6927e..f8a529c85279 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,6 +13,7 @@ config MIPS
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
+ select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -791,7 +792,6 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -804,6 +804,7 @@ config SIBYTE_LITTLESUR
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select ZONE_DMA32 if 64BIT
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
@@ -814,7 +815,6 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -828,7 +828,6 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
- select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
index e81ce4623070..06fb94370c7c 100644
--- a/arch/mips/bcm47xx/workarounds.c
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -4,9 +4,8 @@
#include <bcm47xx_board.h>
#include <bcm47xx.h>
-static void __init bcm47xx_workarounds_netgear_wnr3500l(void)
+static void __init bcm47xx_workarounds_enable_usb_power(int usb_power)
{
- const int usb_power = 12;
int err;
err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
@@ -22,7 +21,10 @@ void __init bcm47xx_workarounds(void)
switch (board) {
case BCM47XX_BOARD_NETGEAR_WNR3500L:
- bcm47xx_workarounds_netgear_wnr3500l();
+ bcm47xx_workarounds_enable_usb_power(12);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ bcm47xx_workarounds_enable_usb_power(21);
break;
default:
/* No workaround(s) needed */
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 7019e2967009..bbbf8057565b 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -84,7 +84,7 @@ void __init prom_init(void)
* Here we will start up CPU1 in the background and ask it to
* reconfigure itself then go back to sleep.
*/
- memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+ memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
__sync();
set_c0_cause(C_SW0);
cpumask_set_cpu(1, &bmips_booted_mask);
diff --git a/arch/mips/bcm63xx/reset.c b/arch/mips/bcm63xx/reset.c
index d1fe51edf5e6..4d411da2497b 100644
--- a/arch/mips/bcm63xx/reset.c
+++ b/arch/mips/bcm63xx/reset.c
@@ -119,7 +119,7 @@
#define BCM6368_RESET_DSL 0
#define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK
#define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK
-#define BCM6368_RESET_ENETSW 0
+#define BCM6368_RESET_ENETSW SOFTRESET_6368_ENETSW_MASK
#define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK
#define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK
#define BCM6368_RESET_PCIE 0
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 90aca95fe314..2f77e250b91d 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
# decompressor objects (linked with vmlinuz)
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
@@ -75,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
$(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
$(call if_changed,objcopy)
+HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
+
# Calculate the load address of the compressed kernel image
hostprogs-y := calc_vmlinuz_load_addr
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 542c3ede9722..d14f75ec8273 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,7 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
-#include "../../../../include/linux/sizes.h"
+#include <linux/sizes.h>
int main(int argc, char *argv[])
{
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index cf47ed4d8569..1fda24fc1860 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -98,7 +98,7 @@
miscintc: interrupt-controller@18060010 {
compatible = "qca,ar7240-misc-intc";
- reg = <0x18060010 0x4>;
+ reg = <0x18060010 0x8>;
interrupt-parent = <&cpuintc>;
interrupts = <6>;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
index 8241fc6aa17d..3839feba68f2 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -266,7 +266,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
} else {
union cvmx_pko_mem_debug8 debug8;
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- return debug8.cn58xx.doorbell;
+ return debug8.cn50xx.doorbell;
}
case CVMX_CMD_QUEUE_ZIP:
case CVMX_CMD_QUEUE_DFA:
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6420c83c29d1..ff5fc917ef95 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2199,6 +2199,9 @@ static int octeon_irq_cib_map(struct irq_domain *d,
}
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
cd->host_data = host_data;
cd->bit = hw;
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 1ba6bcf98570..2ecc8d1b0539 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -502,7 +502,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
if (phy_addr >= 256 && alt_phy > 0) {
const struct fdt_property *phy_prop;
struct fdt_property *alt_prop;
- u32 phy_handle_name;
+ fdt32_t phy_handle_name;
/* Use the alt phy node instead.*/
phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index f3f60056bc27..fb5651b99ab2 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -637,7 +637,6 @@ CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_EMI62=m
CONFIG_USB_EMI26=m
CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index c2b4e3f33a73..4f6b45f64c2f 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -350,7 +350,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
CONFIG_USB_LED=m
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 6aa264b9856a..7c6151d412bd 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -42,7 +42,7 @@
/* O32 stack has to be 8-byte aligned. */
static u64 o32_stk[4096];
-#define O32_STK &o32_stk[sizeof(o32_stk)]
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index a92aee7b977a..23f55af7d6ba 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
#endif
}
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index e081a265f422..cc2eb1b06050 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -8,6 +8,41 @@
#ifndef _ASM_COMPILER_H
#define _ASM_COMPILER_H
+/*
+ * With GCC 4.5 onwards we can use __builtin_unreachable to indicate to the
+ * compiler that a particular code path will never be hit. This allows it to be
+ * optimised out of the generated binary.
+ *
+ * Unfortunately at least GCC 4.6.3 through 7.3.0 inclusive suffer from a bug
+ * that can lead to instructions from beyond an unreachable statement being
+ * incorrectly reordered into earlier delay slots if the unreachable statement
+ * is the only content of a case in a switch statement. This can lead to
+ * seemingly random behaviour, such as invalid memory accesses from incorrectly
+ * reordered loads or stores. See this potential GCC fix for details:
+ *
+ * https://gcc.gnu.org/ml/gcc-patches/2015-09/msg00360.html
+ *
+ * It is unclear whether GCC 8 onwards suffer from the same issue - nothing
+ * relevant is mentioned in GCC 8 release notes and nothing obviously relevant
+ * stands out in GCC commit logs, but these newer GCC versions generate very
+ * different code for the testcase which doesn't exhibit the bug.
+ *
+ * GCC also handles stack allocation suboptimally when calling noreturn
+ * functions or calling __builtin_unreachable():
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * We work around both of these issues by placing a volatile asm statement,
+ * which GCC is prevented from reordering past, prior to __builtin_unreachable
+ * calls.
+ *
+ * The .insn statement is required to ensure that any branches to the
+ * statement, which sadly must be kept due to the asm statement, are known to
+ * be branches to code and satisfy linker requirements for microMIPS kernels.
+ */
+#undef barrier_before_unreachable
+#define barrier_before_unreachable() asm volatile(".insn")
+
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
#define GCC_IMM_ASM() "n"
#define GCC_REG_ACCUM "$0"
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 06049b6b3ddd..5dfae80264b9 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -60,21 +60,11 @@
* instruction, so the lower 16 bits must be zero. Should be true on
* on any sane architecture; generic code does not use this assumption.
*/
-extern const unsigned long mips_io_port_base;
+extern unsigned long mips_io_port_base;
-/*
- * Gcc will generate code to load the value of mips_io_port_base after each
- * function call which may be fairly wasteful in some cases. So we don't
- * play quite by the book. We tell gcc mips_io_port_base is a long variable
- * which solves the code generation issue. Now we need to violate the
- * aliasing rules a little to make initialization possible and finally we
- * will need the barrier() to fight side effects of the aliasing chat.
- * This trickery will eventually collapse under gcc's optimizer. Oh well.
- */
static inline void set_io_port_base(unsigned long base)
{
- * (unsigned long *) &mips_io_port_base = base;
- barrier();
+ mips_io_port_base = base;
}
/*
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index 493a3cc7c39a..cfdbe66575f4 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -12,11 +12,11 @@
#include <asm/stacktrace.h>
/* Maximum physical address we can use pages from */
-#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
-#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve 3*4096 bytes for board-specific info */
#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index c2917b39966b..bba2c8837951 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -27,8 +27,8 @@
#define AR933X_UART_CS_PARITY_S 0
#define AR933X_UART_CS_PARITY_M 0x3
#define AR933X_UART_CS_PARITY_NONE 0
-#define AR933X_UART_CS_PARITY_ODD 1
-#define AR933X_UART_CS_PARITY_EVEN 2
+#define AR933X_UART_CS_PARITY_ODD 2
+#define AR933X_UART_CS_PARITY_EVEN 3
#define AR933X_UART_CS_IF_MODE_S 2
#define AR933X_UART_CS_IF_MODE_M 0x3
#define AR933X_UART_CS_IF_MODE_NONE 0
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 5604db3d1836..d79c68fa78d9 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -301,8 +301,6 @@ static inline int nlm_fmn_send(unsigned int size, unsigned int code,
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status0();
- if ((status & 0x2) == 1)
- pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index 5f47f76ed510..20eb9c46a75a 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -611,7 +611,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- status->doorbell = debug8.cn58xx.doorbell;
+ status->doorbell = debug8.cn50xx.doorbell;
}
}
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 060f23ff1817..258158c34df1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -25,7 +25,17 @@ extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+static inline int raw_smp_processor_id(void)
+{
+#if defined(__VDSO__)
+ extern int vdso_smp_processor_id(void)
+ __compiletime_error("VDSO should not call smp_processor_id()");
+ return vdso_smp_processor_id();
+#else
+ return current_thread_info()->cpu;
+#endif
+}
+#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index e309d8fcb516..da1cb0499d6c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -52,8 +52,26 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
register struct thread_info *__current_thread_info __asm__("$28");
+#endif
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h
index 876442fcfb32..5be81f8fd479 100644
--- a/arch/mips/include/uapi/asm/sgidefs.h
+++ b/arch/mips/include/uapi/asm/sgidefs.h
@@ -11,14 +11,6 @@
#define __ASM_SGIDEFS_H
/*
- * Using a Linux compiler for building Linux seems logic but not to
- * everybody.
- */
-#ifndef __linux__
-#error Use a Linux compiler or give up.
-#endif
-
-/*
* Definitions for the ISA levels
*
* With the introduction of MIPS32 / MIPS64 instruction sets definitions
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index c5bc344fc745..73039746ae36 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
static int __init init_pit_clocksource(void)
{
- if (num_possible_cpus() > 1) /* PIT does not scale! */
+ if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ !clockevent_state_periodic(&i8253_clockevent))
return 0;
return clocksource_i8253_init();
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 7913a5cf6806..b9c788790c0f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -125,7 +125,7 @@ trace_a_syscall:
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
- lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop
1: jal syscall_trace_enter
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 695950361d2a..7cc1d29334ee 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -74,7 +74,7 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
* mips_io_port_base is the begin of the address space to which x86 style
* I/O ports are mapped.
*/
-const unsigned long mips_io_port_base = -1;
+unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index d4a293b68249..416d53f587e7 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -453,10 +453,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
static inline void bmips_nmi_handler_setup(void)
{
- bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
- &bmips_reset_nmi_vec_end);
- bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
- &bmips_smp_int_vec_end);
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+ bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+ bmips_smp_int_vec_end);
}
struct reset_vec_info {
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index dbb917403131..ec951dde0999 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -111,9 +111,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
*/
aup->resume_epc = regs->cp0_epc + 4;
if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
- unsigned long epc;
-
- epc = regs->cp0_epc;
__compute_return_epc_for_insn(regs,
(union mips_instruction) aup->insn[0]);
aup->resume_epc = regs->cp0_epc;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 544ea21bfef9..b2683aca401f 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
{
list_del(&v->list);
if (v->load_addr)
- release_progmem(v);
+ release_progmem(v->load_addr);
kfree(v);
}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 8ac0e5994ed2..7c6f75c2aa4d 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -160,8 +160,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
if (edge)
irq_set_handler(d->hwirq, handle_edge_irq);
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
- (val << (i * 4)), LTQ_EIU_EXIN_C);
+ ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+ (~(7 << (i * 4)))) | (val << (i * 4)),
+ LTQ_EIU_EXIN_C);
}
}
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 0fce4608aa88..12abf14aed4a 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -43,6 +43,10 @@ else
$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
endif
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
#
# Loongson Machines' Support
#
diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
index ffefc1cb2612..98c3a7feb10f 100644
--- a/arch/mips/loongson64/common/serial.c
+++ b/arch/mips/loongson64/common/serial.c
@@ -110,7 +110,7 @@ static int __init serial_init(void)
}
module_init(serial_init);
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
{
platform_device_unregister(&uart8250_device);
}
diff --git a/arch/mips/loongson64/loongson-3/platform.c b/arch/mips/loongson64/loongson-3/platform.c
index 25a97cc0ee33..0db4cc3196eb 100644
--- a/arch/mips/loongson64/loongson-3/platform.c
+++ b/arch/mips/loongson64/loongson-3/platform.c
@@ -31,6 +31,9 @@ static int __init loongson3_platform_init(void)
continue;
pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
pdev->name = loongson_sysconf.sensors[i].name;
pdev->id = loongson_sysconf.sensors[i].id;
pdev->dev.platform_data = &loongson_sysconf.sensors[i];
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 7f2519cfb5d2..15f788601b64 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -828,12 +828,12 @@ do { \
} while (0)
#define DIFROMREG(di, x) \
- ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0))
+ ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0))
#define DITOREG(di, x) \
do { \
unsigned fpr, i; \
- fpr = (x) & ~(cop1_64bit(xcp) == 0); \
+ fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \
set_fpr64(&ctx->fpr[fpr], 0, di); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
set_fpr64(&ctx->fpr[fpr], i, 0); \
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2da5649fc545..f625fd20b21e 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -386,6 +386,7 @@ static struct work_registers build_get_work_registers(u32 **p)
static void build_restore_work_registers(u32 **p)
{
if (scratch_reg >= 0) {
+ uasm_i_ehb(p);
UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
return;
}
@@ -636,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
return;
}
- if (cpu_has_rixi && _PAGE_NO_EXEC) {
+ if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
} else {
@@ -660,6 +661,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
int restore_scratch)
{
if (restore_scratch) {
+ /*
+ * Ensure the MFC0 below observes the value written to the
+ * KScratch register by the prior MTC0.
+ */
+ if (scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/* Reset default page size */
if (PM_DEFAULT_MASK >> 16) {
uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -919,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
if (mode != not_refill && check_for_high_segbits) {
uasm_l_large_segbits_fault(l, *p);
+
+ if (mode == refill_scratch && scratch_reg >= 0)
+ uasm_i_ehb(p);
+
/*
* We get here if we are an xsseg address, or if we are
* an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -1238,6 +1250,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
if (c0_scratch_reg >= 0) {
+ uasm_i_ehb(p);
UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
build_tlb_write_entry(p, l, r, tlb_random);
uasm_l_leave(l, *p);
@@ -1592,15 +1605,17 @@ static void build_setup_pgd(void)
uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
uasm_l_tlbl_goaround1(&l, p);
UASM_i_SLL(&p, a0, a0, 11);
- uasm_i_jr(&p, 31);
UASM_i_MTC0(&p, a0, C0_CONTEXT);
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
} else {
/* PGD in c0_KScratch */
- uasm_i_jr(&p, 31);
if (cpu_has_ldpte)
UASM_i_MTC0(&p, a0, C0_PWBASE);
else
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
}
#else
#ifdef CONFIG_SMP
@@ -1614,13 +1629,16 @@ static void build_setup_pgd(void)
UASM_i_LA_mostly(&p, a2, pgdc);
UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
#endif /* SMP */
- uasm_i_jr(&p, 31);
/* if pgd_reg is allocated, save PGD also to scratch register */
- if (pgd_reg != -1)
+ if (pgd_reg != -1) {
UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
- else
+ uasm_i_jr(&p, 31);
+ uasm_i_ehb(&p);
+ } else {
+ uasm_i_jr(&p, 31);
uasm_i_nop(&p);
+ }
#endif
if (p >= tlbmiss_handler_setup_pgd_end)
panic("tlbmiss_handler_setup_pgd space exceeded");
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 248603739198..bb9f779326d0 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1194,8 +1194,6 @@ jmp_cmp:
return 0;
}
-int bpf_jit_enable __read_mostly;
-
void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
diff --git a/arch/mips/pistachio/Platform b/arch/mips/pistachio/Platform
index d80cd612df1f..c3592b374ad2 100644
--- a/arch/mips/pistachio/Platform
+++ b/arch/mips/pistachio/Platform
@@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
-I$(srctree)/arch/mips/include/asm/mach-pistachio
load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
+all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index 3ef3fb658136..b3d6bf23a662 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,5 +1,4 @@
obj-y := cfe.o
-obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
deleted file mode 100644
index eb47a94f3583..000000000000
--- a/arch/mips/sibyte/common/dma.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * DMA support for Broadcom SiByte platforms.
- *
- * Copyright (c) 2018 Maciej W. Rozycki
- */
-
-#include <linux/swiotlb.h>
-#include <asm/bootinfo.h>
-
-void __init plat_swiotlb_setup(void)
-{
- swiotlb_init(1);
-}
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index a1d98b5c8fd6..5c53b8aa43d2 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -959,12 +959,11 @@ void __init txx9_sramc_init(struct resource *r)
goto exit_put;
err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
if (err) {
- device_unregister(&dev->dev);
iounmap(dev->base);
- kfree(dev);
+ device_unregister(&dev->dev);
}
return;
exit_put:
+ iounmap(dev->base);
put_device(&dev->dev);
- return;
}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 0b845cc7fbdc..adfaee2dce34 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -6,7 +6,10 @@ ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
- $(filter -march=%,$(KBUILD_CFLAGS))
+ $(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
+ $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
+ -D__VDSO__
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
index bf2f55d10a4d..4e704046a150 100644
--- a/arch/nios2/kernel/nios2_ksyms.c
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -9,12 +9,20 @@
#include <linux/export.h>
#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
/* string functions */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
+/* memory management */
+
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(flush_icache_range);
+
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
@@ -31,3 +39,7 @@ DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__umoddi3);
DECLARE_EXPORT(__umodsi3);
DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ucmpdi2);
+DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__ashrdi3);
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index fec8bf97d806..c17e8451d997 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -179,7 +179,7 @@ handler: ;\
* occured. in fact they never do. if you need them use
* values saved on stack (for SPR_EPC, SPR_ESR) or content
* of r4 (for SPR_EEAR). for details look at EXCEPTION_HANDLE()
- * in 'arch/or32/kernel/head.S'
+ * in 'arch/openrisc/kernel/head.S'
*/
/* =====================================================[ exceptions] === */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index f14793306b03..98dd6860bc0b 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -1596,7 +1596,7 @@ _string_esr_irq_bug:
/*
* .data section should be page aligned
- * (look into arch/or32/kernel/vmlinux.lds)
+ * (look into arch/openrisc/kernel/vmlinux.lds.S)
*/
.section .data,"aw"
.align 8192
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 60e6f07b7e32..eb83d65153b8 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -59,14 +59,14 @@
#define LDCW ldcw,co
#define BL b,l
# ifdef CONFIG_64BIT
-# define LEVEL 2.0w
+# define PA_ASM_LEVEL 2.0w
# else
-# define LEVEL 2.0
+# define PA_ASM_LEVEL 2.0
# endif
#else
#define LDCW ldcw
#define BL bl
-#define LEVEL 1.1
+#define PA_ASM_LEVEL 1.1
#endif
#ifdef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 7ada30900807..90253bdc2ee5 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -43,8 +43,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) _x_ = (x); \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2e674e13e005..656984ec1958 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -323,6 +323,8 @@ extern int _parisc_requires_coherency;
#define parisc_requires_coherency() (0)
#endif
+extern int running_on_qemu;
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index bbbe360b458f..9b99eb0712ad 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
- .level LEVEL
+ .level PA_ASM_LEVEL
__INITDATA
ENTRY(boot_args)
@@ -254,7 +254,7 @@ stext_pdc_ret:
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
- .level LEVEL /* restore 1.1 || 2.0w */
+ .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index c3a532abac03..b4e3edad53ab 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -189,6 +189,7 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
*/
int running_on_qemu __read_mostly;
+EXPORT_SYMBOL(running_on_qemu);
void __cpuidle arch_cpu_idle_dead(void)
{
@@ -206,12 +207,6 @@ void __cpuidle arch_cpu_idle(void)
static int __init parisc_idle_init(void)
{
- const char *marker;
-
- /* check QEMU/SeaBIOS marker in PAGE0 */
- marker = (char *) &PAGE0->pad0;
- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
-
if (!running_on_qemu)
cpu_idle_poll_ctrl(1);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 0780c375fe2e..e204fc49517d 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -170,6 +170,9 @@ long arch_ptrace(struct task_struct *child, long request,
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
+ if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
+ data |= 3; /* ensure userspace privilege */
+ }
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
@@ -231,16 +234,18 @@ long arch_ptrace(struct task_struct *child, long request,
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
- if (offset < 0)
- return sizeof(struct pt_regs);
- else if (offset <= 32*4) /* gr[0..31] */
- return offset * 2 + 4;
- else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
- return offset + 32*4;
- else if (offset < sizeof(struct pt_regs)/2 + 32*4)
- return offset * 2 + 4 - 32*8;
+ compat_ulong_t pos;
+
+ if (offset < 32*4) /* gr[0..31] */
+ pos = offset * 2 + 4;
+ else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */
+ pos = (offset - 32*4) + PT_FR0;
+ else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
+ pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
else
- return sizeof(struct pt_regs);
+ pos = sizeof(struct pt_regs);
+
+ return pos;
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@@ -284,9 +289,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
+ if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
+ data |= 3; /* ensure userspace privilege */
+ }
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
- *(__u64 *) ((char *) task_regs(child) + addr) = data;
+ *(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
@@ -499,7 +507,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
return;
case RI(iaoq[0]):
case RI(iaoq[1]):
- regs->iaoq[num - RI(iaoq[0])] = val;
+ /* set 2 lowest bits to ensure userspace privilege: */
+ regs->iaoq[num - RI(iaoq[0])] = val | 3;
return;
case RI(sar): regs->sar = val;
return;
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 2e66a887788e..581b0c66e521 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -403,6 +403,9 @@ void start_parisc(void)
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
+ /* check QEMU/SeaBIOS marker in PAGE0 */
+ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
+
cpunum = smp_processor_id();
set_firmware_width_unlocked();
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 5f7e57fcaeef..0cf379acb5ed 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -48,7 +48,7 @@ registers).
*/
#define KILL_INSN break 0,0
- .level LEVEL
+ .level PA_ASM_LEVEL
.text
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 47ef8fdcd382..22754e0c3bda 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -299,7 +299,7 @@ static int __init init_cr16_clocksource(void)
* The cr16 interval timers are not syncronized across CPUs, so mark
* them unstable and lower rating on SMP systems.
*/
- if (num_online_cpus() > 1) {
+ if (num_online_cpus() > 1 && !running_on_qemu) {
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
}
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
index 933423fa5144..b0db61188a61 100644
--- a/arch/parisc/math-emu/cnv_float.h
+++ b/arch/parisc/math-emu/cnv_float.h
@@ -60,19 +60,19 @@
((exponent < (SGL_P - 1)) ? \
(Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
-#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH)
+#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0)
#define Sgl_roundnearest_from_int(int_value,sgl_value) \
if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
- if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
+ if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
Sall(sgl_value)++
#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
- ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB))
+ (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
- if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \
+ if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \
Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
#define Dint_isinexact_to_dbl(dint_value) \
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 838d0259cd27..3741f91fc186 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -2,7 +2,7 @@
* arch/parisc/mm/ioremap.c
*
* (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
* (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
@@ -83,7 +83,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
- vfree(addr);
+ vunmap(addr);
return NULL;
}
@@ -91,9 +91,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
}
EXPORT_SYMBOL(__ioremap);
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
{
- if (addr > high_memory)
- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+ if (is_vmalloc_addr((void *)addr))
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 0a6bb48854e3..f529d3d9d88d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -85,6 +85,7 @@ config PPC
select BINFMT_ELF
select ARCH_HAS_ELF_RANDOMIZE
select OF
+ select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
@@ -128,7 +129,7 @@ config PPC
select ARCH_HAS_GCOV_PROFILE_ALL
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
+ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_TIME_VSYSCALL_OLD
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -164,6 +165,11 @@ config PPC
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
+config PPC_BARRIER_NOSPEC
+ bool
+ default y
+ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a60c9c6e5cc1..de29b88c0e70 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -373,7 +373,9 @@ vdso_install:
ifeq ($(CONFIG_PPC64),y)
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
endif
+ifdef CONFIG_VDSO32
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+endif
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c
index 9d3bd4c45a24..1c4354f922fd 100644
--- a/arch/powerpc/boot/4xx.c
+++ b/arch/powerpc/boot/4xx.c
@@ -232,7 +232,7 @@ void ibm4xx_denali_fixup_memsize(void)
dpath = 8; /* 64 bits */
/* get address pins (rows) */
- val = SDRAM0_READ(DDR0_42);
+ val = SDRAM0_READ(DDR0_42);
row = DDR_GET_VAL(val, DDR_APIN, DDR_APIN_SHIFT);
if (row > max_row)
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index 9d9f6f334d3c..3da3e2b1b51b 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -223,7 +223,11 @@ main(int ac, char **av)
PUT_16(E_PHNUM, np + 2);
/* write back */
- lseek(fd, (long) 0, SEEK_SET);
+ i = lseek(fd, (long) 0, SEEK_SET);
+ if (i < 0) {
+ perror("lseek");
+ exit(1);
+ }
i = write(fd, buf, n);
if (i < 0) {
perror("write");
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index aa68911f6560..084b82ba7493 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -268,8 +268,10 @@
/* Outbound ranges, one memory and one IO,
* later cannot be changed. Chip supports a second
* IO range but we don't use it for now
+ * The chip also supports a larger memory range but
+ * it's not naturally aligned, so our code will break
*/
- ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
+ ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000
0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
index e1a961f05dcd..baa0c503e741 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
@@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
index c288f3c6c637..93095600e808 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
@@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy6: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
index 94f3e7175012..ff4bd38f0645 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
@@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
index 94a76982d214..1fa38ed6f59e 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
@@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy7: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
index b5ff5f71c6b8..a8cc9780c0c4 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy0: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
index ee44182c6348..8b8bd70c9382 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy1: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
index f05f0d775039..619c880b54d8 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy2: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
index a9114ec51075..d7ebb73a400d 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy3: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
index 44dd00ac7367..b151d696a069 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy4: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
index 5b1b84b58602..adc0ae0013a3 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy5: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
index 0e1daaef9e74..435047e0e250 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
@@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy14: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
index 68c5ef779266..c098657cca0a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
@@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy15: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
index 605363cc1117..9d06824815f3 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy8: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
index 1955dfa13634..70e947730c4b 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy9: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
index 2c1476454ee0..ad96e6529595 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy10: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
index b8b541ff5fb0..034bc4b71f7a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy11: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
index 4b2cfddd1b15..93ca23d82b39 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy12: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
index 0a52ddf7cc17..23b3117a2fd2 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
+ fsl,erratum-a011043; /* must ignore read errors */
pcsphy13: ethernet-phy@0 {
reg = <0x0>;
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index 7e3789ea396b..5f2cb1c53e15 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -4,6 +4,10 @@
#include <types.h>
#include <string.h>
+#define INT_MAX ((int)(~0U>>1))
+#define UINT32_MAX ((u32)~0U)
+#define INT32_MAX ((s32)(UINT32_MAX >> 1))
+
#include "of.h"
typedef u32 uint32_t;
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
index 5c6afdbca642..21b52c15aafc 100644
--- a/arch/powerpc/boot/xz_config.h
+++ b/arch/powerpc/boot/xz_config.h
@@ -19,10 +19,30 @@ static inline uint32_t swab32p(void *p)
#ifdef __LITTLE_ENDIAN__
#define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return swab32p((u32 *)p);
+}
#else
#define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return *p;
+}
#endif
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+ return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ *((u32 *)p) = cpu_to_be32(val);
+}
+
#define memeq(a, b, size) (memcmp(a, b, size) == 0)
#define memzero(buf, size) memset(buf, 0, size)
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 85e88f7a59c0..9ff848e3c4a6 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -27,7 +27,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
unsigned long val;
int rc;
- rc = arch_get_random_long(&val);
+ rc = arch_get_random_seed_long(&val);
if (rc)
*v = val;
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index e0baba1535e6..c06cfdf12c0b 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -121,4 +121,13 @@ extern s64 __ashrdi3(s64, int);
extern int __cmpdi2(s64, s64);
extern int __ucmpdi2(u64, u64);
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
+
+extern long flush_count_cache;
+extern long kvm_flush_link_stack;
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 798ab37c9930..80024c4f2093 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -77,6 +77,27 @@ do { \
#define smp_mb__before_spinlock() smp_mb()
+#ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+/*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
+
+// This also acts as a compiler barrier due to the memory clobber.
+#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
+#define barrier_nospec_asm
+#define barrier_nospec()
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
#include <asm-generic/barrier.h>
#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000000..ed7b1448493a
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+ .pushsection ".rodata"
+ .balign 4
+ .global \name
+\name:
+ .4byte \label - .
+ .popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
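The patch_site macro above records a 32-bit self-relative offset (label minus the site's own address) in .rodata, so the kernel can later locate the instruction to patch without an absolute relocation. A sketch of how such a site resolves back to an instruction address (the helper name is illustrative; the real lookup lives in the code-patching C code):

/* A site stores (label - &site); adding the stored offset back to the
 * site's own address recovers the address of the instruction to patch. */
static inline unsigned int *site_to_insn_addr(s32 *site)
{
	return (unsigned int *)((unsigned long)site + *site);
}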
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index b4ab1f497335..ab934f8232bd 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -28,6 +28,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
unsigned long target, int flags);
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 4e54282c29b4..cf51aea47510 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -44,6 +44,7 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
int machine_check_8xx(struct pt_regs *regs);
+int machine_check_83xx(struct pt_regs *regs);
extern void cpu_down_flush_e500v2(void);
extern void cpu_down_flush_e500mc(void);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 0bf8202feca6..175128e19025 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -213,6 +213,25 @@ void setup_feature_keys(void);
FTR_ENTRY_OFFSET 951b-952b; \
.popsection;
+#define NOSPEC_BARRIER_FIXUP_SECTION \
+953: \
+ .pushsection __barrier_nospec_fixup,"a"; \
+ .align 2; \
+954: \
+ FTR_ENTRY_OFFSET 953b-954b; \
+ .popsection;
+
+#define START_BTB_FLUSH_SECTION \
+955: \
+
+#define END_BTB_FLUSH_SECTION \
+956: \
+ .pushsection __btb_flush_fixup,"a"; \
+ .align 2; \
+957: \
+ FTR_ENTRY_OFFSET 955b-957b; \
+ FTR_ENTRY_OFFSET 956b-957b; \
+ .popsection;
#ifndef __ASSEMBLY__
@@ -220,6 +239,8 @@ extern long stf_barrier_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
#endif
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index f4c7467f7465..b73ab8a7ebc3 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
pagefault_enable();
- if (!ret)
- *oval = oldval;
+ *oval = oldval;
return ret;
}
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 9d978102bf0d..9587d301db55 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -316,10 +316,12 @@
#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#ifndef __ASSEMBLY__
#include <linux/types.h>
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5e12e19940e2..defa553fe823 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -271,6 +271,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
+ struct mutex rtas_token_lock;
DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c4ced1d01d57..b7067590f15c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -225,6 +225,7 @@
/* Misc instructions for BPF compiler */
#define PPC_INST_LBZ 0x88000000
#define PPC_INST_LD 0xe8000000
+#define PPC_INST_LDX 0x7c00002a
#define PPC_INST_LHZ 0xa0000000
#define PPC_INST_LWZ 0x80000000
#define PPC_INST_LHBRX 0x7c00062c
@@ -232,6 +233,7 @@
#define PPC_INST_STB 0x98000000
#define PPC_INST_STH 0xb0000000
#define PPC_INST_STD 0xf8000000
+#define PPC_INST_STDX 0x7c00012a
#define PPC_INST_STDU 0xf8000001
#define PPC_INST_STW 0x90000000
#define PPC_INST_STWU 0x94000000
@@ -259,6 +261,7 @@
#define PPC_INST_MULLI 0x1c000000
#define PPC_INST_DIVWU 0x7c000396
#define PPC_INST_DIVD 0x7c0003d2
+#define PPC_INST_DIVDU 0x7c000392
#define PPC_INST_RLWINM 0x54000000
#define PPC_INST_RLWIMI 0x50000000
#define PPC_INST_RLDICL 0x78000000
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c73750b0d9fa..bbd35ba36a22 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -437,7 +437,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.machine push ; \
.machine "power4" ; \
lis scratch,0x60000000@h; \
- dcbt r0,scratch,0b01010; \
+ dcbt 0,scratch,0b01010; \
.machine pop
/*
@@ -780,4 +780,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.long 0x2400004c /* rfid */
#endif /* !CONFIG_PPC_BOOK3E */
#endif /* __ASSEMBLY__ */
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target) \
+ stringify_in_c(.section __ex_table,"a";)\
+ stringify_in_c(.balign 4;) \
+ stringify_in_c(.long (_fault) - . ;) \
+ stringify_in_c(.long (_target) - . ;) \
+ stringify_in_c(.previous)
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BTB_FLUSH(reg) \
+ lis reg,BUCSR_INIT@h; \
+ ori reg,reg,BUCSR_INIT@l; \
+ mtspr SPRN_BUCSR,reg; \
+ isync;
+#else
+#define BTB_FLUSH(reg)
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
#endif /* _ASM_POWERPC_PPC_ASM_H */
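EX_TABLE above emits each fault/fixup pair as two self-relative 32-bit offsets, which keeps __ex_table entries small and position independent. A sketch of how such an entry would be decoded when a fault is handled (the struct mirrors the two .long words; names are illustrative, not from the patch):

struct ex_entry {
	int insn;	/* offset from &insn to the faulting instruction */
	int fixup;	/* offset from &fixup to the fixup code */
};

/* Recover absolute addresses by adding each offset to its own location. */
static inline unsigned long ex_insn_addr(const struct ex_entry *e)
{
	return (unsigned long)&e->insn + e->insn;
}

static inline unsigned long ex_fixup_addr(const struct ex_entry *e)
{
	return (unsigned long)&e->fixup + e->fixup;
}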
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ceb168cd3b81..26aeeaad3267 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -663,6 +663,8 @@
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
+#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 737e012ef56e..319ed53e503f 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -41,7 +41,7 @@
#if defined(CONFIG_PPC_BOOK3E_64)
#define MSR_64BIT MSR_CM
-#define MSR_ (MSR_ME | MSR_CE)
+#define MSR_ (MSR_ME | MSR_RI | MSR_CE)
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b22383c..ccf44c135389 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@ enum stf_barrier_type {
void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
static inline void security_ftr_set(unsigned long feature)
{
@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Indirect branch prediction cache disabled
#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
+
// Features indicating need for Spectre/Meltdown mitigations
@@ -74,6 +78,12 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Firmware configuration indicates user favours security over performance
#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 3f160cd20107..862ebce3ae54 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,6 +8,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -50,6 +51,26 @@ enum l1d_flush_type {
void setup_rfi_flush(enum l1d_flush_type, bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void setup_spectre_v2(void);
+#else
+static inline void setup_spectre_v2(void) {};
+#endif
+void do_btb_flush_fixups(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index d89beaba26ff..8b957aabb826 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -213,30 +213,18 @@
* respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
* (i.e. carry out) is not stored anywhere, and is lost.
*/
-#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (bh) && (bh) == 0) \
- __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
- else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
- __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
+ __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else \
- __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "r" ((USItype)(bh)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
+ __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
} while (0)
/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
@@ -248,44 +236,24 @@
* and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
* and is lost.
*/
-#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (ah) && (ah) == 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
- else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
- else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else \
- __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
} while (0)
/* asm fragments for mul and div */
@@ -294,13 +262,10 @@
* UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*/
-#define umul_ppmm(ph, pl, m0, m1) \
+#define umul_ppmm(ph, pl, m0, m1) \
do { \
USItype __m0 = (m0), __m1 = (m1); \
- __asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype)(ph)) \
- : "%r" (__m0), \
- "r" (__m1)); \
+ __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
(pl) = __m0 * __m1; \
} while (0)
@@ -312,9 +277,10 @@
* significant bit of DENOMINATOR must be 1, then the pre-processor symbol
* UDIV_NEEDS_NORMALIZATION is defined to 1.
*/
-#define udiv_qrnnd(q, r, n1, n0, d) \
+#define udiv_qrnnd(q, r, n1, n0, d) \
do { \
- UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
+ UWtype __d1, __d0, __q1, __q0; \
+ UWtype __r1, __r0, __m; \
__d1 = __ll_highpart (d); \
__d0 = __ll_lowpart (d); \
\
@@ -325,7 +291,7 @@
if (__r1 < __m) \
{ \
__q1--, __r1 += (d); \
- if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */ \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
if (__r1 < __m) \
__q1--, __r1 += (d); \
} \
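The rewritten macros above drop the old pre-PowerPC mnemonic alternatives and the (USItype) casts on output operands, but the arithmetic is unchanged; the inline asm exists only to get the carry/borrow chain in two instructions. For reference (not part of the patch), what add_ssaaaa() and sub_ddmmss() compute on 32-bit words, expressed in plain C:

#include <stdint.h>

/* (sh:sl) = (ah:al) + (bh:bl); carry out of the high word is lost. */
static void add_ssaaaa_ref(uint32_t *sh, uint32_t *sl,
			   uint32_t ah, uint32_t al, uint32_t bh, uint32_t bl)
{
	uint64_t sum = (((uint64_t)ah << 32) | al) + (((uint64_t)bh << 32) | bl);

	*sh = (uint32_t)(sum >> 32);
	*sl = (uint32_t)sum;
}

/* (sh:sl) = (ah:al) - (bh:bl); borrow out of the high word is lost. */
static void sub_ddmmss_ref(uint32_t *sh, uint32_t *sl,
			   uint32_t ah, uint32_t al, uint32_t bh, uint32_t bl)
{
	uint64_t diff = (((uint64_t)ah << 32) | al) - (((uint64_t)bh << 32) | bl);

	*sh = (uint32_t)(diff >> 32);
	*sl = (uint32_t)diff;
}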
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..229c91bcf616 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -90,6 +90,8 @@ static inline int prrn_is_enabled(void)
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
+
+int dlpar_cpu_readd(int cpu);
#endif
#endif
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 31913b3ac7ab..da852153c1f8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -269,6 +269,7 @@ do { \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -280,8 +281,10 @@ do { \
unsigned long __gu_val = 0; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ } \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -292,6 +295,7 @@ do { \
unsigned long __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
+ barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -348,15 +352,19 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
switch (n) {
case 1:
+ barrier_nospec();
__get_user_size(*(u8 *)to, from, 1, ret);
break;
case 2:
+ barrier_nospec();
__get_user_size(*(u16 *)to, from, 2, ret);
break;
case 4:
+ barrier_nospec();
__get_user_size(*(u32 *)to, from, 4, ret);
break;
case 8:
+ barrier_nospec();
__get_user_size(*(u64 *)to, from, 8, ret);
break;
}
@@ -366,6 +374,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
check_object_size(to, n, false);
+ barrier_nospec();
return __copy_tofrom_user((__force void __user *)to, from, n);
}
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 1afe90ade595..674c03350cd1 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -86,6 +86,7 @@ struct vdso_data {
__s32 wtom_clock_nsec;
struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
+ __u32 hrtimer_res; /* hrtimer resolution */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -107,6 +108,7 @@ struct vdso_data {
__s32 wtom_clock_nsec;
struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
+ __u32 hrtimer_res; /* hrtimer resolution */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 dcache_block_size; /* L1 d-cache block size */
__u32 icache_block_size; /* L1 i-cache block size */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 13885786282b..d80fbf0884ff 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -44,9 +44,10 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 14fbbd9035ca..dfcf28be12ba 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -383,6 +383,7 @@ int main(void)
DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
+ DEFINE(CLOCK_HRTIMER_RES, offsetof(struct vdso_data, hrtimer_res));
DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
@@ -411,7 +412,6 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index c641983bbdd6..3394a72b19f2 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -867,4 +867,21 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
if (cache)
cache_cpu_clear(cache, cpu_id);
}
+
+void cacheinfo_teardown(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ cacheinfo_cpu_offline(cpu);
+}
+
+void cacheinfo_rebuild(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ cacheinfo_cpu_online(cpu);
+}
+
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
index a7b74d36acd7..2cdee87a482c 100644
--- a/arch/powerpc/kernel/cacheinfo.h
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -5,4 +5,8 @@
extern void cacheinfo_cpu_online(unsigned int cpu_id);
extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+/* Allow migration/suspend to tear down and rebuild the hierarchy. */
+extern void cacheinfo_teardown(void);
+extern void cacheinfo_rebuild(void);
+
#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6a82ef039c50..514e04b62261 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1162,6 +1162,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc603",
},
+#ifdef CONFIG_PPC_83xx
{ /* e300c1 (a 603e core, plus some) on 83xx */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00830000,
@@ -1172,7 +1173,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
@@ -1186,7 +1187,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
@@ -1200,7 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
@@ -1217,12 +1218,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
+#endif
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
@@ -2197,11 +2199,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
* oprofile_cpu_type already has a value, then we are
* possibly overriding a real PVR with a logical one,
* and, in that case, keep the current value for
- * oprofile_cpu_type.
+ * oprofile_cpu_type. Furthermore, let's ensure that the

+ * fix for the PMAO bug is enabled on compatibility mode.
*/
if (old.oprofile_cpu_type != NULL) {
t->oprofile_cpu_type = old.oprofile_cpu_type;
t->oprofile_type = old.oprofile_type;
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
}
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8336b9016ca9..a7f229e59892 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -362,10 +362,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
NULL, &hugepage_shift);
if (!ptep)
return token;
- WARN_ON(hugepage_shift);
- pa = pte_pfn(*ptep) << PAGE_SHIFT;
- return pa | (token & (PAGE_SIZE-1));
+ pa = pte_pfn(*ptep);
+
+ /* On radix we can do hugepage mappings for io, so handle that */
+ if (hugepage_shift) {
+ pa <<= hugepage_shift;
+ pa |= token & ((1ul << hugepage_shift) - 1);
+ } else {
+ pa <<= PAGE_SHIFT;
+ pa |= token & (PAGE_SIZE - 1);
+ }
+
+ return pa;
}
/*
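With the hugepage case handled, eeh_token_to_phys() scales the PFN by the mapping size and keeps only the offset bits that fall within that mapping. A worked example with made-up numbers, assuming a 16 MB hugepage (hugepage_shift = 24) on a 64-bit build:

static unsigned long example_token_to_phys(void)
{
	unsigned long token = 0xc0000000000abcdeUL;	/* ioremapped address */
	unsigned long pfn = 0x1234;			/* stand-in for pte_pfn(*ptep) */
	unsigned int hugepage_shift = 24;		/* 16 MB hugepage */

	/* (0x1234 << 24) | 0x0abcde == 0x12340abcde */
	return (pfn << hugepage_shift) | (token & ((1UL << hugepage_shift) - 1));
}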
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 620e08d4eb6e..adac3dee4c57 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -520,12 +520,6 @@ static void *eeh_rmv_device(void *data, void *userdata)
pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
edev->pdev = NULL;
-
- /*
- * We have to set the VF PE number to invalid one, which is
- * required to plug the VF successfully.
- */
- pdn->pe_number = IODA_INVALID_PE;
#endif
if (rmv_data)
list_add(&edev->rmv_list, &rmv_data->edev_list);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1abd8dd77ec1..eee2131a97e6 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -370,7 +370,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
while (parent) {
if (!(parent->type & EEH_PE_INVALID))
break;
- parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
+ parent->type &= ~EEH_PE_INVALID;
parent = parent->parent;
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 370645687cc7..bdd88f9d7926 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -34,6 +34,7 @@
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>
+#include <asm/barrier.h>
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -347,6 +348,15 @@ syscall_dotrace_cont:
ori r10,r10,sys_call_table@l
slwi r0,r0,2
bge- 66f
+
+ barrier_nospec_asm
+ /*
+ * Prevent the load of the handler below (based on the user-passed
+ * system call number) from being speculatively executed until the
+ * test against NR_syscalls and the branch to 66f above have
+ * committed.
+ */
+
lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
mtlr r10
addi r9,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e24ae0fa80ed..38f0a75014eb 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -26,6 +26,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
@@ -38,6 +39,7 @@
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
+#include <asm/barrier.h>
#include <asm/export.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
@@ -78,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r0,GPR0(r1)
std r10,GPR1(r1)
beq 2f /* if from kernel mode */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2: std r2,GPR2(r1)
std r3,GPR3(r1)
@@ -180,6 +187,15 @@ system_call: /* label this so stack traces look sane */
clrldi r8,r8,32
15:
slwi r0,r0,4
+
+ barrier_nospec_asm
+ /*
+ * Prevent the load of the handler below (based on the user-passed
+ * system call number) from being speculatively executed until the
+ * test against NR_syscalls and the branch to .Lsyscall_enosys above
+ * have committed.
+ */
+
ldx r12,r11,r0 /* Fetch system call handler [ptr] */
mtctr r12
bctrl /* Call handler */
@@ -473,6 +489,63 @@ _GLOBAL(ret_from_kernel_thread)
li r3,0
b .Lsyscall_exit
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE \
+1: nop; \
+ patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH .long 0x4c400420
+
+.macro nops number
+ .rept \number
+ nop
+ .endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+ /* Save LR into r9 */
+ mflr r9
+
+ // Flush the link stack
+ .rept 64
+ bl .+4
+ .endr
+ b 1f
+ nops 6
+
+ .balign 32
+ /* Restore LR */
+1: mtlr r9
+
+ // If we're just flushing the link stack, return here
+3: nop
+ patch_site 3b patch__flush_link_stack_return
+
+ li r9,0x7fff
+ mtctr r9
+
+ BCCTR_FLUSH
+
+2: nop
+ patch_site 2b patch__flush_count_cache_return
+
+ nops 3
+
+ .rept 278
+ .balign 32
+ BCCTR_FLUSH
+ nops 7
+ .endr
+
+ blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -504,6 +577,8 @@ _GLOBAL(_switch)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+ FLUSH_COUNT_CACHE
+
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index ca03eb229a9a..423b5257d3a1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -295,7 +295,8 @@ ret_from_mc_except:
andi. r10,r11,MSR_PR; /* save stack pointer */ \
beq 1f; /* branch around if supervisor */ \
ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
+1: type##_BTB_FLUSH \
+ cmpdi cr1,r1,0; /* check if SP makes sense */ \
bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
@@ -327,6 +328,30 @@ ret_from_mc_except:
#define SPRN_MC_SRR0 SPRN_MCSRR0
#define SPRN_MC_SRR1 SPRN_MCSRR1
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define GEN_BTB_FLUSH \
+ START_BTB_FLUSH_SECTION \
+ beq 1f; \
+ BTB_FLUSH(r10) \
+ 1: \
+ END_BTB_FLUSH_SECTION
+
+#define CRIT_BTB_FLUSH \
+ START_BTB_FLUSH_SECTION \
+ BTB_FLUSH(r10) \
+ END_BTB_FLUSH_SECTION
+
+#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
+#define MC_BTB_FLUSH CRIT_BTB_FLUSH
+#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
+#else
+#define GEN_BTB_FLUSH
+#define CRIT_BTB_FLUSH
+#define DBG_BTB_FLUSH
+#define MC_BTB_FLUSH
+#define GDBELL_BTB_FLUSH
+#endif
+
#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d50cc9b38b80..0c8b966e8070 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -467,6 +467,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
RFI_TO_USER_OR_KERNEL
9:
/* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+ ld r10,ORIG_GPR3(r1)
+ mtspr SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
b machine_check_pSeries
@@ -1505,7 +1509,7 @@ handle_page_fault:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
cmpdi r3,0
- beq+ 12f
+ beq+ ret_from_except_lite
bl save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1520,7 +1524,12 @@ handle_dabr_fault:
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_break
-12: b ret_from_except_lite
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values. Don't use
+ * ret_from_except_lite here.
+ */
+ b ret_from_except
#ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index a620203f7de3..7b98c7351f6c 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -31,6 +31,16 @@
*/
#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BOOKE_CLEAR_BTB(reg) \
+START_BTB_FLUSH_SECTION \
+ BTB_FLUSH(reg) \
+END_BTB_FLUSH_SECTION
+#else
+#define BOOKE_CLEAR_BTB(reg)
+#endif
+
+
#define NORMAL_EXCEPTION_PROLOG(intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
@@ -42,6 +52,7 @@
andi. r11, r11, MSR_PR; /* check whether user or kernel */\
mr r11, r1; \
beq 1f; \
+ BOOKE_CLEAR_BTB(r11) \
/* if from user, start at top of this thread's kernel stack */ \
lwz r11, THREAD_INFO-THREAD(r10); \
ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
@@ -127,6 +138,7 @@
stw r9,_CCR(r8); /* save CR on stack */\
mfspr r11,exc_level_srr1; /* check whether user or kernel */\
DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
+ BOOKE_CLEAR_BTB(r10) \
andi. r11,r11,MSR_PR; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index bf4c6021515f..60a0aeefc4a7 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
+
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5f202a566ec5..9bfdd2510fd5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -765,9 +765,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
if (tbl) {
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
align = 0;
if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ad713f741ca8..eff4a336a1b4 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -527,8 +527,6 @@ void __do_irq(struct pt_regs *regs)
trace_irq_entry(regs);
- check_stack_overflow();
-
/*
* Query the platform PIC for the interrupt & ack it.
*
@@ -560,6 +558,8 @@ void do_IRQ(struct pt_regs *regs)
irqtp = hardirq_ctx[raw_smp_processor_id()];
sirqtp = softirq_ctx[raw_smp_processor_id()];
+ check_stack_overflow();
+
/* Already there ? */
if (unlikely(curtp == irqtp || curtp == sirqtp)) {
__do_irq(regs);
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 30b89d5cbb03..3b1c3bb91025 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,7 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
do_feature_fixups(powerpc_firmware_features,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
+ if (sect != NULL)
+ do_barrier_nospec_fixups_range(barrier_nospec_enabled,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
sect = find_section(hdr, sechdrs, "__lwsync_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 592693437070..c8f1b78fbd0e 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -271,9 +271,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
continue;
#ifdef CONFIG_EEH
- /* Release EEH device for the VF */
+ /*
+ * Release EEH state for this VF. The PCI core
+ * has already torn down the pci_dev for this VF, but
+ * we're responsible for removing the eeh_dev since it
+ * has the same lifetime as the pci_dn that spawned it.
+ */
edev = pdn_to_eeh_dev(pdn);
if (edev) {
+ /*
+ * We allocate pci_dn's for the totalvfs count,
+ * but only the vfs that were activated
+ * have a configured PE.
+ */
+ if (edev->pe)
+ eeh_rmv_from_parent_pe(edev);
+
pdn->edev = NULL;
kfree(edev);
}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index ea3d98115b88..e0648a09d9c8 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -45,6 +45,8 @@ static unsigned int pci_parse_of_flags(u32 addr0, int bridge)
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ flags |= IORESOURCE_MEM_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 609f0e87ced7..54c95e7c74cc 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -476,13 +476,14 @@ void giveup_all(struct task_struct *tsk)
if (!tsk->thread.regs)
return;
+ check_if_tm_restore_required(tsk);
+
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
- check_if_tm_restore_required(tsk);
#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
@@ -575,12 +576,11 @@ void flush_all_to_thread(struct task_struct *tsk)
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
- save_all(tsk);
-
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
+ save_all(tsk);
preempt_enable();
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b0245bed6f54..b868f07c4246 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -128,7 +128,7 @@ static void __init move_device_tree(void)
p = __va(memblock_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
- DBG("Moved device tree to 0x%p\n", p);
+ DBG("Moved device tree to 0x%px\n", p);
}
DBG("<- move_device_tree\n");
@@ -651,7 +651,7 @@ void __init early_init_devtree(void *params)
{
phys_addr_t limit;
- DBG(" -> early_init_devtree(%p)\n", params);
+ DBG(" -> early_init_devtree(%px)\n", params);
/* Too early to BUG_ON(), do it by hand */
if (!early_init_dt_verify(params))
@@ -711,7 +711,7 @@ void __init early_init_devtree(void *params)
memblock_allow_resize();
memblock_dump_all();
- DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
+ DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 6a3e5de544ce..641f3e4c3380 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -874,15 +874,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
return 0;
for_each_cpu(cpu, cpus) {
+ struct device *dev = get_cpu_device(cpu);
+
switch (state) {
case DOWN:
- cpuret = cpu_down(cpu);
+ cpuret = device_offline(dev);
break;
case UP:
- cpuret = cpu_up(cpu);
+ cpuret = device_online(dev);
break;
}
- if (cpuret) {
+ if (cpuret < 0) {
pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
__func__,
((state == UP) ? "up" : "down"),
@@ -971,6 +973,8 @@ int rtas_ibm_suspend_me(u64 handle)
data.token = rtas_token("ibm,suspend-me");
data.complete = &done;
+ lock_device_hotplug();
+
/* All present CPUs must be online */
cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
cpuret = rtas_online_cpus_mask(offline_mask);
@@ -980,6 +984,7 @@ int rtas_ibm_suspend_me(u64 handle)
goto out;
}
+ cpu_hotplug_disable();
stop_topology_update();
/* Call function on all CPUs. One of us will make the
@@ -994,6 +999,7 @@ int rtas_ibm_suspend_me(u64 handle)
printk(KERN_ERR "Error doing global join\n");
start_topology_update();
+ cpu_hotplug_enable();
/* Take down CPUs not online prior to suspend */
cpuret = rtas_offline_cpus_mask(offline_mask);
@@ -1002,6 +1008,7 @@ int rtas_ibm_suspend_me(u64 handle)
__func__);
out:
+ unlock_device_hotplug();
free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 2277df84ef6e..ff85fc800183 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -4,42 +4,153 @@
//
// Copyright 2018, Michael Ellerman, IBM Corporation.
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/seq_buf.h>
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
+#include <asm/debug.h>
#include <asm/security_features.h>
+#include <asm/setup.h>
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_NONE = 0x1,
+ COUNT_CACHE_FLUSH_SW = 0x2,
+ COUNT_CACHE_FLUSH_HW = 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+static bool link_stack_flush_enabled;
+
+bool barrier_nospec_enabled;
+static bool no_nospec;
+static bool btb_flush_enabled;
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+static bool no_spectrev2;
+#endif
+
+static void enable_barrier_nospec(bool enable)
+{
+ barrier_nospec_enabled = enable;
+ do_barrier_nospec_fixups(enable);
+}
+
+void setup_barrier_nospec(void)
+{
+ bool enable;
+
+ /*
+ * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
+ * But there's a good reason not to. The two flags we check below are
+ * both enabled by default in the kernel, so if the hcall is not
+ * functional they will be enabled.
+ * On a system where the host firmware has been updated (so the ori
+ * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
+ * not been updated, we would like to enable the barrier. Dropping the
+ * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
+ * we potentially enable the barrier on systems where the host firmware
+ * is not updated, but that's harmless as it's a no-op.
+ */
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+ if (!no_nospec)
+ enable_barrier_nospec(enable);
+}
+
+static int __init handle_nospectre_v1(char *p)
+{
+ no_nospec = true;
+
+ return 0;
+}
+early_param("nospectre_v1", handle_nospectre_v1);
+
+#ifdef CONFIG_DEBUG_FS
+static int barrier_nospec_set(void *data, u64 val)
+{
+ switch (val) {
+ case 0:
+ case 1:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!!val == !!barrier_nospec_enabled)
+ return 0;
+
+ enable_barrier_nospec(!!val);
+
+ return 0;
+}
+
+static int barrier_nospec_get(void *data, u64 *val)
+{
+ *val = barrier_nospec_enabled ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
+ barrier_nospec_get, barrier_nospec_set, "%llu\n");
+
+static __init int barrier_nospec_debugfs_init(void)
+{
+ debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
+ &fops_barrier_nospec);
+ return 0;
+}
+device_initcall(barrier_nospec_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+static int __init handle_nospectre_v2(char *p)
+{
+ no_spectrev2 = true;
+
+ return 0;
+}
+early_param("nospectre_v2", handle_nospectre_v2);
+#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void setup_spectre_v2(void)
+{
+ if (no_spectrev2)
+ do_btb_flush_fixups();
+ else
+ btb_flush_enabled = true;
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
+#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
bool thread_priv;
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
- if (rfi_flush || thread_priv) {
+ if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
- seq_buf_printf(&s, "Mitigation: ");
-
- if (rfi_flush)
- seq_buf_printf(&s, "RFI Flush");
-
- if (rfi_flush && thread_priv)
- seq_buf_printf(&s, ", ");
-
+ seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
- seq_buf_printf(&s, "L1D private per thread");
+ seq_buf_printf(&s, ", L1D private per thread");
seq_buf_printf(&s, "\n");
return s.len;
}
+ if (thread_priv)
+ return sprintf(buf, "Vulnerable: L1D private per thread\n");
+
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
@@ -47,24 +158,43 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
return sprintf(buf, "Vulnerable\n");
}
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_meltdown(dev, attr, buf);
+}
+#endif
+
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
- return sprintf(buf, "Not affected\n");
+ struct seq_buf s;
- return sprintf(buf, "Vulnerable\n");
+ seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
+ if (barrier_nospec_enabled)
+ seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
+ else
+ seq_buf_printf(&s, "Vulnerable");
+
+ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
+ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+
+ seq_buf_printf(&s, "\n");
+ } else
+ seq_buf_printf(&s, "Not affected\n");
+
+ return s.len;
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
- bool bcs, ccd, ori;
struct seq_buf s;
+ bool bcs, ccd;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
if (bcs || ccd) {
seq_buf_printf(&s, "Mitigation: ");
@@ -77,17 +207,31 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled");
- } else
- seq_buf_printf(&s, "Vulnerable");
- if (ori)
- seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+ if (link_stack_flush_enabled)
+ seq_buf_printf(&s, ", Software link stack flush");
+
+ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ seq_buf_printf(&s, "Mitigation: Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, " (hardware accelerated)");
+
+ if (link_stack_flush_enabled)
+ seq_buf_printf(&s, ", Software link stack flush");
+
+ } else if (btb_flush_enabled) {
+ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+ } else {
+ seq_buf_printf(&s, "Vulnerable");
+ }
seq_buf_printf(&s, "\n");
return s.len;
}
+#ifdef CONFIG_PPC_BOOK3S_64
/*
* Store-forwarding barrier support.
*/
@@ -235,3 +379,121 @@ static __init int stf_barrier_debugfs_init(void)
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+
+static void no_count_cache_flush(void)
+{
+ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+ pr_info("count-cache-flush: software flush disabled.\n");
+}
+
+static void toggle_count_cache_flush(bool enable)
+{
+ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
+ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
+ enable = false;
+
+ if (!enable) {
+ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+#endif
+ pr_info("link-stack-flush: software flush disabled.\n");
+ link_stack_flush_enabled = false;
+ no_count_cache_flush();
+ return;
+ }
+
+ // This enables the branch from _switch to flush_count_cache
+ patch_branch_site(&patch__call_flush_count_cache,
+ (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ // This enables the branch from guest_exit_cont to kvm_flush_link_stack
+ patch_branch_site(&patch__call_kvm_flush_link_stack,
+ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+#endif
+
+ pr_info("link-stack-flush: software flush enabled.\n");
+ link_stack_flush_enabled = true;
+
+ // If we just need to flush the link stack, patch an early return
+ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+ no_count_cache_flush();
+ return;
+ }
+
+ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: full software flush sequence enabled.\n");
+ return;
+ }
+
+ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+ count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+ bool enable = true;
+
+ if (no_spectrev2 || cpu_mitigations_off()) {
+ if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
+ security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
+ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
+
+ enable = false;
+ }
+
+ /*
+ * There's no firmware feature flag/hypervisor bit to tell us we need to
+ * flush the link stack on context switch. So we set it here if we see
+ * either of the Spectre v2 mitigations that aim to protect userspace.
+ */
+ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
+ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
+ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+
+ toggle_count_cache_flush(enable);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ toggle_count_cache_flush(enable);
+
+ return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+ count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+ NULL, &fops_count_cache_flush);
+ return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bf0f712ac0e0..5e7d70c5d065 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -918,6 +918,9 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
+ setup_barrier_nospec();
+ setup_spectre_v2();
+
paging_init();
/* Initialize the MMU context management stuff. */
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 2bfa5a7bb672..a378b1e80a1a 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1281,6 +1281,9 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
goto bad;
if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ /* Trying to start TM on non TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto bad;
/* We only recheckpoint on return if we're
* transaction.
*/
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d929afab7b24..aa6cc2bfa69d 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -469,8 +469,10 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
+ /* Don't allow userspace to set the trap value */
+ regs->trap = 0;
+
/* These regs are not checkpointed; they can go in 'regs'. */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
@@ -741,17 +743,35 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
+
+ /* Trying to start TM on non TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+
if (__get_user(uc_transact, &uc->uc_link))
goto badframe;
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- }
- else
- /* Fall through, for non-TM restore */
+ } else
#endif
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
- goto badframe;
+ {
+ /*
+ * Fall through, for non-TM restore
+ *
+ * Unset MSR[TS] in the thread regs, since the MSR from the
+ * user context does not have TS active and no recheckpoint was
+ * done, because restore_tm_sigcontexts() was not called either.
+ *
+ * If it were left set, the code could RFID to userspace with
+ * MSR[TS] set but without the CPU in the proper transactional
+ * state, causing a TM Bad Thing.
+ */
+ current->thread.regs->msr &= ~MSR_TS_MASK;
+ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+ goto badframe;
+ }
if (restore_altstack(&uc->uc_stack))
goto badframe;
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index ba4dee3d233f..884d1c3a187b 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -23,11 +23,19 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_LR 0x70
-#define SL_R12 0x74 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_LR 0xb0
+#define SL_R12 0xb4 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.section .data
@@ -112,6 +120,41 @@ _GLOBAL(swsusp_arch_suspend)
mfibatl r4,3
stw r4,SL_IBAT3+4(r11)
+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r11)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r11)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r11)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r11)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r11)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r11)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r11)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r11)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r11)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r11)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r11)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r11)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r11)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r11)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r11)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
#if 0
/* Backup various CPU config stuffs */
bl __save_cpu_setup
@@ -277,27 +320,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatu 3,r4
lwz r4,SL_IBAT3+4(r11)
mtibatl 3,r4
-#endif
-
BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r11)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r11)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r11)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r11)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r11)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r11)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r11)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r11)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r11)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r11)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r11)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r11)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r11)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r11)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r11)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r11)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+#endif
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 988f38dced0f..82d8aae81c6a 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -179,7 +179,7 @@ nothing_to_copy:
sld r3, r3, r0
li r0, 0
1:
- dcbf r0,r3
+ dcbf 0,r3
addi r3,r3,0x20
bdnz 1b
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ab7b661b6da3..71315b4989e0 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -257,7 +257,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
-void accumulate_stolen_time(void)
+void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
u8 save_soft_enabled = local_paca->soft_enabled;
@@ -862,6 +862,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->stamp_xtime = *wall_time;
vdso_data->stamp_sec_fraction = frac_sec;
+ vdso_data->hrtimer_res = hrtimer_resolution;
smp_wmb();
++(vdso_data->tb_update_count);
}
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index 3745113fcc65..2a7eb5452aba 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -37,6 +37,7 @@ data_page_branch:
mtlr r0
addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
+ .cfi_restore lr
add r3,r0,r3
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b2b69616e77..9b24466570c8 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -139,6 +139,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
*/
99:
li r0,__NR_clock_gettime
+ .cfi_restore lr
sc
blr
.cfi_endproc
@@ -159,12 +160,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
+ mflr r12
+ .cfi_register lr,r12
+ bl __get_datapage@local /* get data page */
+ lwz r5, CLOCK_HRTIMER_RES(r3)
+ mtlr r12
li r3,0
cmpli cr0,r4,0
crclr cr0*4+so
beqlr
- lis r5,CLOCK_REALTIME_RES@h
- ori r5,r5,CLOCK_REALTIME_RES@l
stw r3,TSPC32_TV_SEC(r4)
stw r5,TSPC32_TV_NSEC(r4)
blr
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 69c5af2b3c96..228a4a2383d6 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -39,7 +39,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
mtctr r8
@@ -56,7 +56,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
mtctr r8
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index abf17feffe40..bf9668691511 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -37,6 +37,7 @@ data_page_branch:
mtlr r0
addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
+ .cfi_restore lr
add r3,r0,r3
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 382021324883..c973378e1f2b 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -124,6 +124,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
*/
99:
li r0,__NR_clock_gettime
+ .cfi_restore lr
sc
blr
.cfi_endproc
@@ -144,12 +145,15 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
+ mflr r12
+ .cfi_register lr,r12
+ bl V_LOCAL_FUNC(__get_datapage)
+ lwz r5, CLOCK_HRTIMER_RES(r3)
+ mtlr r12
li r3,0
cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
- lis r5,CLOCK_REALTIME_RES@h
- ori r5,r5,CLOCK_REALTIME_RES@l
std r3,TSPC64_TV_SEC(r4)
std r5,TSPC64_TV_NSEC(r4)
blr
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index c16fddbb6ab8..c20510497c49 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,8 +153,25 @@ SECTIONS
*(__rfi_flush_fixup)
__stop___rfi_flush_fixup = .;
}
-#endif
+#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ . = ALIGN(8);
+ __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
+ __start___barrier_nospec_fixup = .;
+ *(__barrier_nospec_fixup)
+ __stop___barrier_nospec_fixup = .;
+ }
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ . = ALIGN(8);
+ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
+ __start__btb_flush_fixup = .;
+ *(__btb_flush_fixup)
+ __stop__btb_flush_fixup = .;
+ }
+#endif
EXCEPTION_TABLE(0)
NOTES :kernel :notes
@@ -298,6 +315,12 @@ SECTIONS
*(.branch_lt)
}
+#ifdef CONFIG_DEBUG_INFO_BTF
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
+ *(.BTF)
+ }
+#endif
+
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
*(.opd)
}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b6952dd23152..209cad89a11a 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
ulong pc = kvmppc_get_pc(vcpu);
+ ulong lr = kvmppc_get_lr(vcpu);
if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+ if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
}
}
@@ -811,6 +814,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
#ifdef CONFIG_PPC64
INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+ mutex_init(&kvm->arch.rtas_token_lock);
#endif
return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0a2b247dbc6b..5cf1392dff96 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -374,12 +374,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
- struct kvm_vcpu *ret;
-
- mutex_lock(&kvm->lock);
- ret = kvm_get_vcpu_by_id(kvm, id);
- mutex_unlock(&kvm->lock);
- return ret;
+ return kvm_get_vcpu_by_id(kvm, id);
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1098,7 +1093,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
- mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
@@ -1132,7 +1126,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
- mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -1773,7 +1766,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
mutex_unlock(&kvm->lock);
if (!vcore)
- goto free_vcpu;
+ goto uninit_vcpu;
spin_lock(&vcore->lock);
++vcore->num_threads;
@@ -1789,6 +1782,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
return vcpu;
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 79a180cf4c94..4b60bec20603 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -18,6 +18,7 @@
*/
#include <asm/ppc_asm.h>
+#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
@@ -1266,6 +1267,10 @@ mc_cont:
bl kvmhv_accumulate_time
#endif
+ /* Possibly flush the link stack here. */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack
+
stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
/* Increment exit count, poke other threads to exit */
@@ -1685,6 +1690,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtlr r0
blr
+.balign 32
+.global kvm_flush_link_stack
+kvm_flush_link_stack:
+ /* Save LR into r0 */
+ mflr r0
+
+ /* Flush the link stack. On Power8 it's up to 32 entries in size. */
+ .rept 32
+ bl .+4
+ .endr
+
+ /* And on Power9 it's up to 64. */
+BEGIN_FTR_SECTION
+ .rept 32
+ bl .+4
+ .endr
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ /* Restore LR */
+ mtlr r0
+ blr
+
/*
* Check whether an HDSI is an HPTE not found fault or something else.
* If it is an HPTE not found fault that is due to the guest accessing
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e0d88d0890aa..8172021bcee6 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1482,10 +1482,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
err = kvmppc_mmu_init(vcpu);
if (err < 0)
- goto uninit_vcpu;
+ goto free_shared_page;
return vcpu;
+free_shared_page:
+ free_page((unsigned long)vcpu->arch.shared);
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index ef27fbd5d9c5..b1b2273d1f6d 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -133,7 +133,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
if (rtas_name_matches(d->handler->name, name)) {
@@ -154,7 +154,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
bool found;
int i;
- lockdep_assert_held(&kvm->lock);
+ lockdep_assert_held(&kvm->arch.rtas_token_lock);
list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
if (d->token == token)
@@ -193,14 +193,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->arch.rtas_token_lock);
if (args.token)
rc = rtas_token_define(kvm, args.name, args.token);
else
rc = rtas_token_undefine(kvm, args.name);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->arch.rtas_token_lock);
return rc;
}
@@ -232,7 +232,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
orig_rets = args.rets;
args.rets = &args.args[be32_to_cpu(args.nargs)];
- mutex_lock(&vcpu->kvm->lock);
+ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
rc = -ENOENT;
list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -243,7 +243,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
}
}
- mutex_unlock(&vcpu->kvm->lock);
+ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
if (rc == 0) {
args.rets = orig_rets;
@@ -269,8 +269,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
struct rtas_token_definition *d, *tmp;
- lockdep_assert_held(&kvm->lock);
-
list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
list_del(&d->list);
kfree(d);
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 81bd8a07aa51..612b7f6a887f 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -75,6 +75,10 @@
PPC_LL r1, VCPU_HOST_STACK(r4)
PPC_LL r2, HOST_R2(r1)
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+
mfspr r10, SPRN_PID
lwz r8, VCPU_HOST_PID(r4)
PPC_LL r11, VCPU_SHARED(r4)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 990db69a1d0b..fa88f641ac03 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
vcpu->arch.pwrmgtcr0 = spr_val;
break;
+ case SPRN_BUCSR:
+ /*
+ * If we are here, it means that we have already flushed the
+ * branch predictor, so just return to guest.
+ */
+ break;
+
/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 753d591f1b52..c312955977ce 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -14,12 +14,20 @@
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
+ /* Make sure we aren't patching a freed init section */
+ if (*PTRRELOC(&init_mem_is_free) && init_section_contains(addr, 4)) {
+ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ return 0;
+ }
+
__put_user_size(instr, addr, 4, err);
if (err)
return err;
@@ -32,6 +40,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
return patch_instruction(addr, create_branch(addr, target, flags));
}
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+ unsigned int *addr;
+
+ addr = (unsigned int *)((unsigned long)site + *site);
+ return patch_instruction(addr, instr);
+}
+
unsigned int create_branch(const unsigned int *addr,
unsigned long target, int flags)
{
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a84d333ecb09..ca5fc8fa7efc 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
.machine push
.machine "power4"
/* setup read stream 0 */
- dcbt r0,r4,0b01000 /* addr from */
- dcbt r0,r7,0b01010 /* length and depth from */
+ dcbt 0,r4,0b01000 /* addr from */
+ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
- dcbtst r0,r9,0b01000 /* addr to */
- dcbtst r0,r10,0b01010 /* length and depth to */
+ dcbtst 0,r9,0b01000 /* addr to */
+ dcbtst 0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* all streams GO */
+ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop
#ifdef CONFIG_ALTIVEC
@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
li r12,112
.align 5
-1: lvx v7,r0,r4
+1: lvx v7,0,r4
lvx v6,r4,r6
lvx v5,r4,r7
lvx v4,r4,r8
@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
lvx v1,r4,r11
lvx v0,r4,r12
addi r4,r4,128
- stvx v7,r0,r3
+ stvx v7,0,r3
stvx v6,r3,r6
stvx v5,r3,r7
stvx v4,r3,r8
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index da0c568d18c4..391694814691 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -327,13 +327,13 @@ err1; stb r0,0(r3)
.machine push
.machine "power4"
/* setup read stream 0 */
- dcbt r0,r6,0b01000 /* addr from */
- dcbt r0,r7,0b01010 /* length and depth from */
+ dcbt 0,r6,0b01000 /* addr from */
+ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
- dcbtst r0,r9,0b01000 /* addr to */
- dcbtst r0,r10,0b01010 /* length and depth to */
+ dcbtst 0,r9,0b01000 /* addr to */
+ dcbtst 0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* all streams GO */
+ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -388,26 +388,26 @@ err3; std r0,0(r3)
li r11,48
bf cr7*4+3,5f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
addi r4,r4,16
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx v3,r0,r3
+err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
@@ -433,7 +433,7 @@ err3; stvx v0,r3,r11
*/
.align 5
8:
-err4; lvx v7,r0,r4
+err4; lvx v7,0,r4
err4; lvx v6,r4,r9
err4; lvx v5,r4,r10
err4; lvx v4,r4,r11
@@ -442,7 +442,7 @@ err4; lvx v2,r4,r14
err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
addi r4,r4,128
-err4; stvx v7,r0,r3
+err4; stvx v7,0,r3
err4; stvx v6,r3,r9
err4; stvx v5,r3,r10
err4; stvx v4,r3,r11
@@ -463,29 +463,29 @@ err4; stvx v0,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx v3,r0,r3
+err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
addi r4,r4,16
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -565,25 +565,25 @@ err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -592,7 +592,7 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
@@ -618,7 +618,7 @@ err3; stvx v11,r3,r11
*/
.align 5
8:
-err4; lvx v7,r0,r4
+err4; lvx v7,0,r4
VPERM(v8,v0,v7,v16)
err4; lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
@@ -635,7 +635,7 @@ err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
-err4; stvx v8,r0,r3
+err4; stvx v8,0,r3
err4; stvx v9,r3,r9
err4; stvx v10,r3,r10
err4; stvx v11,r3,r11
@@ -656,7 +656,7 @@ err4; stvx v15,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -665,27 +665,27 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index cf1398e3c2e0..e6ed0ec94bc8 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -277,8 +277,101 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
}
+
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+ unsigned int instr, *dest;
+ long *start, *end;
+ int i;
+
+ start = fixup_start;
+ end = fixup_end;
+
+ instr = 0x60000000; /* nop */
+
+ if (enable) {
+ pr_info("barrier-nospec: using ORI speculation barrier\n");
+ instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ }
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+ patch_instruction(dest, instr);
+ }
+
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups(bool enable)
+{
+ void *start, *end;
+
+ start = PTRRELOC(&__start___barrier_nospec_fixup);
+ end = PTRRELOC(&__stop___barrier_nospec_fixup);
+
+ do_barrier_nospec_fixups_range(enable, start, end);
+}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+ unsigned int instr[2], *dest;
+ long *start, *end;
+ int i;
+
+ start = fixup_start;
+ end = fixup_end;
+
+ instr[0] = PPC_INST_NOP;
+ instr[1] = PPC_INST_NOP;
+
+ if (enable) {
+ pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
+ instr[0] = PPC_INST_ISYNC;
+ instr[1] = PPC_INST_SYNC;
+ }
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+ patch_instruction(dest, instr[0]);
+ patch_instruction(dest + 1, instr[1]);
+ }
+
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
+static void patch_btb_flush_section(long *curr)
+{
+ unsigned int *start, *end;
+
+ start = (void *)curr + *curr;
+ end = (void *)curr + *(curr + 1);
+ for (; start < end; start++) {
+ pr_devel("patching dest %lx\n", (unsigned long)start);
+ patch_instruction(start, PPC_INST_NOP);
+ }
+}
+
+void do_btb_flush_fixups(void)
+{
+ long *start, *end;
+
+ start = PTRRELOC(&__start__btb_flush_fixup);
+ end = PTRRELOC(&__stop__btb_flush_fixup);
+
+ for (; start < end; start += 2)
+ patch_btb_flush_section(start);
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 786234fd4e91..193909abd18b 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
.machine push
.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ dcbt 0,r6,0b01000
+ dcbt 0,r7,0b01010
+ dcbtst 0,r9,0b01000
+ dcbtst 0,r10,0b01010
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt 0,r8,0b01010 /* GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
li r11,48
bf cr7*4+3,5f
- lvx v1,r0,r4
+ lvx v1,0,r4
addi r4,r4,16
- stvx v1,r0,r3
+ stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
- lvx v1,r0,r4
+ lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
- stvx v1,r0,r3
+ stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx v3,r0,r4
+ lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
- stvx v3,r0,r3
+ stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx v7,r0,r4
+ lvx v7,0,r4
lvx v6,r4,r9
lvx v5,r4,r10
lvx v4,r4,r11
@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
lvx v1,r4,r15
lvx v0,r4,r16
addi r4,r4,128
- stvx v7,r0,r3
+ stvx v7,0,r3
stvx v6,r3,r9
stvx v5,r3,r10
stvx v4,r3,r11
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx v3,r0,r4
+ lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
- stvx v3,r0,r3
+ stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx v1,r0,r4
+ lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
- stvx v1,r0,r3
+ stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx v1,r0,r4
+ lvx v1,0,r4
addi r4,r4,16
- stvx v1,r0,r3
+ stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
addi r4,r4,16
bf cr7*4+3,5f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx v8,r0,r3
+ stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx v3,r0,r4
+ lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx v7,r0,r4
+ lvx v7,0,r4
VPERM(v8,v0,v7,v16)
lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx v3,r0,r4
+ lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx v8,r0,r3
+ stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 57ace356c949..11e6372537fd 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -192,7 +192,7 @@ err1; std r0,8(r3)
mtctr r6
mr r8,r3
14:
-err1; dcbz r0,r3
+err1; dcbz 0,r3
add r3,r3,r9
bdnz 14b
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 9376e8e53bfa..2791f568bdb2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -521,21 +521,22 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
switch (regs->trap) {
case 0x300:
case 0x380:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "data at address 0x%08lx\n", regs->dar);
+ pr_alert("BUG: %s at 0x%08lx\n",
+ regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
+ "Unable to handle kernel data access", regs->dar);
break;
case 0x400:
case 0x480:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "instruction fetch\n");
+ pr_alert("BUG: Unable to handle kernel instruction fetch%s",
+ regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
break;
case 0x600:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "unaligned access at address 0x%08lx\n", regs->dar);
+ pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
+ regs->dar);
break;
default:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "unknown fault\n");
+ pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
+ regs->dar);
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bd666287c5ed..2dc1fc445f35 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -289,10 +289,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
-
+ if (ret == -1) {
+ /* Try to remove a non-bolted entry */
+ ret = mmu_hash_ops.hpte_remove(hpteg);
+ if (ret != -1)
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
+ }
if (ret < 0)
break;
+ cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5f844337de21..34f70d36b16d 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -62,6 +62,7 @@
#endif
unsigned long long memory_limit;
+bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
@@ -344,6 +345,14 @@ void __init mem_init(void)
BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
#ifdef CONFIG_SWIOTLB
+ /*
+ * Some platforms (e.g. 85xx) limit DMA-able memory way below
+ * 4G. We force memblock to bottom-up mode to ensure that the
+ * memory allocated in swiotlb_init() is DMA-able.
+ * As it's the last memblock allocation, no need to reset it
+ * back to top-down.
+ */
+ memblock_set_bottom_up(true);
swiotlb_init(0);
#endif
@@ -396,6 +405,7 @@ void __init mem_init(void)
void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
+ init_mem_is_free = true;
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0ef83c274019..31e9064ba628 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1540,13 +1540,6 @@ static void reset_topology_timer(void)
#ifdef CONFIG_SMP
-static void stage_topology_update(int core_id)
-{
- cpumask_or(&cpu_associativity_changes_mask,
- &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
- reset_topology_timer();
-}
-
static int dt_update_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1559,7 +1552,7 @@ static int dt_update_callback(struct notifier_block *nb,
!of_prop_cmp(update->prop->name, "ibm,associativity")) {
u32 core_id;
of_property_read_u32(update->dn, "reg", &core_id);
- stage_topology_update(core_id);
+ rc = dlpar_cpu_readd(core_id);
rc = NOTIFY_OK;
}
break;
@@ -1581,6 +1574,9 @@ int start_topology_update(void)
{
int rc = 0;
+ if (!topology_updates_enabled)
+ return 0;
+
if (firmware_has_feature(FW_FEATURE_PRRN)) {
if (!prrn_enabled) {
prrn_enabled = 1;
@@ -1610,6 +1606,9 @@ int stop_topology_update(void)
{
int rc = 0;
+ if (!topology_updates_enabled)
+ return 0;
+
if (prrn_enabled) {
prrn_enabled = 0;
#ifdef CONFIG_SMP
@@ -1655,11 +1654,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
kbuf[read_len] = '\0';
- if (!strncmp(kbuf, "on", 2))
+ if (!strncmp(kbuf, "on", 2)) {
+ topology_updates_enabled = true;
start_topology_update();
- else if (!strncmp(kbuf, "off", 3))
+ } else if (!strncmp(kbuf, "off", 3)) {
stop_topology_update();
- else
+ topology_updates_enabled = false;
+ } else
return -EINVAL;
return count;
@@ -1674,9 +1675,7 @@ static const struct file_operations topology_ops = {
static int topology_update_init(void)
{
- /* Do not poll for changes if disabled at boot */
- if (topology_updates_enabled)
- start_topology_update();
+ start_topology_update();
if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
return -ENOMEM;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 44c33ee397a0..2525f23da4be 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -287,14 +287,6 @@ void __init radix__early_init_devtree(void)
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
- /*
- * map vmemmap using 2M if available
- */
- mmu_vmemmap_psize = MMU_PAGE_2M;
- }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
return;
}
@@ -337,7 +329,13 @@ void __init radix__early_init_mmu(void)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* vmemmap mapping */
- mmu_vmemmap_psize = mmu_virtual_psize;
+ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+ /*
+ * map vmemmap using 2M if available
+ */
+ mmu_vmemmap_psize = MMU_PAGE_2M;
+ } else
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif
/*
* initialize page table size
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 2a049fb8523d..96c52271e9c2 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -52,7 +52,7 @@ struct batrange { /* stores address ranges mapped by BATs */
phys_addr_t v_block_mapped(unsigned long va)
{
int b;
- for (b = 0; b < 4; ++b)
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
return bat_addrs[b].phys + (va - bat_addrs[b].start);
return 0;
@@ -64,7 +64,7 @@ phys_addr_t v_block_mapped(unsigned long va)
unsigned long p_block_mapped(phys_addr_t pa)
{
int b;
- for (b = 0; b < 4; ++b)
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
if (pa >= bat_addrs[b].phys
&& pa < (bat_addrs[b].limit-bat_addrs[b].start)
+bat_addrs[b].phys)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 64c9a91773af..96c41b55b106 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -321,7 +321,7 @@ void slb_initialize(void)
#endif
}
- get_paca()->stab_rr = SLB_NUM_BOLTED;
+ get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index eb82d787d99a..b7e9c09dfe19 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
std r15,EX_TLB_R15(r12)
std r10,EX_TLB_CR(r12)
#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
std r7,EX_TLB_R7(r12)
#endif
TLB_MISS_PROLOG_STATS
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index eabecfcaef7c..204b4d9c4424 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -400,7 +400,7 @@ _GLOBAL(set_context)
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, and r10
+ * Must preserve r7, r8, r9, r10 and r11
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -436,6 +436,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*/
_GLOBAL(loadcam_multi)
mflr r8
+ /* Don't switch to AS=1 if already there */
+ mfmsr r11
+ andi. r11,r11,MSR_IS
+ bne 10f
/*
* Set up temporary TLB entry that is the same as what we're
@@ -461,6 +465,7 @@ _GLOBAL(loadcam_multi)
mtmsr r6
isync
+10:
mr r9,r3
add r10,r3,r4
2: bl loadcam_entry
@@ -469,6 +474,10 @@ _GLOBAL(loadcam_multi)
mr r3,r9
blt 2b
+ /* Don't return to AS=0 if we were in AS=1 at function start */
+ andi. r11,r11,MSR_IS
+ bne 3f
+
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
@@ -484,6 +493,7 @@ _GLOBAL(loadcam_multi)
tlbwe
isync
+3:
mtlr r8
blr
#endif
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 89f70073dec8..83e5b255d142 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,8 @@
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
___PPC_RA(base) | ((i) & 0xfffc))
#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
@@ -65,7 +67,9 @@
#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
+ ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
@@ -85,17 +89,6 @@
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
@@ -123,7 +116,7 @@
___PPC_RA(a) | IMM_L(i))
#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DIVD(d, a, b) EMIT(PPC_INST_DIVD | ___PPC_RT(d) | \
+#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \
___PPC_RA(a) | ___PPC_RB(b))
#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(b))
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index a8cd7e289ecd..81a9045d8410 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
#endif
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
#define SEEN_DATAREF 0x10000 /* might call external helpers */
#define SEEN_XREG 0x20000 /* X reg is used */
#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 62fa7589db2b..bb944b6018d7 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -86,6 +86,26 @@ DECLARE_LOAD_FUNC(sk_load_byte);
(imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
func##_positive_offset)
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at a word boundary,
+ * so ensure that it isn't in use already.
+ */
+#define PPC_BPF_LL(r, base, i) do { \
+ if ((i) % 4) { \
+ PPC_LI(b2p[TMP_REG_2], (i)); \
+ PPC_LDX(r, base, b2p[TMP_REG_2]); \
+ } else \
+ PPC_LD(r, base, i); \
+ } while(0)
+#define PPC_BPF_STL(r, base, i) do { \
+ if ((i) % 4) { \
+ PPC_LI(b2p[TMP_REG_2], (i)); \
+ PPC_STDX(r, base, b2p[TMP_REG_2]); \
+ } else \
+ PPC_STD(r, base, i); \
+ } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
#define SEEN_SKB 0x4000 /* uses sk_buff */
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 9c58194c7ea5..158f43008314 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -18,8 +18,6 @@
#include "bpf_jit32.h"
-int bpf_jit_enable __read_mostly;
-
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb();
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index bdbbc320b006..888ee95340da 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -21,8 +21,6 @@
#include "bpf_jit64.h"
-int bpf_jit_enable __read_mostly;
-
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
int *p = area;
@@ -265,7 +263,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
PPC_BCC(COND_GT, out);
@@ -278,7 +276,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
/* prog = array->ptrs[index]; */
PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
@@ -288,7 +286,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
PPC_BCC(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
/* skip past the function descriptor */
PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -419,12 +417,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
+ PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
PPC_MULD(b2p[TMP_REG_1], src_reg,
b2p[TMP_REG_1]);
PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
} else
- PPC_DIVD(dst_reg, dst_reg, src_reg);
+ PPC_DIVDU(dst_reg, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
@@ -452,7 +450,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
break;
case BPF_ALU64:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVD(b2p[TMP_REG_2], dst_reg,
+ PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
b2p[TMP_REG_1]);
PPC_MULD(b2p[TMP_REG_1],
b2p[TMP_REG_1],
@@ -460,7 +458,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
PPC_SUB(dst_reg, dst_reg,
b2p[TMP_REG_1]);
} else
- PPC_DIVD(dst_reg, dst_reg,
+ PPC_DIVDU(dst_reg, dst_reg,
b2p[TMP_REG_1]);
break;
}
@@ -620,7 +618,7 @@ bpf_alu32_trunc:
* the instructions generated will remain the
* same across all passes
*/
- PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
break;
@@ -676,7 +674,7 @@ emit_clear:
PPC_LI32(b2p[TMP_REG_1], imm);
src_reg = b2p[TMP_REG_1];
}
- PPC_STD(src_reg, dst_reg, off);
+ PPC_BPF_STL(src_reg, dst_reg, off);
break;
/*
@@ -723,7 +721,7 @@ emit_clear:
break;
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
- PPC_LD(dst_reg, src_reg, off);
+ PPC_BPF_LL(dst_reg, src_reg, off);
break;
/*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 771edffa2d40..ba49ae6625f1 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1800,6 +1800,7 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
+ u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1896,13 +1897,14 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if (cpuhw->bhrb_filter == -1) {
+ if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
}
+ cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index ab830d106ec5..5fbd9bdefa4e 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -29,6 +29,7 @@ enum {
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
@@ -262,6 +263,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 9abcd8f65504..c396d5e5098c 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -30,6 +30,7 @@ enum {
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
@@ -177,6 +178,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index d75c9816a5c9..2b6589fe812d 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -14,6 +14,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <asm/debug.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/ipic.h>
@@ -150,3 +151,19 @@ void __init mpc83xx_setup_arch(void)
mpc83xx_setup_pci();
}
+
+int machine_check_83xx(struct pt_regs *regs)
+{
+ u32 mask = 1 << (31 - IPIC_MCP_WDT);
+
+ if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask))
+ return machine_check_generic(regs);
+ ipic_clear_mcp_status(mask);
+
+ if (debugger_fault_handler(regs))
+ return 1;
+
+ die("Watchdog NMI Reset", regs, 0);
+
+ return 1;
+}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index b7f937563827..d1fee2d35b49 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -299,23 +299,6 @@ static int __init maple_probe(void)
return 1;
}
-define_machine(maple) {
- .name = "Maple",
- .probe = maple_probe,
- .setup_arch = maple_setup_arch,
- .init_IRQ = maple_init_IRQ,
- .pci_irq_fixup = maple_pci_irq_fixup,
- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
- .restart = maple_restart,
- .halt = maple_halt,
- .get_boot_time = maple_get_boot_time,
- .set_rtc_time = maple_set_rtc_time,
- .get_rtc_time = maple_get_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = maple_progress,
- .power_save = power4_idle,
-};
-
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
@@ -372,3 +355,20 @@ static int __init maple_cpc925_edac_setup(void)
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
+
+define_machine(maple) {
+ .name = "Maple",
+ .probe = maple_probe,
+ .setup_arch = maple_setup_arch,
+ .init_IRQ = maple_init_IRQ,
+ .pci_irq_fixup = maple_pci_irq_fixup,
+ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
+ .restart = maple_restart,
+ .halt = maple_halt,
+ .get_boot_time = maple_get_boot_time,
+ .set_rtc_time = maple_set_rtc_time,
+ .get_rtc_time = maple_get_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = maple_progress,
+ .power_save = power4_idle,
+};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index 1c2802fabd57..c856cd7fcdc4 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -37,10 +37,18 @@
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
-#define SL_TB 0x60
-#define SL_R2 0x68
-#define SL_CR 0x6c
-#define SL_R12 0x70 /* r12 to r31 */
+#define SL_DBAT4 0x60
+#define SL_IBAT4 0x68
+#define SL_DBAT5 0x70
+#define SL_IBAT5 0x78
+#define SL_DBAT6 0x80
+#define SL_IBAT6 0x88
+#define SL_DBAT7 0x90
+#define SL_IBAT7 0x98
+#define SL_TB 0xa0
+#define SL_R2 0xa8
+#define SL_CR 0xac
+#define SL_R12 0xb0 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.section .text
@@ -125,6 +133,41 @@ _GLOBAL(low_sleep_handler)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)
+BEGIN_MMU_FTR_SECTION
+ mfspr r4,SPRN_DBAT4U
+ stw r4,SL_DBAT4(r1)
+ mfspr r4,SPRN_DBAT4L
+ stw r4,SL_DBAT4+4(r1)
+ mfspr r4,SPRN_DBAT5U
+ stw r4,SL_DBAT5(r1)
+ mfspr r4,SPRN_DBAT5L
+ stw r4,SL_DBAT5+4(r1)
+ mfspr r4,SPRN_DBAT6U
+ stw r4,SL_DBAT6(r1)
+ mfspr r4,SPRN_DBAT6L
+ stw r4,SL_DBAT6+4(r1)
+ mfspr r4,SPRN_DBAT7U
+ stw r4,SL_DBAT7(r1)
+ mfspr r4,SPRN_DBAT7L
+ stw r4,SL_DBAT7+4(r1)
+ mfspr r4,SPRN_IBAT4U
+ stw r4,SL_IBAT4(r1)
+ mfspr r4,SPRN_IBAT4L
+ stw r4,SL_IBAT4+4(r1)
+ mfspr r4,SPRN_IBAT5U
+ stw r4,SL_IBAT5(r1)
+ mfspr r4,SPRN_IBAT5L
+ stw r4,SL_IBAT5+4(r1)
+ mfspr r4,SPRN_IBAT6U
+ stw r4,SL_IBAT6(r1)
+ mfspr r4,SPRN_IBAT6L
+ stw r4,SL_IBAT6+4(r1)
+ mfspr r4,SPRN_IBAT7U
+ stw r4,SL_IBAT7(r1)
+ mfspr r4,SPRN_IBAT7L
+ stw r4,SL_IBAT7+4(r1)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
/* Backup various CPU config stuffs */
bl __save_cpu_setup
@@ -325,22 +368,37 @@ grackle_wake_up:
mtibatl 3,r4
BEGIN_MMU_FTR_SECTION
- li r4,0
+ lwz r4,SL_DBAT4(r1)
mtspr SPRN_DBAT4U,r4
+ lwz r4,SL_DBAT4+4(r1)
mtspr SPRN_DBAT4L,r4
+ lwz r4,SL_DBAT5(r1)
mtspr SPRN_DBAT5U,r4
+ lwz r4,SL_DBAT5+4(r1)
mtspr SPRN_DBAT5L,r4
+ lwz r4,SL_DBAT6(r1)
mtspr SPRN_DBAT6U,r4
+ lwz r4,SL_DBAT6+4(r1)
mtspr SPRN_DBAT6L,r4
+ lwz r4,SL_DBAT7(r1)
mtspr SPRN_DBAT7U,r4
+ lwz r4,SL_DBAT7+4(r1)
mtspr SPRN_DBAT7L,r4
+ lwz r4,SL_IBAT4(r1)
mtspr SPRN_IBAT4U,r4
+ lwz r4,SL_IBAT4+4(r1)
mtspr SPRN_IBAT4L,r4
+ lwz r4,SL_IBAT5(r1)
mtspr SPRN_IBAT5U,r4
+ lwz r4,SL_IBAT5+4(r1)
mtspr SPRN_IBAT5L,r4
+ lwz r4,SL_IBAT6(r1)
mtspr SPRN_IBAT6U,r4
+ lwz r4,SL_IBAT6+4(r1)
mtspr SPRN_IBAT6L,r4
+ lwz r4,SL_IBAT7(r1)
mtspr SPRN_IBAT7U,r4
+ lwz r4,SL_IBAT7+4(r1)
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 2354ea51e871..6189c4cf56c3 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -546,8 +546,8 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
s64 rc;
int result = 0;
@@ -585,8 +585,8 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
s64 rc;
int result;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 7fb61ebc99a2..c34a44e04c87 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -579,7 +579,10 @@ static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
bin_attr->size);
}
-static BIN_ATTR_RO(symbol_map, 0);
+static struct bin_attribute symbol_map_attr = {
+ .attr = {.name = "symbol_map", .mode = 0400},
+ .read = symbol_map_read
+};
static void opal_export_symmap(void)
{
@@ -596,10 +599,10 @@ static void opal_export_symmap(void)
return;
/* Setup attributes */
- bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
- bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+ symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
+ symbol_map_attr.size = be64_to_cpu(syms[1]);
- rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+ rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
if (rc)
pr_warn("Error %d creating OPAL symbols file\n", rc);
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 8015e40bc7ee..b787a669a1e2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -599,8 +599,8 @@ static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
struct pnv_ioda_pe *slave, *pe;
- u8 fstate, state;
- __be16 pcierr;
+ u8 fstate = 0, state;
+ __be16 pcierr = 0;
s64 rc;
/* Sanity check on PE number */
@@ -1524,6 +1524,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
/* Reserve PE for each VF */
for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+ int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
+ int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
+ struct pci_dn *vf_pdn;
+
if (pdn->m64_single_mode)
pe_num = pdn->pe_num_map[vf_index];
else
@@ -1536,13 +1540,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pe->pbus = NULL;
pe->parent_dev = pdev;
pe->mve_number = -1;
- pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
- pci_iov_virtfn_devfn(pdev, vf_index);
+ pe->rid = (vf_bus << 8) | vf_devfn;
pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
hose->global_number, pdev->bus->number,
- PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
- PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+ PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
if (pnv_ioda_configure_pe(phb, pe)) {
/* XXX What do we do here ? */
@@ -1556,6 +1558,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
list_add_tail(&pe->list, &phb->ioda.pe_list);
mutex_unlock(&phb->ioda.pe_list_mutex);
+ /* associate this pe with its pdn */
+ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
+ if (vf_pdn->busno == vf_bus &&
+ vf_pdn->devfn == vf_devfn) {
+ vf_pdn->pe_number = pe_num;
+ break;
+ }
+ }
+
pnv_pci_ioda2_setup_dma_pe(phb, pe);
}
}
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index db7b8020f68e..2ed7627e991e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -482,8 +482,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
struct pnv_phb *phb = pdn->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
unsigned int pe_no;
s64 rc;
@@ -856,16 +856,12 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
struct pnv_ioda_pe *pe;
- struct pci_dn *pdn;
/* Fix the VF pdn PE number */
if (pdev->is_virtfn) {
- pdn = pci_get_pdn(pdev);
- WARN_ON(pdn->pe_number != IODA_INVALID_PE);
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
if (pe->rid == ((pdev->bus->number << 8) |
(pdev->devfn & 0xff))) {
- pdn->pe_number = pe->pe_number;
pe->pdev = pdev;
break;
}
@@ -923,6 +919,23 @@ void __init pnv_pci_init(void)
if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
+#ifdef CONFIG_PCIEPORTBUS
+ /*
+ * On PowerNV, PCIe devices are (currently) managed in cooperation
+ * with firmware. This isn't *strictly* required, but there's enough
+ * assumptions baked into both firmware and the platform code that
+ * it's unwise to allow the portbus services to be used.
+ *
+ * We need to fix this eventually, but for now set this flag to disable
+ * the portbus driver. The AER service isn't required since AER
+ * events are handled via EEH. The pciehp hotplug driver can't work
+ * without kernel changes (and portbus binding breaks pnv_php). The
+ * other services also require some thinking about how we're going
+ * to integrate them.
+ */
+ pcie_ports_disabled = true;
+#endif
+
/* Look for IODA IO-Hubs. */
for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
pnv_pci_init_ioda_hub(np);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 17203abf38e8..365e2b620201 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np)
if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
+ setup_count_cache_flush();
}
static void __init pnv_setup_arch(void)
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 3db53e8aff92..9b2ef76578f0 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -664,7 +664,7 @@ static int update_flash_db(void)
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
- if (count < sizeof(struct os_area_db)) {
+ if (count < 0 || count < sizeof(struct os_area_db)) {
pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
count);
error = count < 0 ? count : -EIO;
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 66e7227469b8..b5ff5ee3e39c 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -391,6 +391,10 @@ static struct bus_type cmm_subsys = {
.dev_name = "cmm",
};
+static void cmm_release_device(struct device *dev)
+{
+}
+
/**
* cmm_sysfs_register - Register with sysfs
*
@@ -406,6 +410,7 @@ static int cmm_sysfs_register(struct device *dev)
dev->id = 0;
dev->bus = &cmm_subsys;
+ dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 999b04819d69..5abb8e2239a5 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -63,6 +63,10 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
prop->name = kstrdup(name, GFP_KERNEL);
+ if (!prop->name) {
+ dlpar_free_cc_property(prop);
+ return NULL;
+ }
prop->length = be32_to_cpu(ccwa->prop_length);
value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 39049e4884fb..7a4d172c9376 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -150,7 +150,7 @@ static int dtl_start(struct dtl *dtl)
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
- ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
+ ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
@@ -185,7 +185,7 @@ static void dtl_stop(struct dtl *dtl)
static u64 dtl_current_index(struct dtl *dtl)
{
- return lppaca_of(dtl->cpu).dtl_idx;
+ return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a1b63e00b2f7..7a2beedb9740 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -785,6 +785,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
return rc;
}
+int dlpar_cpu_readd(int cpu)
+{
+ struct device_node *dn;
+ struct device *dev;
+ u32 drc_index;
+ int rc;
+
+ dev = get_cpu_device(cpu);
+ dn = dev->of_node;
+
+ rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+
+ rc = dlpar_cpu_remove_by_index(drc_index);
+ if (!rc)
+ rc = dlpar_cpu_add(drc_index);
+
+ return rc;
+}
+
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
u32 count, drc_index;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c0a0947f43bb..eee45b9220e0 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -294,6 +294,7 @@ static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
+ of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
return aa_index;
}
@@ -397,8 +398,10 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
for (i = 0; i < scns_per_block; i++) {
pfn = PFN_DOWN(phys_addr);
- if (!pfn_present(pfn))
+ if (!pfn_present(pfn)) {
+ phys_addr += MIN_MEMORY_BLOCK_SIZE;
continue;
+ }
rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
phys_addr += MIN_MEMORY_BLOCK_SIZE;
@@ -616,7 +619,7 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb)
nid = memory_add_physaddr_to_nid(lmb->base_addr);
/* Add the memory */
- rc = add_memory(nid, lmb->base_addr, block_sz);
+ rc = __add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
dlpar_remove_device_tree_lmb(lmb);
dlpar_release_drc(lmb->drc_index);
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index 74da18de853a..73ec15cd2708 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(hvc_get_chars);
* @vtermno: The vtermno or unit_address of the adapter from which the data
* originated.
* @buf: The character buffer that contains the character data to send to
- * firmware.
+ * firmware. Must be at least 16 bytes, even if count is less than 16.
* @count: Send this number of characters.
*/
int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
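The updated comment records that the firmware behind hvc_put_chars() always reads 16 bytes from the buffer, regardless of count. A hypothetical caller can honor that by bouncing short writes through a 16-byte scratch buffer, as in the sketch below (fake_hvc_put_chars() and put_chars_padded() are stand-ins, not kernel functions):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the firmware call; the real hvc_put_chars() hands the
     * buffer to the hypervisor, which always reads 16 bytes from it. */
    static int fake_hvc_put_chars(unsigned int vtermno, const char *buf, int count)
    {
        (void)vtermno;
        (void)buf;
        return count < 16 ? count : 16;
    }

    /* Hypothetical helper: callers with fewer than 16 bytes copy into a
     * 16-byte bounce buffer so the firmware never reads past the end. */
    static int put_chars_padded(unsigned int vtermno, const char *buf, int count)
    {
        char padded[16];

        if (count < 16) {
            memset(padded, 0, sizeof(padded));
            memcpy(padded, buf, count);
            return fake_hvc_put_chars(vtermno, padded, count);
        }
        return fake_hvc_put_chars(vtermno, buf, count);
    }

    int main(void)
    {
        printf("sent %d chars\n", put_chars_padded(0, "hi", 2));
        return 0;
    }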
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0024e451bb36..c0f094c96cd6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -167,10 +167,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
return be64_to_cpu(*tcep);
}
-static void tce_free_pSeriesLP(struct iommu_table*, long, long);
+static void tce_free_pSeriesLP(unsigned long liobn, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
-static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
@@ -181,25 +181,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
int ret = 0;
long tcenum_start = tcenum, npages_start = npages;
- rpn = __pa(uaddr) >> TCE_SHIFT;
+ rpn = __pa(uaddr) >> tceshift;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
while (npages--) {
- tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
+ tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
+ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
- tce_free_pSeriesLP(tbl, tcenum_start,
+ tce_free_pSeriesLP(liobn, tcenum_start,
(npages_start - (npages + 1)));
break;
}
if (rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+ printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
printk("\ttce val = 0x%llx\n", tce );
dump_stack();
@@ -228,7 +228,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
unsigned long flags;
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+ return tce_build_pSeriesLP(tbl->it_index, tcenum,
+ tbl->it_page_shift, npages, uaddr,
direction, attrs);
}
@@ -244,8 +245,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
- return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
- direction, attrs);
+ return tce_build_pSeriesLP(tbl->it_index, tcenum,
+ tbl->it_page_shift,
+ npages, uaddr, direction, attrs);
}
__this_cpu_write(tce_page, tcep);
}
@@ -296,16 +298,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
-static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
+static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
{
u64 rc;
while (npages--) {
- rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
+ rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
- printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+ printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
@@ -320,7 +322,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
u64 rc;
if (!firmware_has_feature(FW_FEATURE_MULTITCE))
- return tce_free_pSeriesLP(tbl, tcenum, npages);
+ return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
@@ -435,6 +437,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
u64 rc = 0;
long l, limit;
+ if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
+ unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
+ unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
+ be64_to_cpu(maprange->dma_base);
+ unsigned long tcenum = dmastart >> tceshift;
+ unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
+ void *uaddr = __va(start_pfn << PAGE_SHIFT);
+
+ return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
+ tcenum, tceshift, npages, (unsigned long) uaddr,
+ DMA_BIDIRECTIONAL, 0);
+ }
+
local_irq_disable(); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
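The new non-MULTITCE fallback above translates a pfn range into a TCE index and page count for the firmware's page size. The shift arithmetic on its own, as a userspace sketch (the page shift, DMA base and range are made-up values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_DEMO 16	/* 64K pages, as on many pseries configs */

    int main(void)
    {
        uint64_t start_pfn = 0x100, num_pfn = 4;
        uint64_t dma_base = 0x0800000000000000ULL;
        unsigned int tceshift = 12;	/* 4K IOMMU pages */

        /* Mirrors dmastart >> tceshift and num_pfn << PAGE_SHIFT >> tceshift. */
        uint64_t dmastart = (start_pfn << PAGE_SHIFT_DEMO) + dma_base;
        uint64_t tcenum   = dmastart >> tceshift;
        uint64_t npages   = (num_pfn << PAGE_SHIFT_DEMO) >> tceshift;

        printf("tcenum=0x%llx npages=%llu\n",
               (unsigned long long)tcenum, (unsigned long long)npages);
        return 0;
    }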
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 3784a7abfcc8..74791e8382d2 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/kobject.h>
+#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/completion.h>
@@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
prop_data += vd;
}
+
+ cond_resched();
}
+
+ cond_resched();
} while (rtas_rc == 1);
of_node_put(dn);
@@ -282,8 +287,12 @@ int pseries_devicetree_update(s32 scope)
add_dt_node(phandle, drc_index);
break;
}
+
+ cond_resched();
}
}
+
+ cond_resched();
} while (rc == 1);
kfree(rtas_buf);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 91ade7755823..30782859d898 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -298,6 +298,9 @@ static void pseries_lpar_idle(void)
* low power mode by ceding processor to hypervisor
*/
+ if (!prep_irq_for_idle())
+ return;
+
/* Indicate to hypervisor that we are idle. */
get_lppaca()->idle = 1;
@@ -475,6 +478,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
+ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
+ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
@@ -525,6 +534,7 @@ void pseries_setup_rfi_flush(void)
security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable);
+ setup_count_cache_flush();
}
static void __init pSeries_setup_arch(void)
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index a00949f3e378..a8ebc96dfed2 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
+ mtdcr(uic->dcrbase + UIC_SR, ~mask);
raw_spin_unlock_irqrestore(&uic->lock, flags);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 687e8b8bf5c6..899288b71145 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3043,7 +3043,7 @@ void dump_segments(void)
printf("sr0-15 =");
for (i = 0; i < 16; ++i)
- printf(" %x", mfsrin(i));
+ printf(" %x", mfsrin(i << 28));
printf("\n");
}
#endif
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 591cbdf615af..1a906dd7ca7d 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -572,6 +572,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (!nbytes)
+ return -EINVAL;
+
if (unlikely(!xts_ctx->fc))
return xts_fallback_encrypt(desc, dst, src, nbytes);
@@ -586,6 +589,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (!nbytes)
+ return -EINVAL;
+
if (unlikely(!xts_ctx->fc))
return xts_fallback_decrypt(desc, dst, src, nbytes);
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 2a17123130d3..224aeda1e8cc 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -267,7 +267,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root_inode;
- struct dentry *root_dentry;
+ struct dentry *root_dentry, *update_file;
int rc = 0;
struct hypfs_sb_info *sbi;
@@ -298,9 +298,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
rc = hypfs_diag_create_files(root_dentry);
if (rc)
return rc;
- sbi->update_file = hypfs_create_update_file(root_dentry);
- if (IS_ERR(sbi->update_file))
- return PTR_ERR(sbi->update_file);
+ update_file = hypfs_create_update_file(root_dentry);
+ if (IS_ERR(update_file))
+ return PTR_ERR(update_file);
+ sbi->update_file = update_file;
hypfs_update_update(sb);
pr_info("Hypervisor filesystem mounted\n");
return 0;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 8d665f1b29f8..f0fe566a9910 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -215,11 +215,14 @@ do { \
/*
* Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
*/
-#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 5811e7849a2e..1df70a73dc5c 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -61,6 +61,18 @@ static inline int test_facility(unsigned long nr)
return __test_facility(nr, &S390_lowcore.stfle_fac_list);
}
+static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+{
+ register unsigned long reg0 asm("0") = size - 1;
+
+ asm volatile(
+ ".insn s,0xb2b00000,0(%1)" /* stfle */
+ : "+d" (reg0)
+ : "a" (stfle_fac_list)
+ : "memory", "cc");
+ return reg0;
+}
+
/**
* stfle - Store facility list extended
* @stfle_fac_list: array where facility list can be stored
@@ -78,13 +90,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
if (S390_lowcore.stfl_fac_list & 0x01000000) {
/* More facility bits available with stfle */
- register unsigned long reg0 asm("0") = size - 1;
-
- asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
- : "+d" (reg0)
- : "a" (stfle_fac_list)
- : "memory", "cc");
- nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+ nr = __stfle_asm(stfle_fac_list, size);
+ nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
}
memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
preempt_enable();
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 69b8a41fca84..e094c0cf6936 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -35,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
- if (PAGE_DEFAULT_KEY)
+ if (PAGE_DEFAULT_KEY != 0)
__storage_key_init_range(start, end);
}
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f341c09..f1330245b584 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -146,7 +146,7 @@ static inline void get_tod_clock_ext(char *clk)
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ char clk[STORE_CLOCK_EXT_SIZE];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a7ef70220126..31b2913372b5 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -151,7 +151,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
__rc; \
})
-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
unsigned long spec = 0x810000UL;
int rc;
@@ -181,7 +181,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
return rc;
}
-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
unsigned long spec = 0x81UL;
int rc;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index a97354c8c667..7171fb98533f 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -76,7 +76,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
{
- return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+ return *pos <= NR_DIAG_STAT ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index aaf9dab3c193..f9dca1aed9a4 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1930,10 +1930,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
- else if (operand->flags & OPERAND_PCREL)
- ptr += sprintf(ptr, "%lx", (signed int) value
- + addr);
- else if (operand->flags & OPERAND_SIGNED)
+ else if (operand->flags & OPERAND_PCREL) {
+ void *pcrel = (void *)((int)value + addr);
+
+ ptr += sprintf(ptr, "%px", pcrel);
+ } else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
@@ -2005,7 +2006,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, "%016lx: ", addr);
+ ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
@@ -2033,7 +2034,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
opsize = insn_length(*code);
if (opsize > len)
break;
- ptr += sprintf(ptr, "%p: ", code);
+ ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index be75e8e49e43..802a4ded9a62 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -24,6 +24,12 @@ ENTRY(ftrace_stub)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+#ifdef __PACK_STACK
+/* allocate just enough for r14, r15 and backchain */
+#define TRACED_FUNC_FRAME_SIZE 24
+#else
+#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
+#endif
ENTRY(_mcount)
BR_EX %r14
@@ -37,9 +43,16 @@ ENTRY(ftrace_caller)
#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
#endif
- aghi %r15,-STACK_FRAME_SIZE
+ # allocate stack frame for ftrace_caller to contain traced function
+ aghi %r15,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
+ stg %r0,(__SF_GPRS+8*8)(%r15)
+ stg %r15,(__SF_GPRS+9*8)(%r15)
+ # allocate pt_regs and stack frame for ftrace_trace_function
+ aghi %r15,-STACK_FRAME_SIZE
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+ aghi %r1,-TRACED_FUNC_FRAME_SIZE
+ stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 96e4fcad57bf..c62eb09b2ba7 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -185,7 +185,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
- unsigned long *new, *tail;
+ unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
@@ -224,6 +224,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
+ tail_prev = tail;
tail = new;
}
@@ -233,10 +234,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
- if (rc)
+ if (rc) {
+ /* Undo last SDBT. An SDBT with no SDB at its first
+ * entry but with an SDBT entry instead can not be
+ * handled by the interrupt handler code.
+ * Avoid this situation.
+ */
+ if (tail_prev) {
+ sfb->num_sdbt--;
+ free_page((unsigned long) new);
+ tail = tail_prev;
+ }
break;
+ }
sfb->num_sdb++;
tail++;
+ tail_prev = new = NULL; /* Allocated at least one SDB */
}
/* Link sampling buffer to its origin */
@@ -1282,18 +1295,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
*/
if (flush_all && done)
break;
-
- /* If an event overflow happened, discard samples by
- * processing any remaining sample-data-blocks.
- */
- if (event_overflow)
- flush_all = 1;
}
/* Account sample overflows in the event hardware structure */
if (sampl_overflow)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
+
+ /* Perf_event_overflow() and perf_event_account_interrupt() limit
+ * the interrupt rate to an upper bound of roughly 1000 samples
+ * per task tick.
+ * Hitting this limit results in a large number of throttled
+ * REF_REPORT_THROTTLE entries, and the samples are dropped.
+ * Slightly increase the interval to avoid hitting this limit.
+ */
+ if (event_overflow) {
+ SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+ debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+ __func__,
+ DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+ }
+
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
"overflow stats: sample=%llu event=%llu\n",
@@ -1611,14 +1634,17 @@ static int __init init_cpum_sampling_pmu(void)
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
- if (!sfdbg)
+ if (!sfdbg) {
pr_err("Registering for s390dbf failed\n");
+ return -ENOMEM;
+ }
debug_register_view(sfdbg, &debug_sprintf_view);
err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
+ debug_unregister(sfdbg);
goto out;
}
@@ -1627,6 +1653,7 @@ static int __init init_cpum_sampling_pmu(void)
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
+ debug_unregister(sfdbg);
goto out;
}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index d856263fd768..737e22cf0972 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -139,8 +139,9 @@ static void show_cpu_mhz(struct seq_file *m, unsigned long n)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
+ unsigned long first = cpumask_first(cpu_online_mask);
- if (!n)
+ if (n == first)
show_cpu_summary(m, v);
if (!machine_has_cpu_mhz)
return 0;
@@ -153,6 +154,8 @@ static inline void *c_update(loff_t *pos)
{
if (*pos)
*pos = cpumask_next(*pos - 1, cpu_online_mask);
+ else
+ *pos = cpumask_first(cpu_online_mask);
return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d52a94e9f57f..cba8e56cd63d 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -691,39 +691,67 @@ static struct sclp_core_info *smp_get_core_info(void)
static int smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+ bool configured, bool early)
{
struct pcpu *pcpu;
- cpumask_t avail;
- int cpu, nr, i, j;
+ int cpu, nr, i;
u16 address;
nr = 0;
- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
- cpu = cpumask_first(&avail);
- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
- if (sclp.has_core_type && info->core[i].type != boot_core_type)
+ if (sclp.has_core_type && core->type != boot_core_type)
+ return nr;
+ cpu = cpumask_first(avail);
+ address = core->core_id << smp_cpu_mt_shift;
+ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+ if (pcpu_find_address(cpu_present_mask, address + i))
continue;
- address = info->core[i].core_id << smp_cpu_mt_shift;
- for (j = 0; j <= smp_cpu_mtid; j++) {
- if (pcpu_find_address(cpu_present_mask, address + j))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = address + j;
- pcpu->state =
- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
- if (cpu >= nr_cpu_ids)
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + i;
+ if (configured)
+ pcpu->state = CPU_STATE_CONFIGURED;
+ else
+ pcpu->state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (!early && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpumask_clear_cpu(cpu, avail);
+ cpu = cpumask_next(cpu, avail);
+ }
+ return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+ struct sclp_core_entry *core;
+ cpumask_t avail;
+ bool configured;
+ u16 core_id;
+ int nr, i;
+
+ nr = 0;
+ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+ /*
+ * Add IPL core first (which got logical CPU number 0) to make sure
+ * that all SMT threads get subsequent logical CPU numbers.
+ */
+ if (early) {
+ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+ for (i = 0; i < info->configured; i++) {
+ core = &info->core[i];
+ if (core->core_id == core_id) {
+ nr += smp_add_core(core, &avail, true, early);
break;
+ }
}
}
+ for (i = 0; i < info->combined; i++) {
+ configured = i < info->configured;
+ nr += smp_add_core(&info->core[i], &avail, configured, early);
+ }
return nr;
}
@@ -771,7 +799,7 @@ static void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
get_online_cpus();
- __smp_rescan_cpus(info, 0);
+ __smp_rescan_cpus(info, true);
put_online_cpus();
kfree(info);
}
@@ -1127,7 +1155,7 @@ int __ref smp_rescan_cpus(void)
return -ENOMEM;
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
- nr = __smp_rescan_cpus(info, 1);
+ nr = __smp_rescan_cpus(info, false);
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
kfree(info);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 239f29508f0b..69ac47241b19 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -256,7 +256,8 @@ int arch_update_cpu_topology(void)
topology_update_polarization_simple();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
return rc;
}
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index ca7c3c34f94b..2bb3a255e51a 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -24,9 +24,10 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 84af2b6b64c4..76c56b5382be 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,9 +24,10 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-# Disable gcov profiling and ubsan for VDSO code
+# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07f70d3..95126d25aed5 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1652,6 +1652,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
+ case KVM_S390_INT_PFAULT_INIT:
+ irq->u.ext.ext_params = s390int->parm;
+ irq->u.ext.ext_params2 = s390int->parm64;
+ break;
+ case KVM_S390_RESTART:
+ case KVM_S390_INT_CLOCK_COMP:
+ case KVM_S390_INT_CPU_TIMER:
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2032ab81b2d7..d8fd2eadcda7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -319,19 +319,30 @@ static void kvm_s390_cpu_feat_init(void)
int kvm_arch_init(void *opaque)
{
+ int rc;
+
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf)
return -ENOMEM;
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
- debug_unregister(kvm_s390_dbf);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_debug_unreg;
}
kvm_s390_cpu_feat_init();
/* Register floating interrupt controller interface. */
- return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
+ rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
+ if (rc) {
+ pr_err("Failed to register FLIC rc=%d\n", rc);
+ goto out_debug_unreg;
+ }
+ return 0;
+
+out_debug_unreg:
+ debug_unregister(kvm_s390_dbf);
+ return rc;
}
void kvm_arch_exit(void)
@@ -1422,13 +1433,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
if (!kvm->arch.sca)
goto out_err;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
sca_offset += 16;
if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
sca_offset = 0;
kvm->arch.sca = (struct bsca_block *)
((char *) kvm->arch.sca + sca_offset);
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
sprintf(debug_name, "kvm-%u", current->pid);
@@ -3033,7 +3044,7 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
| KVM_S390_MEMOP_F_CHECK_ONLY;
- if (mop->flags & ~supported_flags)
+ if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
return -EINVAL;
if (mop->size > MEM_OP_MAX_SIZE)
@@ -3105,7 +3116,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
- struct kvm_s390_irq s390irq;
+ struct kvm_s390_irq s390irq = {};
r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
@@ -3288,21 +3299,28 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- int rc;
-
- /* If the basics of the memslot do not change, we do not want
- * to update the gmap. Every update causes several unnecessary
- * segment translation exceptions. This is usually handled just
- * fine by the normal fault handler + gmap, but it will also
- * cause faults on the prefix page of running guest CPUs.
- */
- if (old->userspace_addr == mem->userspace_addr &&
- old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
- old->npages * PAGE_SIZE == mem->memory_size)
- return;
+ int rc = 0;
- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
- mem->guest_phys_addr, mem->memory_size);
+ switch (change) {
+ case KVM_MR_DELETE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ break;
+ case KVM_MR_MOVE:
+ rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
+ old->npages * PAGE_SIZE);
+ if (rc)
+ break;
+ /* FALLTHROUGH */
+ case KVM_MR_CREATE:
+ rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
+ mem->guest_phys_addr, mem->memory_size);
+ break;
+ case KVM_MR_FLAGS_ONLY:
+ break;
+ default:
+ WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
+ }
if (rc)
pr_warn("failed to commit memory region\n");
return;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index da246d95b87c..d3f046eca7db 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -947,6 +947,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->iprcc = PGM_ADDRESSING;
scb_s->pgmilc = 4;
scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
+ rc = 1;
}
return rc;
}
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 79ddd580d605..ca6fab51eea1 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -306,16 +306,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
+ len = min(*lenp, sizeof(buf));
+ if (copy_from_user(buf, buffer, len))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
+ buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
+ *ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@@ -323,9 +323,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
+ *lenp = len;
+ *ppos += len;
}
- *lenp = len;
- *ppos += len;
return 0;
}
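The rewritten write path clamps the user-supplied length to the local buffer before copying and NUL-terminates at the clamped length instead of at the end of the buffer. A userspace sketch of the same bounds handling (parse_timeout() and its input are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Userspace sketch of the bounded copy: clamp the caller's length to
     * the local buffer, copy that much, and terminate the string at the
     * clamped length rather than at the buffer end. */
    static void parse_timeout(const char *data, size_t lenp,
                              unsigned long *nr, unsigned long *seconds)
    {
        char buf[64];
        size_t len = lenp < sizeof(buf) ? lenp : sizeof(buf);

        if (!len)
            return;
        memcpy(buf, data, len);		/* copy_from_user() in the kernel */
        buf[len - 1] = '\0';
        sscanf(buf, "%lu %lu", nr, seconds);
    }

    int main(void)
    {
        unsigned long nr = 0, seconds = 0;
        const char *input = "128 30\n";

        parse_timeout(input, strlen(input), &nr, &seconds);
        printf("pages=%lu timeout=%lus\n", nr, seconds);
        return 0;
    }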
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b6c85b760305..0195c3983f54 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -759,14 +759,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
unsigned long gaddr, int level)
{
+ const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
unsigned long *table;
if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
return NULL;
if (gmap_is_shadow(gmap) && gmap->removed)
return NULL;
- if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
+
+ if (asce_type != _ASCE_TYPE_REGION1 &&
+ gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
return NULL;
+
table = gmap->table;
switch (gmap->asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
@@ -1680,6 +1684,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
goto out_free;
} else if (*table & _REGION_ENTRY_ORIGIN) {
rc = -EAGAIN; /* Race with shadow */
+ goto out_free;
}
crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
/* mark as invalid as long as the parent table is not protected */
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 97fc449a7470..cf045f56581e 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -38,7 +38,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_get_speculative(head))
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
@@ -76,7 +77,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
@@ -150,7 +152,8 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
}
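The added page_ref_count() tests refuse to take a speculative reference once a page's refcount has been pushed so high that it wrapped negative. The control flow, reduced to a non-atomic userspace sketch (try_get_ref() is illustrative, not a kernel helper):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* If the refcount has already wrapped negative, back off instead of
     * incrementing; otherwise take the reference. */
    static bool try_get_ref(int *refcount)
    {
        if (*refcount < 0)	/* WARN_ON_ONCE(page_ref_count(head) < 0) */
            return false;
        (*refcount)++;		/* page_cache_get_speculative() */
        return true;
    }

    int main(void)
    {
        int healthy = 2, wrapped = INT_MIN + 10;

        printf("healthy: %d, wrapped: %d\n",
               try_get_ref(&healthy), try_get_ref(&wrapped));
        return 0;
    }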
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 8bd25aebf488..9b15a1dc6628 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -28,8 +28,6 @@
#include <asm/nospec-branch.h>
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
u32 seen_reg[16]; /* Array to remember which registers are used */
@@ -883,7 +881,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
break;
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
/* lcgr %dst,%dst */
- EMIT4(0xb9130000, dst_reg, dst_reg);
+ EMIT4(0xb9030000, dst_reg, dst_reg);
break;
/*
* BPF_FROM_BE/LE
@@ -1064,8 +1062,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
REG_W1, 0, 0xa);
/*
@@ -1091,8 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
* goto out;
*/
- /* sllg %r1,%b3,3: %r1 = index * 8 */
- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+ /* llgfr %r1,%b3: %r1 = (u32) index */
+ EMIT4(0xb9160000, REG_1, BPF_REG_3);
+ /* sllg %r1,%r1,3: %r1 *= 8 */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
/* lg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
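The JIT change above makes the tail-call index behave as a 32-bit quantity, matching the eBPF interpreter: both the bounds check and the array offset use only the low 32 bits. A C sketch of the intended semantics (tail_call_ok() is illustrative, not the JIT's generated code):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the fixed bounds check: the index register may hold a
     * 64-bit value, but eBPF defines the tail-call index as u32, so only
     * the low 32 bits are compared against max_entries. */
    static int tail_call_ok(uint64_t index, uint32_t max_entries)
    {
        uint32_t idx = (uint32_t)index;	/* llgfr: zero-extend low 32 bits */

        return idx < max_entries;	/* clrj: 32-bit unsigned compare */
    }

    int main(void)
    {
        /* 0x100000001 truncates to index 1 and is accepted, matching the
         * interpreter; the old 64-bit compare would have rejected it. */
        printf("%d %d %d\n",
               tail_call_ok(3, 4), tail_call_ok(5, 4),
               tail_call_ok(0x100000001ULL, 4));
        return 0;
    }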
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 1fb6d5714bae..fd00566677c9 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -180,10 +180,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops;
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
}
-void __init plat_irq_setup(void)
+void __init __weak plat_irq_setup(void)
{
}
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 3280a6bfa503..b2592c3864ad 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -370,7 +370,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#define ioremap_nocache ioremap
#define ioremap_uc ioremap
-#define iounmap __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+ __iounmap(addr);
+}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 2a0ca8780f0d..e4caddd443da 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -79,8 +79,15 @@ enum {
GPIO_FN_WDTOVF,
/* CAN */
- GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
- GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
+ GPIO_FN_CTX2, GPIO_FN_CRX2,
+ GPIO_FN_CTX1, GPIO_FN_CRX1,
+ GPIO_FN_CTX0, GPIO_FN_CRX0,
+ GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
+ GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
+ GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
+ GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
+ GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
+ GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
/* DMAC */
GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
index 2fb9a7b71b41..a2667c9b5819 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
@@ -133,7 +133,7 @@ enum {
GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
+ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
GPIO_FN_ET0_ETXD3_A,
GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 2197fc584186..000cc3343867 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
+ break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
diff --git a/arch/sparc/include/asm/bug.h b/arch/sparc/include/asm/bug.h
index eaa8f8d38125..fa85cac0285c 100644
--- a/arch/sparc/include/asm/bug.h
+++ b/arch/sparc/include/asm/bug.h
@@ -8,10 +8,14 @@
void do_BUG(const char *file, int line);
#define BUG() do { \
do_BUG(__FILE__, __LINE__); \
+ barrier_before_unreachable(); \
__builtin_trap(); \
} while (0)
#else
-#define BUG() __builtin_trap()
+#define BUG() do { \
+ barrier_before_unreachable(); \
+ __builtin_trap(); \
+} while (0)
#endif
#define HAVE_ARCH_BUG
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61058c2..92f0a46ace78 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -40,7 +40,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+({ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
void __xchg_called_with_bad_pointer(void);
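The reworked xchg() wraps the cast in a GNU statement expression with a typed temporary, so the macro yields a result of the pointed-to type and callers that discard it do not trip unused-value warnings. A userspace sketch of the pattern (xchg_helper() is a plain, non-atomic stand-in for the real __xchg()):

    #include <stdio.h>

    /* Non-atomic stand-in for __xchg(); only the macro shape matters here. */
    static unsigned long xchg_helper(unsigned long x, volatile void *m)
    {
        volatile unsigned long *p = m;
        unsigned long old = *p;

        *p = x;
        return old;
    }

    #define xchg_demo(ptr, x)                                            \
    ({  __typeof__(*(ptr)) __ret;                                        \
        __ret = (__typeof__(*(ptr)))                                     \
            xchg_helper((unsigned long)(x), (ptr));                      \
        __ret;                                                           \
    })

    int main(void)
    {
        volatile unsigned long v = 1;
        unsigned long old;

        xchg_demo(&v, 2);	/* result ignored: no unused-value warning */
        old = xchg_demo(&v, 3);
        printf("old=%lu new=%lu\n", old, (unsigned long)v);
        return 0;
    }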
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index f005ccac91cc..e87c0f81b700 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -20,6 +20,7 @@
*/
#define HAS_DMA
+#ifdef CONFIG_PARPORT_PC_FIFO
static DEFINE_SPINLOCK(dma_spin_lock);
#define claim_dma_lock() \
@@ -30,6 +31,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
#define release_dma_lock(__flags) \
spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#endif
static struct sparc_ebus_info {
struct ebus_dma_info info;
diff --git a/arch/sparc/include/uapi/asm/ipcbuf.h b/arch/sparc/include/uapi/asm/ipcbuf.h
index 66013b4fe10d..58da9c4addb2 100644
--- a/arch/sparc/include/uapi/asm/ipcbuf.h
+++ b/arch/sparc/include/uapi/asm/ipcbuf.h
@@ -14,19 +14,19 @@
struct ipc64_perm
{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
#ifndef __arch64__
- unsigned short __pad0;
+ unsigned short __pad0;
#endif
- __kernel_mode_t mode;
- unsigned short __pad1;
- unsigned short seq;
- unsigned long long __unused1;
- unsigned long long __unused2;
+ __kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
};
#endif /* __SPARC_IPCBUF_H */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 71e7f77f6776..84a80cd004eb 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -889,6 +889,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 572db686f845..385d6d04564d 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -151,12 +151,14 @@ SECTIONS
}
PERCPU_SECTION(SMP_CACHE_BYTES)
-#ifdef CONFIG_JUMP_LABEL
. = ALIGN(PAGE_SIZE);
.exit.text : {
EXIT_TEXT
}
-#endif
+
+ .exit.data : {
+ EXIT_DATA
+ }
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index fcf4d27a38fb..e09f7b440b8c 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -586,7 +586,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -750,7 +750,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index a6d9204a6a0b..98a4da3012e3 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -10,8 +10,6 @@
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c0f259..94709ab41ed8 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
* has an opportunity to return -EFAULT to the user if needed.
* The 64-bit routines just return a "long long" with the value,
* since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
*
* Note that the caller is advised to issue a suitable L1 or L2
* prefetch on the address being manipulated to avoid extra stalls.
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 68205fd3b08c..6ae7f0f434a9 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -18,6 +18,7 @@ config GPROF
config GCOV
bool "Enable gcov support"
depends on DEBUG_INFO
+ depends on !KCOV
help
This option allows developers to retrieve coverage data from a UML
session.
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 62087028a9ce..d2ad45c10113 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -260,7 +260,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
if (err == 0) {
spin_unlock(&line->lock);
return IRQ_NONE;
- } else if (err < 0) {
+ } else if ((err < 0) && (err != -EAGAIN)) {
line->head = line->buffer;
line->tail = line->buffer;
}
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 1a60e1328e2f..6aca4c90aa1a 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- down_write(&new->mmap_sem);
+ down_write_nested(&new->mmap_sem, 1);
uml_setup_stubs(new);
up_write(&new->mmap_sem);
}
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 053baff03674..9300f7630d2a 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -11,6 +11,7 @@
#include <asm/types.h>
#include <asm/page.h>
#include <asm/segment.h>
+#include <sysdep/ptrace_user.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -22,6 +23,8 @@ struct thread_info {
0-0xBFFFFFFF for user
0-0xFFFFFFFF for kernel */
struct thread_info *real_thread; /* Points to non-IRQ stack */
+ unsigned long aux_fp_regs[FP_SIZE]; /* auxiliary fp_regs to save/restore
+ them out-of-band */
};
#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index de5d572225f3..cc64f0579949 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -274,7 +274,7 @@ extern int protect(struct mm_id * mm_idp, unsigned long addr,
extern int is_skas_winch(int pid, int fd, void *data);
extern int start_userspace(unsigned long stub_stack);
extern int copy_context_skas0(unsigned long stack, int pid);
-extern void userspace(struct uml_pt_regs *regs);
+extern void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs);
extern int map_stub_pages(int fd, unsigned long code, unsigned long data,
unsigned long stack);
extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void));
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 034b42c7ab40..787568044a2a 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -128,7 +128,7 @@ void new_thread_handler(void)
* callback returns only if the kernel thread execs a process
*/
n = fn(arg);
- userspace(&current->thread.regs.regs);
+ userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
/* Called magically, see new_thread_handler above */
@@ -147,7 +147,7 @@ void fork_handler(void)
current->thread.prev_sched = NULL;
- userspace(&current->thread.regs.regs);
+ userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 25c23666d592..040e3efdc9a6 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
static struct clock_event_device timer_clockevent = {
.name = "posix-timer",
.rating = 250,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = itimer_shutdown,
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 0a99d4515065..cd4a6ff676a8 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -87,12 +87,11 @@ bad_wait:
extern unsigned long current_stub_stack(void);
-static void get_skas_faultinfo(int pid, struct faultinfo *fi)
+static void get_skas_faultinfo(int pid, struct faultinfo *fi, unsigned long *aux_fp_regs)
{
int err;
- unsigned long fpregs[FP_SIZE];
- err = get_fp_registers(pid, fpregs);
+ err = get_fp_registers(pid, aux_fp_regs);
if (err < 0) {
printk(UM_KERN_ERR "save_fp_registers returned %d\n",
err);
@@ -112,7 +111,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
*/
memcpy(fi, (void *)current_stub_stack(), sizeof(*fi));
- err = put_fp_registers(pid, fpregs);
+ err = put_fp_registers(pid, aux_fp_regs);
if (err < 0) {
printk(UM_KERN_ERR "put_fp_registers returned %d\n",
err);
@@ -120,9 +119,9 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
}
}
-static void handle_segv(int pid, struct uml_pt_regs * regs)
+static void handle_segv(int pid, struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
- get_skas_faultinfo(pid, &regs->faultinfo);
+ get_skas_faultinfo(pid, &regs->faultinfo, aux_fp_regs);
segv(regs->faultinfo, 0, 1, NULL);
}
@@ -305,7 +304,7 @@ int start_userspace(unsigned long stub_stack)
return err;
}
-void userspace(struct uml_pt_regs *regs)
+void userspace(struct uml_pt_regs *regs, unsigned long *aux_fp_regs)
{
int err, status, op, pid = userspace_pid[0];
/* To prevent races if using_sysemu changes under us.*/
@@ -374,11 +373,11 @@ void userspace(struct uml_pt_regs *regs)
case SIGSEGV:
if (PTRACE_FULL_FAULTINFO) {
get_skas_faultinfo(pid,
- &regs->faultinfo);
+ &regs->faultinfo, aux_fp_regs);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
regs);
}
- else handle_segv(pid, regs);
+ else handle_segv(pid, regs, aux_fp_regs);
break;
case SIGTRAP + 0x80:
handle_trap(pid, regs, local_using_sysemu);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e31001ec4c07..80636caee07c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -937,13 +937,7 @@ config NR_CPUS
approximately eight kilobytes to the kernel image.
config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
- depends on SMP
- ---help---
- SMT scheduler support improves the CPU scheduler's decision making
- when dealing with Intel Pentium 4 chips with HyperThreading at a
- cost of slightly increased overhead in some places. If unsure say
- N here.
+ def_bool y if SMP
config SCHED_MC
def_bool y
@@ -1761,6 +1755,51 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
If unsure, say y.
+choice
+ prompt "TSX enable mode"
+ depends on CPU_SUP_INTEL
+ default X86_INTEL_TSX_MODE_OFF
+ help
+ Intel's TSX (Transactional Synchronization Extensions) feature
+ allows locking protocols to be optimized through lock elision,
+ which can lead to a noticeable performance boost.
+
+ On the other hand it has been shown that TSX can be exploited
+ to form side channel attacks (e.g. TAA) and chances are there
+ will be more of those attacks discovered in the future.
+
+ Therefore TSX is not enabled by default (aka tsx=off). An admin
+ can override this default with the tsx=on command line parameter.
+ Even with TSX enabled, the kernel will attempt to enable the best
+ possible TAA mitigation setting depending on the microcode available
+ for the particular machine.
+
+ This option sets the default TSX mode between tsx=on, =off
+ and =auto. See Documentation/kernel-parameters.txt for more
+ details.
+
+ Say off if unsure, auto if TSX is in use but should only be
+ enabled on platforms believed to be safe, or on if TSX is in use
+ and its security implications are not a concern.
+
+config X86_INTEL_TSX_MODE_OFF
+ bool "off"
+ help
+ TSX is disabled if possible - equals the tsx=off command line parameter.
+
+config X86_INTEL_TSX_MODE_ON
+ bool "on"
+ help
+ TSX is always enabled on TSX capable HW - equals the tsx=on command
+ line parameter.
+
+config X86_INTEL_TSX_MODE_AUTO
+ bool "auto"
+ help
+ TSX is enabled on TSX capable HW that is believed to be safe against
+ side channel attacks - equals the tsx=auto command line parameter.
+endchoice
+
config EFI
bool "EFI runtime service support"
depends on ACPI
@@ -2051,14 +2090,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
If unsure, leave at the default value.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
+ def_bool y
depends on SMP
- ---help---
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- ( Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
@@ -2581,8 +2614,7 @@ config OLPC
config OLPC_XO1_PM
bool "OLPC XO-1 Power Management"
- depends on OLPC && MFD_CS5535 && PM_SLEEP
- select MFD_CORE
+ depends on OLPC && MFD_CS5535=y && PM_SLEEP
---help---
Add support for poweroff and suspend of the OLPC XO-1 laptop.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 4386440fe463..f09a192260f8 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -192,7 +192,7 @@ config HAVE_MMIOTRACE_SUPPORT
config X86_DECODER_SELFTEST
bool "x86 instruction decoder selftest"
- depends on DEBUG_KERNEL && KPROBES
+ depends on DEBUG_KERNEL && INSTRUCTION_DECODER
depends on !COMPILE_TEST
---help---
Perform x86 instruction decoder selftests at build time.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b5226a009973..940ed27a6212 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
@@ -47,7 +48,7 @@ export REALMODE_CFLAGS
export BITS
ifdef CONFIG_X86_NEED_RELOCS
- LDFLAGS_vmlinux := --emit-relocs
+ LDFLAGS_vmlinux := --emit-relocs --discard-none
endif
#
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 3b7156f46bc1..3b16935b22bc 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
AFLAGS_header.o += -I$(objtree)/$(obj)
$(obj)/header.o: $(obj)/zoffset.h
-LDFLAGS_setup.elf := -T
+LDFLAGS_setup.elf := -m elf_i386 -T
$(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index fd0b6a272dd5..7532f6f53677 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -170,7 +170,7 @@ preferred_addr:
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
+ jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index efdfba21a5b2..3fac2d133e4e 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -104,7 +104,7 @@ ENTRY(startup_32)
notl %eax
andl %eax, %ebx
cmpl $LOAD_PHYSICAL_ADDR, %ebx
- jge 1f
+ jae 1f
#endif
movl $LOAD_PHYSICAL_ADDR, %ebx
1:
@@ -227,6 +227,11 @@ ENTRY(efi32_stub_entry)
leal efi32_config(%ebp), %eax
movl %eax, efi_config(%ebp)
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
jmp startup_32
ENDPROC(efi32_stub_entry)
#endif
@@ -334,7 +339,7 @@ preferred_addr:
notq %rax
andq %rax, %rbp
cmpq $LOAD_PHYSICAL_ADDR, %rbp
- jge 1f
+ jae 1f
#endif
movq $LOAD_PHYSICAL_ADDR, %rbp
1:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index d86e68d3c794..1912b2671f10 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -15,6 +15,7 @@
#include "error.h"
#include "../string.h"
#include "../voffset.h"
+#include <asm/bootparam_utils.h>
/*
* WARNING!!
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 2728e1b7e4a6..a8789aa647b4 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -19,7 +19,6 @@
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
-#include <asm/bootparam_utils.h>
#define BOOT_BOOT_H
#include "../ctype.h"
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index cd4df9322501..7bbfe7d35da7 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
if (irq_fpu_usable()) {
kernel_fpu_begin();
- *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
kernel_fpu_end();
} else
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
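Aside on the crct10dif glue change above: the CRC state is now passed by value, and chksum_digest() starts from a literal 0 instead of reading ctx->crc. A toy sketch of the property this relies on -- a fresh digest is just a finup with seed 0, so the descriptor context never needs to be touched (toy_csum is a stand-in, not the real crc_t10dif routine):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for a seedable, byte-wise checksum such as crc_t10dif_generic() */
    static uint16_t toy_csum(uint16_t seed, const uint8_t *data, size_t len)
    {
        while (len--)
            seed = (uint16_t)(seed * 31 + *data++);
        return seed;
    }

    int main(void)
    {
        const uint8_t msg[] = "example";

        /* init (seed 0), partial update, then finup on the rest ... */
        uint16_t mid      = toy_csum(0, msg, 3);
        uint16_t via_ctx  = toy_csum(mid, msg + 3, sizeof(msg) - 3);
        /* ... equals a one-shot digest seeded with 0, which is why
         * chksum_digest() can call __chksum_finup(0, ...) directly */
        uint16_t one_shot = toy_csum(0, msg, sizeof(msg));

        printf("%u %u\n", (unsigned)via_ctx, (unsigned)one_shot);
        return 0;
    }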
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
index eff2f414e22b..ec234c43b3f4 100644
--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -321,6 +321,12 @@ ENTRY(poly1305_4block_avx2)
vpaddq t2,t1,t1
vmovq t1x,d4
+ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
+ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+ # integers. It's true in a single-block implementation, but not here.
+
# d1 += d0 >> 26
mov d0,%rax
shr $26,%rax
@@ -359,16 +365,16 @@ ENTRY(poly1305_4block_avx2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
index 338c748054ed..639d9760b089 100644
--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -251,16 +251,16 @@ ENTRY(poly1305_block_sse2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
@@ -518,6 +518,12 @@ ENTRY(poly1305_2block_sse2)
paddq t2,t1
movq t1,d4
+ # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
+ # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
+ # amount. Careful: we must not assume the carry bits 'd0 >> 26',
+ # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
+ # integers. It's true in a single-block implementation, but not here.
+
# d1 += d0 >> 26
mov d0,%rax
shr $26,%rax
@@ -556,16 +562,16 @@ ENTRY(poly1305_2block_sse2)
# h0 += (d4 >> 26) * 5
mov d4,%rax
shr $26,%rax
- lea (%eax,%eax,4),%eax
- add %eax,%ebx
+ lea (%rax,%rax,4),%rax
+ add %rax,%rbx
# h4 = d4 & 0x3ffffff
mov d4,%rax
and $0x3ffffff,%eax
mov %eax,h4
# h1 += h0 >> 26
- mov %ebx,%eax
- shr $26,%eax
+ mov %rbx,%rax
+ shr $26,%rax
add %eax,h1
# h0 = h0 & 0x3ffffff
andl $0x3ffffff,%ebx
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 9a9e5884066c..8af8c070f213 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,4 +1,5 @@
#include <linux/jump_label.h>
+#include <asm/cpufeatures.h>
/*
@@ -201,6 +202,23 @@ For 32-bit we have the following conventions - kernel is built with
.byte 0xf1
.endm
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
#endif /* CONFIG_X86_64 */
/*
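Aside on the FENCE_SWAPGS_* macros added above: they insert an LFENCE only on CPUs where the corresponding synthetic feature bit is set, with ALTERNATIVE patching the instruction in at boot rather than branching at run time. A rough user-space analogue of the effect (the boolean flag stands in for the feature bit and is not how the kernel implements it; x86 gcc/clang only):

    #include <stdbool.h>

    static bool need_swapgs_fence;   /* set once from CPU bug detection */

    /* LFENCE keeps younger instructions from dispatching until older ones
     * have completed locally, closing the speculation window around a
     * conditional SWAPGS */
    static inline void fence_swapgs_user_entry(void)
    {
        if (need_swapgs_fence)
            __asm__ __volatile__("lfence" ::: "memory");
    }

    int main(void)
    {
        need_swapgs_fence = true;    /* pretend the CPU is affected */
        fence_swapgs_user_entry();
        return 0;
    }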
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index b0cd306dc527..8841d016b4a4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -28,6 +28,7 @@
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -206,6 +207,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
#endif
user_enter_irqoff();
+
+ mds_user_clear_cpu_buffers();
}
#define SYSCALL_EXIT_WORK_FLAGS \
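Aside on the mds_user_clear_cpu_buffers() call added above just before returning to user space: on MDS-affected parts it is expected to flush the CPU's internal buffers. On CPUs that enumerate MD_CLEAR, the documented mechanism is a VERW with any valid writable data-segment selector; a minimal user-space sketch of that single instruction (the selector value is illustrative, and the in-kernel helper also includes run-time gating not shown here):

    #include <stdint.h>

    static inline void clear_cpu_buffers(void)
    {
        /* 0x2b is an illustrative data-segment selector; the buffer flush
         * is a documented side effect of VERW on MD_CLEAR-capable CPUs */
        static const uint16_t sel = 0x2b;

        __asm__ __volatile__("verw %[sel]" : : [sel] "m" (sel) : "cc");
    }

    int main(void)
    {
        clear_cpu_buffers();
        return 0;
    }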
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index a76dc738ec61..4d980d11e2d1 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -219,6 +219,7 @@ ENTRY(__switch_to_asm)
pushl %ebx
pushl %edi
pushl %esi
+ pushfl
/* switch stack */
movl %esp, TASK_threadsp(%eax)
@@ -241,6 +242,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
+ popfl
popl %esi
popl %edi
popl %ebx
@@ -1193,6 +1195,7 @@ ENTRY(int3)
END(int3)
ENTRY(general_protection)
+ ASM_CLAC
pushl $do_general_protection
jmp error_code
END(general_protection)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 870e941c1947..10ecfba43dff 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -313,6 +313,7 @@ ENTRY(__switch_to_asm)
pushq %r13
pushq %r14
pushq %r15
+ pushfq
/* switch stack */
movq %rsp, TASK_threadsp(%rdi)
@@ -335,6 +336,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
+ popfq
popq %r15
popq %r14
popq %r13
@@ -418,6 +420,7 @@ END(irq_entries_start)
* tracking that we're in kernel mode.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_KERNEL_CR3
/*
@@ -431,8 +434,10 @@ END(irq_entries_start)
TRACE_IRQS_OFF
CALL_enter_from_user_mode
-
+ jmp 2f
1:
+ FENCE_SWAPGS_KERNEL_ENTRY
+2:
/*
* Save previous stack pointer, optionally switch to interrupt stack.
* irq_count is used to check if a CPU is already on an interrupt stack
@@ -1002,6 +1007,13 @@ ENTRY(paranoid_entry)
movq %rax, %cr3
2:
#endif
+ /*
+ * The above doesn't do an unconditional CR3 write, even in the PTI
+ * case. So do an lfence to prevent GS speculation, regardless of
+ * whether PTI is enabled.
+ */
+ FENCE_SWAPGS_KERNEL_ENTRY
+
ret
END(paranoid_entry)
@@ -1063,6 +1075,7 @@ ENTRY(error_entry)
* from user mode due to an IRET fault.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
.Lerror_entry_from_usermode_after_swapgs:
/*
@@ -1074,6 +1087,8 @@ ENTRY(error_entry)
CALL_enter_from_user_mode
ret
+.Lerror_entry_done_lfence:
+ FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
TRACE_IRQS_OFF
ret
@@ -1092,7 +1107,7 @@ ENTRY(error_entry)
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
cmpq $.Lgs_change, RIP+8(%rsp)
- jne .Lerror_entry_done
+ jne .Lerror_entry_done_lfence
/*
* hack: .Lgs_change can fail with user gsbase. If this happens, fix up
@@ -1100,6 +1115,7 @@ ENTRY(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
jmp .Lerror_entry_done
.Lbstep_iret:
@@ -1113,6 +1129,7 @@ ENTRY(error_entry)
* Switch to kernel gsbase:
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1209,6 +1226,7 @@ ENTRY(nmi)
* to switch CR3 here.
*/
cld
+ FENCE_SWAPGS_USER_ENTRY
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 5*8(%rdx) /* pt_regs->ss */
@@ -1497,6 +1515,7 @@ end_repeat_nmi:
movq %rax, %cr3
2:
#endif
+ FENCE_SWAPGS_KERNEL_ENTRY
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
call do_nmi
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index d5409660f5de..0d3ebdfa0739 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -47,10 +47,8 @@ targets += $(vdso_img_sodbg)
export CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,--no-undefined \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
- $(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+ -z max-page-size=4096
$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -96,10 +94,8 @@ CFLAGS_REMOVE_vvar.o = -pg
#
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
- -Wl,-soname=linux-vdso.so.1 \
- -Wl,-z,max-page-size=4096 \
- -Wl,-z,common-page-size=4096
+VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+ -z max-page-size=4096
# 64-bit objects to re-brand as x32
vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
@@ -127,7 +123,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
$(call if_changed,vdso)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
# This makes sure the $(obj) subdirectory exists even though vdso32/
# is not a kbuild sub-make subdirectory.
@@ -165,13 +161,14 @@ $(obj)/vdso32.so.dbg: FORCE \
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO $@
- cmd_vdso = $(CC) -nostdlib -o $@ \
+ cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ -T $(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+ $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+ -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 3f9d1a83891a..50c1f77cab15 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/mm_types.h>
+#include <linux/elf.h>
#include <asm/processor.h>
#include <asm/vdso.h>
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index afb222b63cae..836b7e4a2005 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -112,23 +112,145 @@ static __initconst const u64 amd_hw_cache_event_ids
},
};
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+ [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
+ [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+ [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+ [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
+ [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+};
+
/*
- * AMD Performance Monitor K7 and later.
+ * AMD Performance Monitor K7 and later, up to and including Family 16h:
*/
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
+};
+
+/*
+ * AMD Performance Monitor Family 17h and later:
+ */
+static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
+{
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
};
static u64 amd_pmu_event_map(int hw_event)
{
+ if (boot_cpu_data.x86 >= 0x17)
+ return amd_f17h_perfmon_event_map[hw_event];
+
return amd_perfmon_event_map[hw_event];
}
@@ -714,9 +836,10 @@ __init int amd_pmu_init(void)
x86_pmu.amd_nb_constraints = 0;
}
- /* Events are common for all AMDs */
- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ if (boot_cpu_data.x86 >= 0x17)
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+ else
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
return 0;
}
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index fd4484ae3ffc..5f72b473f3ed 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -388,7 +388,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
struct hw_perf_event *hwc, u64 config)
{
config &= ~perf_ibs->cnt_mask;
- wrmsrl(hwc->config_base, config);
+ if (boot_cpu_data.x86 == 0x10)
+ wrmsrl(hwc->config_base, config);
config &= ~perf_ibs->enable_mask;
wrmsrl(hwc->config_base, config);
}
@@ -563,7 +564,8 @@ static struct perf_ibs perf_ibs_op = {
},
.msr = MSR_AMD64_IBSOPCTL,
.config_mask = IBS_OP_CONFIG_MASK,
- .cnt_mask = IBS_OP_MAX_CNT,
+ .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+ IBS_OP_CUR_CNT_RAND,
.enable_mask = IBS_OP_ENABLE,
.valid_mask = IBS_OP_VAL,
.max_period = IBS_OP_MAX_CNT << 4,
@@ -624,7 +626,7 @@ fail:
if (event->attr.sample_type & PERF_SAMPLE_RAW)
offset_max = perf_ibs->offset_max;
else if (check_rip)
- offset_max = 2;
+ offset_max = 3;
else
offset_max = 1;
do {
@@ -671,10 +673,17 @@ fail:
throttle = perf_event_overflow(event, &data, &regs);
out:
- if (throttle)
+ if (throttle) {
perf_ibs_stop(event, 0);
- else
- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+ } else {
+ period >>= 4;
+
+ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+ (*config & IBS_OP_CNT_CTL))
+ period |= *config & IBS_OP_CUR_CNT_RAND;
+
+ perf_ibs_enable_event(perf_ibs, hwc, period);
+ }
perf_event_update_userpage(event);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 65577f081d07..6bfb9a68134c 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -19,13 +19,14 @@
#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
+#include <asm/smp.h>
#define NUM_COUNTERS_NB 4
#define NUM_COUNTERS_L2 4
#define MAX_COUNTERS NUM_COUNTERS_NB
#define RDPMC_BASE_NB 6
-#define RDPMC_BASE_L2 10
+#define RDPMC_BASE_LLC 10
#define COUNTER_SHIFT 16
@@ -45,30 +46,30 @@ struct amd_uncore {
};
static struct amd_uncore * __percpu *amd_uncore_nb;
-static struct amd_uncore * __percpu *amd_uncore_l2;
+static struct amd_uncore * __percpu *amd_uncore_llc;
static struct pmu amd_nb_pmu;
-static struct pmu amd_l2_pmu;
+static struct pmu amd_llc_pmu;
static cpumask_t amd_nb_active_mask;
-static cpumask_t amd_l2_active_mask;
+static cpumask_t amd_llc_active_mask;
static bool is_nb_event(struct perf_event *event)
{
return event->pmu->type == amd_nb_pmu.type;
}
-static bool is_l2_event(struct perf_event *event)
+static bool is_llc_event(struct perf_event *event)
{
- return event->pmu->type == amd_l2_pmu.type;
+ return event->pmu->type == amd_llc_pmu.type;
}
static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
if (is_nb_event(event) && amd_uncore_nb)
return *per_cpu_ptr(amd_uncore_nb, event->cpu);
- else if (is_l2_event(event) && amd_uncore_l2)
- return *per_cpu_ptr(amd_uncore_l2, event->cpu);
+ else if (is_llc_event(event) && amd_uncore_llc)
+ return *per_cpu_ptr(amd_uncore_llc, event->cpu);
return NULL;
}
@@ -183,21 +184,19 @@ static int amd_uncore_event_init(struct perf_event *event)
return -ENOENT;
/*
- * NB and L2 counters (MSRs) are shared across all cores that share the
- * same NB / L2 cache. Interrupts can be directed to a single target
- * core, however, event counts generated by processes running on other
- * cores cannot be masked out. So we do not support sampling and
- * per-thread events.
+ * NB and Last level cache counters (MSRs) are shared across all cores
+ * that share the same NB / Last level cache. On family 16h and below,
+ * Interrupts can be directed to a single target core, however, event
+ * counts generated by processes running on other cores cannot be masked
+ * out. So we do not support sampling and per-thread events via
+ * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
*/
- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
- return -EINVAL;
- /* NB and L2 counters do not have usr/os/guest/host bits */
+ /* NB and Last level cache counters do not have usr/os/guest/host bits */
if (event->attr.exclude_user || event->attr.exclude_kernel ||
event->attr.exclude_host || event->attr.exclude_guest)
return -EINVAL;
- /* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@@ -226,8 +225,8 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
if (pmu->type == amd_nb_pmu.type)
active_mask = &amd_nb_active_mask;
- else if (pmu->type == amd_l2_pmu.type)
- active_mask = &amd_l2_active_mask;
+ else if (pmu->type == amd_llc_pmu.type)
+ active_mask = &amd_llc_active_mask;
else
return 0;
@@ -274,9 +273,10 @@ static struct pmu amd_nb_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
-static struct pmu amd_l2_pmu = {
+static struct pmu amd_llc_pmu = {
.task_ctx_nr = perf_invalid_context,
.attr_groups = amd_uncore_attr_groups,
.name = "amd_l2",
@@ -286,6 +286,7 @@ static struct pmu amd_l2_pmu = {
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
@@ -296,7 +297,7 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
- struct amd_uncore *uncore_nb = NULL, *uncore_l2;
+ struct amd_uncore *uncore_nb = NULL, *uncore_llc;
if (amd_uncore_nb) {
uncore_nb = amd_uncore_alloc(cpu);
@@ -312,18 +313,18 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}
- if (amd_uncore_l2) {
- uncore_l2 = amd_uncore_alloc(cpu);
- if (!uncore_l2)
+ if (amd_uncore_llc) {
+ uncore_llc = amd_uncore_alloc(cpu);
+ if (!uncore_llc)
goto fail;
- uncore_l2->cpu = cpu;
- uncore_l2->num_counters = NUM_COUNTERS_L2;
- uncore_l2->rdpmc_base = RDPMC_BASE_L2;
- uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
- uncore_l2->active_mask = &amd_l2_active_mask;
- uncore_l2->pmu = &amd_l2_pmu;
- uncore_l2->id = -1;
- *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
+ uncore_llc->cpu = cpu;
+ uncore_llc->num_counters = NUM_COUNTERS_L2;
+ uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
+ uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
+ uncore_llc->active_mask = &amd_llc_active_mask;
+ uncore_llc->pmu = &amd_llc_pmu;
+ uncore_llc->id = -1;
+ *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
}
return 0;
@@ -376,17 +377,12 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
}
- if (amd_uncore_l2) {
- unsigned int apicid = cpu_data(cpu).apicid;
- unsigned int nshared;
-
- uncore = *per_cpu_ptr(amd_uncore_l2, cpu);
- cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
- nshared = ((eax >> 14) & 0xfff) + 1;
- uncore->id = apicid - (apicid % nshared);
+ if (amd_uncore_llc) {
+ uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
+ uncore->id = per_cpu(cpu_llc_id, cpu);
- uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
- *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+ uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
+ *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
}
return 0;
@@ -419,8 +415,8 @@ static int amd_uncore_cpu_online(unsigned int cpu)
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
- if (amd_uncore_l2)
- uncore_online(cpu, amd_uncore_l2);
+ if (amd_uncore_llc)
+ uncore_online(cpu, amd_uncore_llc);
return 0;
}
@@ -456,8 +452,8 @@ static int amd_uncore_cpu_down_prepare(unsigned int cpu)
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
- if (amd_uncore_l2)
- uncore_down_prepare(cpu, amd_uncore_l2);
+ if (amd_uncore_llc)
+ uncore_down_prepare(cpu, amd_uncore_llc);
return 0;
}
@@ -479,8 +475,8 @@ static int amd_uncore_cpu_dead(unsigned int cpu)
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
- if (amd_uncore_l2)
- uncore_dead(cpu, amd_uncore_l2);
+ if (amd_uncore_llc)
+ uncore_dead(cpu, amd_uncore_llc);
return 0;
}
@@ -510,16 +506,16 @@ static int __init amd_uncore_init(void)
}
if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
- amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
- if (!amd_uncore_l2) {
+ amd_uncore_llc = alloc_percpu(struct amd_uncore *);
+ if (!amd_uncore_llc) {
ret = -ENOMEM;
- goto fail_l2;
+ goto fail_llc;
}
- ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+ ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
if (ret)
- goto fail_l2;
+ goto fail_llc;
- pr_info("perf: AMD L2I counters detected\n");
+ pr_info("perf: AMD LLC counters detected\n");
ret = 0;
}
@@ -529,7 +525,7 @@ static int __init amd_uncore_init(void)
if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
"PERF_X86_AMD_UNCORE_PREP",
amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
- goto fail_l2;
+ goto fail_llc;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
"AP_PERF_X86_AMD_UNCORE_STARTING",
@@ -546,11 +542,11 @@ fail_start:
cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
-fail_l2:
+fail_llc:
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
perf_pmu_unregister(&amd_nb_pmu);
- if (amd_uncore_l2)
- free_percpu(amd_uncore_l2);
+ if (amd_uncore_llc)
+ free_percpu(amd_uncore_llc);
fail_nb:
if (amd_uncore_nb)
free_percpu(amd_uncore_nb);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1e9f610d36a4..c26cca506f64 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -374,7 +374,7 @@ int x86_add_exclusive(unsigned int what)
* LBR and BTS are still mutually exclusive.
*/
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
- return 0;
+ goto out;
if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
mutex_lock(&pmc_reserve_mutex);
@@ -386,6 +386,7 @@ int x86_add_exclusive(unsigned int what)
mutex_unlock(&pmc_reserve_mutex);
}
+out:
atomic_inc(&active_events);
return 0;
@@ -396,11 +397,15 @@ fail_unlock:
void x86_del_exclusive(unsigned int what)
{
+ atomic_dec(&active_events);
+
+ /*
+ * See the comment in x86_add_exclusive().
+ */
if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
return;
atomic_dec(&x86_pmu.lbr_exclusive[what]);
- atomic_dec(&active_events);
}
int x86_setup_perfctr(struct perf_event *event)
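Aside on the x86_add_exclusive()/x86_del_exclusive() hunks above: they fix an imbalance where the lbr_pt_coexist early return skipped the active_events increment while the teardown path still decremented it. A stripped-down sketch of the bug pattern and the fix (names are illustrative, not the perf code):

    #include <stdio.h>

    static int active_events;

    /* buggy acquire: the fast path returns before counting the user */
    static int add_buggy(int fast_path)
    {
        if (fast_path)
            return 0;            /* active_events not bumped */
        active_events++;
        return 0;
    }

    /* fixed acquire: every successful add is counted exactly once */
    static int add_fixed(int fast_path)
    {
        if (!fast_path) {
            /* slow-path setup would go here */
        }
        active_events++;
        return 0;
    }

    static void del(void)
    {
        active_events--;         /* release always decrements */
    }

    int main(void)
    {
        add_buggy(1); del();
        printf("buggy: %d\n", active_events);   /* -1: underflow */

        active_events = 0;
        add_fixed(1); del();
        printf("fixed: %d\n", active_events);   /* 0: balanced */
        return 0;
    }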
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 098ab775135f..55e362f9dbfa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2867,7 +2867,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
- if (!event->attr.freq) {
+ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_free_running_flags(event)))
@@ -3075,6 +3075,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
return left;
}
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+ return max(left, 32ULL);
+}
+
PMU_FORMAT_ATTR(event, "config:0-7" );
PMU_FORMAT_ATTR(umask, "config:8-15" );
PMU_FORMAT_ATTR(edge, "config:18" );
@@ -3734,6 +3739,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
+ x86_pmu.limit_period = nhm_limit_period;
x86_pmu.cpu_events = nhm_events_attrs;
@@ -3750,11 +3756,11 @@ __init int intel_pmu_init(void)
pr_cont("Nehalem events, ");
break;
- case INTEL_FAM6_ATOM_PINEVIEW:
- case INTEL_FAM6_ATOM_LINCROFT:
- case INTEL_FAM6_ATOM_PENWELL:
- case INTEL_FAM6_ATOM_CLOVERVIEW:
- case INTEL_FAM6_ATOM_CEDARVIEW:
+ case INTEL_FAM6_ATOM_BONNELL:
+ case INTEL_FAM6_ATOM_BONNELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL:
+ case INTEL_FAM6_ATOM_SALTWELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL_TABLET:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -3766,9 +3772,11 @@ __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
case INTEL_FAM6_ATOM_AIRMONT:
+ case INTEL_FAM6_ATOM_AIRMONT_MID:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3785,7 +3793,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 47d526c700a1..72d09340c24d 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -531,8 +531,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates),
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index f26e26e4d84f..f562ddbeb20c 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -655,7 +655,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};
@@ -664,7 +664,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@@ -672,7 +672,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
struct event_constraint intel_slm_pebs_event_constraints[] = {
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
@@ -697,7 +697,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -714,7 +714,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};
@@ -723,7 +723,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -738,9 +738,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -754,9 +754,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -777,9 +777,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -800,9 +800,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
struct event_constraint intel_skl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
@@ -1326,6 +1326,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
+ local64_set(&hwc->period_left, -new);
+
perf_event_update_userpage(event);
return 0;
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index be0b1968d60a..68144a341903 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -61,8 +61,8 @@ static bool test_intel(int idx)
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_BROADWELL_X:
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
case INTEL_FAM6_ATOM_AIRMONT:
if (idx == PERF_MSR_SMI)
return true;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index cb13c0564ea7..9978ea4382bf 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -60,9 +60,8 @@
} while (0)
#define RELOAD_SEG(seg) { \
- unsigned int pre = GET_SEG(seg); \
+ unsigned int pre = (seg) | 3; \
unsigned int cur = get_user_seg(seg); \
- pre |= 3; \
if (pre != cur) \
set_user_seg(seg, pre); \
}
@@ -71,6 +70,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
struct sigcontext_32 __user *sc)
{
unsigned int tmpflags, err = 0;
+ u16 gs, fs, es, ds;
void __user *buf;
u32 tmp;
@@ -78,16 +78,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
current->restart_block.fn = do_no_restart_syscall;
get_user_try {
- /*
- * Reload fs and gs if they have changed in the signal
- * handler. This does not handle long fs/gs base changes in
- * the handler, but does not clobber them at least in the
- * normal case.
- */
- RELOAD_SEG(gs);
- RELOAD_SEG(fs);
- RELOAD_SEG(ds);
- RELOAD_SEG(es);
+ gs = GET_SEG(gs);
+ fs = GET_SEG(fs);
+ ds = GET_SEG(ds);
+ es = GET_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -105,6 +99,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
buf = compat_ptr(tmp);
} get_user_catch(err);
+ /*
+ * Reload fs and gs if they have changed in the signal
+ * handler. This does not handle long fs/gs base changes in
+ * the handler, but does not clobber them at least in the
+ * normal case.
+ */
+ RELOAD_SEG(gs);
+ RELOAD_SEG(fs);
+ RELOAD_SEG(ds);
+ RELOAD_SEG(es);
+
err |= fpu__restore_sig(buf, 1);
force_iret();
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 2188b5af8167..f39fd349cef6 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -50,7 +50,7 @@ static inline void generic_apic_probe(void)
#ifdef CONFIG_X86_LOCAL_APIC
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5ea025..305c6eed9141 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -49,7 +49,7 @@ static __always_inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}
/**
@@ -63,7 +63,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
- : "ir" (i));
+ : "ir" (i) : "memory");
}
/**
@@ -89,7 +89,7 @@ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
static __always_inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
- : "+m" (v->counter));
+ : "+m" (v->counter) :: "memory");
}
/**
@@ -101,7 +101,7 @@ static __always_inline void atomic_inc(atomic_t *v)
static __always_inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
- : "+m" (v->counter));
+ : "+m" (v->counter) :: "memory");
}
/**
@@ -249,19 +249,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
- asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
- return *v;
-}
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
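Aside on the atomic.h hunks above: a "memory" clobber is added to the arithmetic asm statements, and the companion barrier.h hunk below turns __smp_mb__before/after_atomic() into empty statements, because the clobber itself now stops the compiler from caching or reordering memory accesses across the atomic op. A small sketch of what the clobber buys (illustrative, not the kernel macros):

    /* Without a "memory" clobber the compiler may move unrelated memory
     * accesses (such as the store to 'ready') across the asm; with the
     * clobber it must treat the asm as touching arbitrary memory. */
    static int counter;
    static int ready;

    static void publish_no_clobber(void)
    {
        ready = 1;
        __asm__ __volatile__("lock incl %0" : "+m" (counter));
        /* stores other than 'counter' may still be reordered past this */
    }

    static void publish_with_clobber(void)
    {
        ready = 1;
        __asm__ __volatile__("lock incl %0" : "+m" (counter) : : "memory");
        /* also acts as a full compiler barrier, so no separate barrier()
         * is needed around it */
    }

    int main(void)
    {
        publish_no_clobber();
        publish_with_clobber();
        return 0;
    }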
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6ae2f7..a3248402c36b 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -44,7 +44,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
- : "er" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter) : "memory");
}
/**
@@ -58,7 +58,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
- : "er" (i), "m" (v->counter));
+ : "er" (i), "m" (v->counter) : "memory");
}
/**
@@ -85,7 +85,7 @@ static __always_inline void atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
- : "m" (v->counter));
+ : "m" (v->counter) : "memory");
}
/**
@@ -98,7 +98,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
- : "m" (v->counter));
+ : "m" (v->counter) : "memory");
}
/**
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index eb53c2c78a1f..a0f450b21d67 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -105,8 +105,8 @@ do { \
#endif
/* Atomic operations are already serializing on x86 */
-#define __smp_mb__before_atomic() barrier()
-#define __smp_mb__after_atomic() barrier()
+#define __smp_mb__before_atomic() do { } while (0)
+#define __smp_mb__after_atomic() do { } while (0)
#include <asm-generic/barrier.h>
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 4a8cb8d7cbd5..588d8fbd1e6d 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -17,6 +17,20 @@
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member) \
+ { \
+ .start = offsetof(struct boot_params, struct_member), \
+ .len = sizeof_mbr(struct boot_params, struct_member), \
+ }
+
+struct boot_params_to_save {
+ unsigned int start;
+ unsigned int len;
+};
+
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
@@ -35,19 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->ext_ramdisk_image, 0,
- (char *)&boot_params->efi_info -
- (char *)&boot_params->ext_ramdisk_image);
- memset(&boot_params->kbd_status, 0,
- (char *)&boot_params->hdr -
- (char *)&boot_params->kbd_status);
- memset(&boot_params->_pad7[0], 0,
- (char *)&boot_params->edd_mbr_sig_buffer[0] -
- (char *)&boot_params->_pad7[0]);
- memset(&boot_params->_pad8[0], 0,
- (char *)&boot_params->eddbuf[0] -
- (char *)&boot_params->_pad8[0]);
- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ static struct boot_params scratch;
+ char *bp_base = (char *)boot_params;
+ char *save_base = (char *)&scratch;
+ int i;
+
+ const struct boot_params_to_save to_save[] = {
+ BOOT_PARAM_PRESERVE(screen_info),
+ BOOT_PARAM_PRESERVE(apm_bios_info),
+ BOOT_PARAM_PRESERVE(tboot_addr),
+ BOOT_PARAM_PRESERVE(ist_info),
+ BOOT_PARAM_PRESERVE(hd0_info),
+ BOOT_PARAM_PRESERVE(hd1_info),
+ BOOT_PARAM_PRESERVE(sys_desc_table),
+ BOOT_PARAM_PRESERVE(olpc_ofw_header),
+ BOOT_PARAM_PRESERVE(efi_info),
+ BOOT_PARAM_PRESERVE(alt_mem_k),
+ BOOT_PARAM_PRESERVE(scratch),
+ BOOT_PARAM_PRESERVE(e820_entries),
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_map),
+ BOOT_PARAM_PRESERVE(eddbuf),
+ };
+
+ memset(&scratch, 0, sizeof(scratch));
+
+ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+ memcpy(save_base + to_save[i].start,
+ bp_base + to_save[i].start, to_save[i].len);
+ }
+
+ memcpy(boot_params, save_base, sizeof(*boot_params));
}
}
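Aside on the reworked sanitize_boot_params() above: it inverts the old logic, copying only a whitelist of fields into a zeroed scratch struct and then copying the scratch back, instead of hand-computing memset() ranges over the fields to discard. The same offsetof/sizeof table pattern on a toy struct (field and struct names here are made up for illustration):

    #include <stddef.h>
    #include <string.h>
    #include <stdio.h>

    struct params {            /* toy stand-in for struct boot_params */
        int keep_a;
        int junk;
        char keep_b[8];
    };

    struct field_range { size_t start, len; };

    #define PRESERVE(m) { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

    int main(void)
    {
        static const struct field_range keep[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
        struct params in = { .keep_a = 42, .junk = 7, .keep_b = "ok" };
        struct params scratch;
        size_t i;

        /* start from all zeroes, then copy in only the whitelisted fields */
        memset(&scratch, 0, sizeof(scratch));
        for (i = 0; i < sizeof(keep) / sizeof(keep[0]); i++)
            memcpy((char *)&scratch + keep[i].start,
                   (char *)&in + keep[i].start, keep[i].len);
        in = scratch;

        /* prints: keep_a=42 junk=0 keep_b=ok */
        printf("keep_a=%d junk=%d keep_b=%s\n", in.keep_a, in.junk, in.keep_b);
        return 0;
    }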
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 98444b77fbe3..fb457ba8ccc6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -192,7 +192,8 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-
+#define X86_FEATURE_FENCE_SWAPGS_USER ( 7*32+10) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL ( 7*32+11) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
@@ -201,9 +202,6 @@
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
-#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
-
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -214,6 +212,7 @@
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
+#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -271,10 +270,12 @@
/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
-#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -304,6 +305,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
@@ -315,6 +317,7 @@
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -352,5 +355,10 @@
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index f498411f2500..1b15304dd098 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_CRASH_H
#define _ASM_X86_CRASH_H
+struct kimage;
+
int crash_load_segments(struct kimage *image);
int crash_copy_backup_region(struct kimage *image);
int crash_setup_memmap_entries(struct kimage *image,
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8554f960e21b..61d6f2c05757 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -142,7 +142,7 @@ extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
-void native_set_fixmap(enum fixed_addresses idx,
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index b3e32b010ab1..c2c01f84df75 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+#define POP_SS_OPCODE 0x1f
+#define MOV_SREG_OPCODE 0x8e
+
+/*
+ * Intel SDM Vol.3A 6.8.3 states;
+ * "Any single-step trap that would be delivered following the MOV to SS
+ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
+ * suppressed."
+ * This function returns true if @insn is MOV SS or POP SS. On these
+ * instructions, single stepping is suppressed.
+ */
+static inline int insn_masking_exception(struct insn *insn)
+{
+ return insn->opcode.bytes[0] == POP_SS_OPCODE ||
+ (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
+ X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
+}
+
#endif /* _ASM_X86_INSN_H */
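Aside on insn_masking_exception() above: it keys off opcode 0x8e (MOV to a segment register) with ModRM reg field 2, since the Sreg encoding is ES=0, CS=1, SS=2, DS=3, FS=4, GS=5. A standalone check of that decoding on the two bytes of "mov %eax, %ss" (plain C, not using the kernel's struct insn):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int modrm_reg(uint8_t modrm)
    {
        return (modrm >> 3) & 0x7;     /* bits 5:3 select the Sreg */
    }

    int main(void)
    {
        /* 0x8e 0xd0 encodes "mov %eax, %ss": mod=11, reg=010 (SS), rm=000 (AX) */
        const uint8_t insn[] = { 0x8e, 0xd0 };

        printf("opcode %#x, reg field %u (2 == SS)\n", insn[0], modrm_reg(insn[1]));
        return 0;
    }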
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 75b748a1deb8..74ee597beb3e 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -5,7 +5,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
- * "Extreme" ones, like Broadwell-E.
+ * "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "SILVERMONT2".
@@ -50,19 +50,24 @@
/* "Small Core" Processors (Atom) */
-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
-#define INTEL_FAM6_ATOM_LINCROFT 0x26
-#define INTEL_FAM6_ATOM_PENWELL 0x27
-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
+#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
+
+#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
+#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
+#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
+
+#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */
+#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merriefield */
+
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
+#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
+
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
+#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 508a062e6cf1..0c8f4281b151 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -5,6 +5,8 @@
#ifndef __ASSEMBLY__
+#include <asm/nospec-branch.h>
+
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
@@ -53,11 +55,13 @@ static inline void native_irq_enable(void)
static inline __cpuidle void native_safe_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
static inline __cpuidle void native_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 282630e4c6ea..1624a7ffa95d 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -66,7 +66,7 @@ struct kimage;
/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START (0UL)
-#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */
+#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
/*
* CPU does not save ss and sp on stack if execution is already
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9a8167b175d5..d2c14a96ec28 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -261,6 +261,7 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
+ struct list_head lpage_disallowed_link;
/*
* The following two entries are used to key the shadow page in the
@@ -273,6 +274,7 @@ struct kvm_mmu_page {
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
bool unsync;
+ bool lpage_disallowed; /* Can't be replaced by an equiv large page */
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
@@ -487,6 +489,7 @@ struct kvm_vcpu_arch {
bool tpr_access_reporting;
u64 ia32_xss;
u64 microcode_version;
+ u64 arch_capabilities;
/*
* Paging state of the vcpu
@@ -723,6 +726,7 @@ struct kvm_arch {
*/
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
+ struct list_head lpage_disallowed_mmu_pages;
struct kvm_page_track_notifier_node mmu_sp_tracker;
struct kvm_page_track_notifier_head track_notifier_head;
@@ -797,6 +801,8 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+ struct task_struct *nx_lpage_recovery_thread;
};
struct kvm_vm_stat {
@@ -810,6 +816,7 @@ struct kvm_vm_stat {
ulong mmu_unsync;
ulong remote_tlb_flush;
ulong lpages;
+ ulong nx_lpage_splits;
};
struct kvm_vcpu_stat {
@@ -1308,25 +1315,29 @@ enum {
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
*/
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
- "666: " insn "\n\t" \
- "668: \n\t" \
- ".pushsection .fixup, \"ax\" \n" \
- "667: \n\t" \
- cleanup_insn "\n\t" \
- "cmpb $0, kvm_rebooting \n\t" \
- "jne 668b \n\t" \
- __ASM_SIZE(push) " $666b \n\t" \
- "jmp kvm_spurious_fault \n\t" \
- ".popsection \n\t" \
- _ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
+ "666: \n\t" \
+ insn "\n\t" \
+ "jmp 668f \n\t" \
+ "667: \n\t" \
+ "call kvm_spurious_fault \n\t" \
+ "668: \n\t" \
+ ".pushsection .fixup, \"ax\" \n\t" \
+ "700: \n\t" \
+ cleanup_insn "\n\t" \
+ "cmpb $0, kvm_rebooting\n\t" \
+ "je 667b \n\t" \
+ "jmp 668b \n\t" \
+ ".popsection \n\t" \
+ _ASM_EXTABLE(666b, 700b)
#define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "")
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 5e69154c9f07..c8e472e2c896 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+static inline u32 intel_get_microcode_revision(void)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+ return rev;
+}
+
extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
extern int microcode_sanity_check(void *mc, int print_err);
extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9963e21ac443..8d162e0f2881 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
+#include <linux/bits.h>
+
/*
* CPU model specific register (MSR) numbers.
*
@@ -38,13 +40,14 @@
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
@@ -61,24 +64,45 @@
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
-#define ARCH_CAP_SSB_NO (1 << 4) /*
- * Not susceptible to Speculative Store Bypass
- * attack, so no Speculative Store Bypass
- * control required.
- */
+#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO BIT(4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
+#define ARCH_CAP_MDS_NO BIT(5) /*
+ * Not susceptible to
+ * Microarchitectural Data
+ * Sampling (MDS) vulnerabilities.
+ */
+#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /*
+ * The processor is not susceptible to a
+ * machine check error due to modifying the
+ * code page size along with either the
+ * physical address or cache type
+ * without TLB invalidation.
+ */
+#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO BIT(8) /*
+ * Not susceptible to
+ * TSX Async Abort (TAA) vulnerabilities.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
-#define L1D_FLUSH (1 << 0) /*
- * Writeback and invalidate the
- * L1 data cache.
- */
+#define L1D_FLUSH BIT(0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
+#define MSR_IA32_TSX_CTRL 0x00000122
+#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
@@ -305,6 +329,7 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index f37f2d8a2989..58b1b766e84e 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
@@ -18,7 +19,7 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
#define MWAITX_ECX_TIMER_ENABLE BIT(1)
#define MWAITX_MAX_LOOPS ((u32)-1)
-#define MWAITX_DISABLE_CSTATES 0xf
+#define MWAITX_DISABLE_CSTATES 0xf0
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
@@ -38,6 +39,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
/* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
@@ -72,6 +75,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx)
{
+ /* No MDS buffer clear as this is AMD/HYGON only */
+
/* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;"
:: "a" (eax), "b" (ebx), "c" (ecx));
@@ -79,6 +84,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 1b4132161c1f..8d56d701b5f7 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -3,6 +3,8 @@
#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_
+#include <linux/static_key.h>
+
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
@@ -194,7 +196,7 @@
" lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
- "903: addl $4, %%esp;\n" \
+ "903: lea 4(%%esp), %%esp;\n" \
" pushl %[thunk_target];\n" \
" ret;\n" \
" .align 16\n" \
@@ -214,10 +216,17 @@ enum spectre_v2_mitigation {
SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
SPECTRE_V2_RETPOLINE_GENERIC,
SPECTRE_V2_RETPOLINE_AMD,
- SPECTRE_V2_IBRS,
SPECTRE_V2_IBRS_ENHANCED,
};
+/* The indirect branch speculation control variants */
+enum spectre_v2_user_mitigation {
+ SPECTRE_V2_USER_NONE,
+ SPECTRE_V2_USER_STRICT,
+ SPECTRE_V2_USER_PRCTL,
+ SPECTRE_V2_USER_SECCOMP,
+};
+
/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
@@ -295,6 +304,60 @@ do { \
preempt_enable(); \
} while (0)
+DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+#include <asm/segment.h>
+
+/**
+ * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+static inline void mds_clear_cpu_buffers(void)
+{
+ static const u16 ds = __KERNEL_DS;
+
+ /*
+ * Has to be the memory-operand variant because only that
+ * guarantees the CPU buffer flush functionality according to
+ * documentation. The register-operand variant does not.
+ * Works with any segment selector, but a valid writable
+ * data segment is the fastest variant.
+ *
+ * "cc" clobber is required because VERW modifies ZF.
+ */
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+}
+
+/**
+ * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_user_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+}
+
+/**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_idle_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_idle_clear))
+ mds_clear_cpu_buffers();
+}
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index f353061bba1d..81d5ea71bbe9 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -200,16 +200,20 @@ struct x86_pmu_capability {
#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK 0x0F
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN (1ULL<<57)
#define IBS_FETCH_VAL (1ULL<<49)
#define IBS_FETCH_ENABLE (1ULL<<48)
#define IBS_FETCH_CNT 0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 221a32ed1372..f12e61e2a86b 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -44,15 +44,15 @@ struct mm_struct;
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
- *ptep = native_make_pte(0);
+ WRITE_ONCE(*ptep, pte);
}
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- *ptep = pte;
+ native_set_pte(ptep, native_make_pte(0));
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -62,7 +62,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
}
static inline void native_pmd_clear(pmd_t *pmd)
@@ -98,7 +98,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
}
static inline void native_pud_clear(pud_t *pud)
@@ -131,7 +131,7 @@ static inline pgd_t *native_get_shadow_pgd(pgd_t *pgdp)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = kaiser_set_shadow_pgd(pgdp, pgd);
+ WRITE_ONCE(*pgdp, kaiser_set_shadow_pgd(pgdp, pgd));
}
static inline void native_pgd_clear(pgd_t *pgd)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ee8c6290c421..7aa9a9bd9d98 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -213,6 +213,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
+#define native_cpuid_reg(reg) \
+static inline unsigned int native_cpuid_##reg(unsigned int op) \
+{ \
+ unsigned int eax = op, ebx, ecx = 0, edx; \
+ \
+ native_cpuid(&eax, &ebx, &ecx, &edx); \
+ \
+ return reg; \
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
static inline void load_cr3(pgd_t *pgdir)
{
write_cr3(__pa(pgdir));
@@ -874,4 +892,17 @@ enum l1tf_mitigations {
extern enum l1tf_mitigations l1tf_mitigation;
+enum mds_mitigations {
+ MDS_MITIGATION_OFF,
+ MDS_MITIGATION_FULL,
+ MDS_MITIGATION_VMWERV,
+};
+
+enum taa_mitigations {
+ TAA_MITIGATION_OFF,
+ TAA_MITIGATION_UCODE_NEEDED,
+ TAA_MITIGATION_VERW,
+ TAA_MITIGATION_TSX_DISABLED,
+};
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 2b5d686ea9f3..fb489cd848fa 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -115,9 +115,9 @@ static inline int v8086_mode(struct pt_regs *regs)
#endif
}
-#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
+#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
@@ -128,8 +128,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
+#else /* !CONFIG_X86_64 */
+ return false;
+#endif
}
+#ifdef CONFIG_X86_64
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
#endif
@@ -196,23 +200,51 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
}
/**
+ * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
+ * kernel stack which is specified by @regs. If the @n th entry is NOT in
+ * the kernel stack, this returns NULL.
+ */
+static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return addr;
+ else
+ return NULL;
+}
+
+/* To avoid include hell, we can't include uaccess.h */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+
+/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
- * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
- unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
- addr += n;
- if (regs_within_kernel_stack(regs, (unsigned long)addr))
- return *addr;
- else
- return 0;
+ unsigned long *addr;
+ unsigned long val;
+ long ret;
+
+ addr = regs_get_kernel_stack_nth_addr(regs, n);
+ if (addr) {
+ ret = probe_kernel_read(&val, addr, sizeof(val));
+ if (!ret)
+ return val;
+ }
+ return 0;
}
#define arch_has_single_step() (1)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index d25fb6beb2f0..dcaf7100b69c 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -177,16 +177,6 @@ extern int safe_smp_processor_id(void);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
-
-#ifndef CONFIG_X86_64
-static inline int logical_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-}
-
-#endif
-
extern int hard_smp_processor_id(void);
#else /* CONFIG_X86_LOCAL_APIC */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5cd7f0..5393babc0598 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
static inline void speculative_store_bypass_ht_init(void) { }
#endif
-extern void speculative_store_bypass_update(unsigned long tif);
-
-static inline void speculative_store_bypass_update_current(void)
-{
- speculative_store_bypass_update(current_thread_info()->flags);
-}
+extern void speculation_ctrl_update(unsigned long tif);
+extern void speculation_ctrl_update_current(void);
#endif
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 37f2e0b377ad..4141ead86879 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -55,13 +55,16 @@ extern int kstack_depth_to_print;
static inline unsigned long *
get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
{
+ struct inactive_task_frame *frame;
+
if (regs)
return (unsigned long *)regs->bp;
if (task == current)
return __builtin_frame_address(0);
- return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+ frame = (struct inactive_task_frame *)task->thread.sp;
+ return (unsigned long *)READ_ONCE_NOCHECK(frame->bp);
}
#else
static inline unsigned long *
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 8e9dbe7b73a1..5cc2ce4ab8a3 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -11,7 +11,13 @@
/* image of the saved processor state */
struct saved_context {
- u16 es, fs, gs, ss;
+ /*
+ * On x86_32, all segment registers, with the possible exception of
+ * gs, are saved at kernel entry in pt_regs.
+ */
+#ifdef CONFIG_X86_32_LAZY_GS
+ u16 gs;
+#endif
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 2bd96b4df140..701751918921 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -19,8 +19,20 @@
*/
struct saved_context {
struct pt_regs regs;
- u16 ds, es, fs, gs, ss;
- unsigned long gs_base, gs_kernel_base, fs_base;
+
+ /*
+ * User CS and SS are saved in current_pt_regs(). The rest of the
+ * segment selectors need to be saved and restored here.
+ */
+ u16 ds, es, fs, gs;
+
+ /*
+ * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+ * so we save them separately. We save the kernelmode GSBASE to
+ * restore percpu access after resume.
+ */
+ unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
unsigned long cr0, cr2, cr3, cr4, cr8;
u64 misc_enable;
bool misc_enable_saved;
@@ -29,8 +41,7 @@ struct saved_context {
u16 gdt_pad; /* Unused */
struct desc_ptr gdt_desc;
u16 idt_pad;
- u16 idt_limit;
- unsigned long idt_base;
+ struct desc_ptr idt;
u16 ldt;
u16 tss;
unsigned long tr;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436acd463..e959b8d40473 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -8,9 +8,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
__visible struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss);
/* This runs runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
@@ -38,6 +35,7 @@ asmlinkage void ret_from_fork(void);
/* data that is pointed to by thread.sp */
struct inactive_task_frame {
+ unsigned long flags;
#ifdef CONFIG_X86_64
unsigned long r15;
unsigned long r14;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2d8788a59b4d..0438f7fbb383 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,10 +83,12 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_SSBD 5 /* Reduced data speculation */
+#define TIF_SSBD 5 /* Speculative store bypass disable */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
+#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
@@ -111,6 +113,8 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
+#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
@@ -140,8 +144,18 @@ struct thread_info {
_TIF_NOHZ)
/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+#define _TIF_WORK_CTXSW_BASE \
+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+ _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
+#endif
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 686a58d793e5..f5ca15622dc9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -68,8 +68,12 @@ static inline void invpcid_flush_all_nonglobals(void)
struct tlb_state {
struct mm_struct *active_mm;
int state;
- /* last user mm's ctx id */
- u64 last_ctx_id;
+
+ /* Last user mm for optimizing IBPB */
+ union {
+ struct mm_struct *last_user_mm;
+ unsigned long last_user_mm_ibpb;
+ };
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 2177c7551ff7..9db8d8758ed3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -438,8 +438,10 @@ do { \
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
+ __typeof__(ptr) __gu_ptr = (ptr); \
+ __typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e728699db774..59e78c3d3dd8 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
* works on all CPUs. This is volatile so that it orders
* correctly wrt barrier() and to keep gcc from cleverly
* hoisting it out of the calling function.
+ *
+ * If RDPID is available, use it.
*/
- asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ alternative_io ("lsl %[seg],%[p]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
return p;
}
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index ccdc23d89b60..9f694537a103 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -216,6 +216,9 @@ privcmd_call(unsigned call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
+ return -EINVAL;
+
stac();
asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 3dec769cadf7..1c532b3f18ea 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -27,7 +27,6 @@ header-y += ldt.h
header-y += mce.h
header-y += mman.h
header-y += msgbuf.h
-header-y += msr-index.h
header-y += msr.h
header-y += mtrr.h
header-y += param.h
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 69a6e07e3149..db7dae58745f 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -28,6 +28,8 @@ struct mce {
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
__u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
+ __u32 microcode;/* Microcode revision */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 0a1e8a67cc99..c3fba8b52753 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1717,7 +1717,7 @@ int __acpi_acquire_global_lock(unsigned int *lock)
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = cmpxchg(lock, old, new);
} while (unlikely (val != old));
- return (new < 3) ? -1 : 0;
+ return ((new & 0x3) < 3) ? -1 : 0;
}
int __acpi_release_global_lock(unsigned int *lock)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 169963f471bb..50b8ed0317a3 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -109,6 +109,15 @@ ENTRY(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15
+#ifdef CONFIG_KASAN
+ /*
+ * The suspend path may have poisoned some areas deeper in the stack,
+ * which we now need to unpoison.
+ */
+ movq %rsp, %rdi
+ call kasan_unpoison_task_stack_below
+#endif
+
xorl %eax, %eax
addq $8, %rsp
FRAME_END
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4f2af1ee09cb..722a76b88bcc 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -183,7 +183,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
/*
* Debug level, exported for io_apic.c
*/
-unsigned int apic_verbosity;
+int apic_verbosity;
int pic_mode;
@@ -629,7 +629,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
/*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
*/
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
@@ -713,7 +713,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
- void (*real_handler)(struct clock_event_device *dev);
+ u64 tsc_perj = 0, tsc_start = 0;
+ unsigned long jif_start;
unsigned long deltaj;
long delta, deltatsc;
int pm_referenced = 0;
@@ -742,28 +743,64 @@ static int __init calibrate_APIC_clock(void)
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
+ /*
+ * There are platforms w/o global clockevent devices. Instead of
+ * making the calibration conditional on that, use a polling based
+ * approach everywhere.
+ */
local_irq_disable();
- /* Replace the global interrupt handler */
- real_handler = global_clock_event->event_handler;
- global_clock_event->event_handler = lapic_cal_handler;
-
/*
* Setup the APIC counter to maximum. There is no way the lapic
* can underflow in the 100ms detection time frame
*/
__setup_APIC_LVTT(0xffffffff, 0, 0);
- /* Let the interrupts run */
+ /*
+ * Methods to terminate the calibration loop:
+ * 1) Global clockevent if available (jiffies)
+ * 2) TSC if available and frequency is known
+ */
+ jif_start = READ_ONCE(jiffies);
+
+ if (tsc_khz) {
+ tsc_start = rdtsc();
+ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+ }
+
+ /*
+ * Enable interrupts so the tick can fire, if a global
+ * clockevent device is available
+ */
local_irq_enable();
- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
- cpu_relax();
+ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+ /* Wait for a tick to elapse */
+ while (1) {
+ if (tsc_khz) {
+ u64 tsc_now = rdtsc();
+ if ((tsc_now - tsc_start) >= tsc_perj) {
+ tsc_start += tsc_perj;
+ break;
+ }
+ } else {
+ unsigned long jif_now = READ_ONCE(jiffies);
- local_irq_disable();
+ if (time_after(jif_now, jif_start)) {
+ jif_start = jif_now;
+ break;
+ }
+ }
+ cpu_relax();
+ }
- /* Restore the real event handler */
- global_clock_event->event_handler = real_handler;
+ /* Invoke the calibration routine */
+ local_irq_disable();
+ lapic_cal_handler(NULL);
+ local_irq_enable();
+ }
+
+ local_irq_disable();
/* Build delta t1-t2 as apic timer counts down */
delta = lapic_cal_t1 - lapic_cal_t2;
@@ -814,10 +851,11 @@ static int __init calibrate_APIC_clock(void)
levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
/*
- * PM timer calibration failed or not turned on
- * so lets try APIC timer based calibration
+ * PM timer calibration failed or not turned on, so let's try APIC
+ * timer based calibration, if a global clockevent device is
+ * available.
*/
- if (!pm_referenced) {
+ if (!pm_referenced && global_clock_event) {
apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
/*
@@ -1243,6 +1281,56 @@ static void lapic_setup_esr(void)
oldvalue, value);
}
+static void apic_pending_intr_clear(void)
+{
+ long long max_loops = cpu_khz ? cpu_khz : 1000000;
+ unsigned long long tsc = 0, ntsc;
+ unsigned int value, queued;
+ int i, j, acked = 0;
+
+ if (boot_cpu_has(X86_FEATURE_TSC))
+ tsc = rdtsc();
+ /*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now CPU has serviced that pending interrupt and
+ * it might not have done the ack_APIC_irq() because it thought,
+ * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+ * does not clear the ISR bit and cpu thinks it has already serviced
+ * the interrupt. Hence a vector might get locked. It was noticed
+ * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+ */
+ do {
+ queued = 0;
+ for (i = APIC_ISR_NR - 1; i >= 0; i--)
+ queued |= apic_read(APIC_IRR + i*0x10);
+
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+ if (value & (1<<j)) {
+ ack_APIC_irq();
+ acked++;
+ }
+ }
+ }
+ if (acked > 256) {
+ printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
+ acked);
+ break;
+ }
+ if (queued) {
+ if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
+ ntsc = rdtsc();
+ max_loops = (cpu_khz << 10) - (ntsc - tsc);
+ } else
+ max_loops--;
+ }
+ } while (queued && max_loops > 0);
+ WARN_ON(max_loops <= 0);
+}
+
/**
* setup_local_APIC - setup the local APIC
*
@@ -1252,19 +1340,22 @@ static void lapic_setup_esr(void)
void setup_local_APIC(void)
{
int cpu = smp_processor_id();
- unsigned int value, queued;
- int i, j, acked = 0;
- unsigned long long tsc = 0, ntsc;
- long long max_loops = cpu_khz ? cpu_khz : 1000000;
+ unsigned int value;
- if (boot_cpu_has(X86_FEATURE_TSC))
- tsc = rdtsc();
if (disable_apic) {
disable_ioapic_support();
return;
}
+ /*
+ * If this comes from kexec/kcrash the APIC might be enabled in
+ * SPIV. Soft disable it before doing further initialization.
+ */
+ value = apic_read(APIC_SPIV);
+ value &= ~APIC_SPIV_APIC_ENABLED;
+ apic_write(APIC_SPIV, value);
+
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (lapic_is_integrated() && apic->disable_esr) {
@@ -1290,16 +1381,21 @@ void setup_local_APIC(void)
apic->init_apic_ldr();
#ifdef CONFIG_X86_32
- /*
- * APIC LDR is initialized. If logical_apicid mapping was
- * initialized during get_smp_config(), make sure it matches the
- * actual value.
- */
- i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
- WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
- /* always use the value from LDR */
- early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
- logical_smp_processor_id();
+ if (apic->dest_logical) {
+ int logical_apicid, ldr_apicid;
+
+ /*
+ * APIC LDR is initialized. If logical_apicid mapping was
+ * initialized during get_smp_config(), make sure it matches
+ * the actual value.
+ */
+ logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+ ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+ if (logical_apicid != BAD_APICID)
+ WARN_ON(logical_apicid != ldr_apicid);
+ /* Always use the value from LDR. */
+ early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
+ }
#endif
/*
@@ -1310,45 +1406,7 @@ void setup_local_APIC(void)
value &= ~APIC_TPRI_MASK;
apic_write(APIC_TASKPRI, value);
- /*
- * After a crash, we no longer service the interrupts and a pending
- * interrupt from previous kernel might still have ISR bit set.
- *
- * Most probably by now CPU has serviced that pending interrupt and
- * it might not have done the ack_APIC_irq() because it thought,
- * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
- * does not clear the ISR bit and cpu thinks it has already serivced
- * the interrupt. Hence a vector might get locked. It was noticed
- * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
- */
- do {
- queued = 0;
- for (i = APIC_ISR_NR - 1; i >= 0; i--)
- queued |= apic_read(APIC_IRR + i*0x10);
-
- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
- value = apic_read(APIC_ISR + i*0x10);
- for (j = 31; j >= 0; j--) {
- if (value & (1<<j)) {
- ack_APIC_irq();
- acked++;
- }
- }
- }
- if (acked > 256) {
- printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
- acked);
- break;
- }
- if (queued) {
- if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
- ntsc = rdtsc();
- max_loops = (cpu_khz << 10) - (ntsc - tsc);
- } else
- max_loops--;
- }
- } while (queued && max_loops > 0);
- WARN_ON(max_loops <= 0);
+ apic_pending_intr_clear();
/*
* Now that we are all set up, enable the APIC
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 56012010332c..76fe153ccc6d 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
return early_per_cpu(x86_cpu_to_apicid, cpu);
}
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long val, id;
-
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- id = per_cpu(x86_bios_cpu_apicid, cpu);
- val |= SET_APIC_LOGICAL_ID(id);
-
- return val;
-}
-
/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
*/
static void bigsmp_init_apic_ldr(void)
{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_FLAT);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
}
static void bigsmp_setup_apic_routing(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d34629d70421..3401b28f1312 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1712,9 +1712,10 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
- /* If we are moving the irq we need to mask it */
+ /* If we are moving the IRQ we need to mask it */
if (unlikely(irqd_is_setaffinity_pending(data))) {
- mask_ioapic_irq(data);
+ if (!irqd_irq_masked(data))
+ mask_ioapic_irq(data);
return true;
}
return false;
@@ -1751,7 +1752,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
*/
if (!io_apic_level_ack_pending(data->chip_data))
irq_move_masked_irq(data);
- unmask_ioapic_irq(data);
+ /* If the IRQ is masked in the core, leave it: */
+ if (!irqd_irq_masked(data))
+ unmask_ioapic_irq(data);
}
}
#else
@@ -2346,7 +2349,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
* dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
* gsi_top if ioapic_dynirq_base hasn't been initialized yet.
*/
- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+ if (!ioapic_initialized)
+ return gsi_top;
+ /*
+ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+ * updated. So simply return @from if ioapic_dynirq_base == 0.
+ */
+ return ioapic_dynirq_base ? : from;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 33b63670bf09..f6e386fe510c 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -25,7 +25,7 @@ obj-y += bugs.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel.o tsx.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index be6d0543e626..9428b54fff66 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -746,6 +746,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "force"))
+ rdrand_force = true;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+ /*
+ * Saving of the MSR used to hide the RDRAND support during
+ * suspend/resume is done by arch/x86/power/cpu.c, which is
+ * dependent on CONFIG_PM_SLEEP.
+ */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return;
+
+ /*
+ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+ * RDRAND support using the CPUID function directly.
+ */
+ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+ return;
+
+ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+ /*
+ * Verify that the CPUID change has occurred in case the kernel is
+ * running virtualized and the hypervisor doesn't support the MSR.
+ */
+ if (cpuid_ecx(1) & BIT(30)) {
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+ return;
+ }
+
+ clear_cpu_cap(c, X86_FEATURE_RDRAND);
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check on whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
+}
+
static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
@@ -760,14 +818,24 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
wrmsrl_safe(MSR_F15H_IC_CFG, value);
}
}
+
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check on whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
}
static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
- if (!cpu_has(c, X86_FEATURE_CPB))
+ /*
+ * Fix erratum 1076: CPB feature bit not being set in CPUID.
+ * Always set it, except when running under a hypervisor.
+ */
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}
@@ -801,6 +869,7 @@ static void init_amd(struct cpuinfo_x86 *c)
case 0x10: init_amd_gh(c); break;
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
+ case 0x16: init_amd_jg(c); break;
case 0x17: init_amd_zn(c); break;
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6221166e3fca..24307d5bb4b8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
+#include <linux/sched/smt.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
@@ -24,21 +25,26 @@
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
+#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>
+#include "cpu.h"
+
+static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
+static void __init mds_print_mitigation(void);
+static void __init taa_select_mitigation(void);
-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
-u64 __ro_after_init x86_spec_ctrl_base;
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);
/*
* The vendor and possibly platform specific bits which can be modified in
@@ -53,6 +59,20 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+/* Control conditional STIBP in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -80,16 +100,21 @@ void __init check_bugs(void)
if (boot_cpu_has(X86_FEATURE_STIBP))
x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
- /* Select the proper spectre mitigation before patching alternatives */
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
+ ssb_select_mitigation();
+ l1tf_select_mitigation();
+ mds_select_mitigation();
+ taa_select_mitigation();
/*
- * Select proper mitigation for any exposure to the Speculative Store
- * Bypass vulnerability.
+ * As the MDS and TAA mitigations are inter-related, defer printing the
+ * MDS mitigation until after the TAA mitigation selection is done.
*/
- ssb_select_mitigation();
+ mds_print_mitigation();
- l1tf_select_mitigation();
+ arch_smt_update();
#ifdef CONFIG_X86_32
/*
@@ -123,31 +148,6 @@ void __init check_bugs(void)
#endif
}
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
- SPECTRE_V2_CMD_NONE,
- SPECTRE_V2_CMD_AUTO,
- SPECTRE_V2_CMD_FORCE,
- SPECTRE_V2_CMD_RETPOLINE,
- SPECTRE_V2_CMD_RETPOLINE_GENERIC,
- SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
- [SPECTRE_V2_NONE] = "Vulnerable",
- [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
- [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
- [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
- [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
- [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt) "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
- SPECTRE_V2_NONE;
-
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
@@ -165,9 +165,14 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
/* SSBD controlled in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+ /* Conditional STIBP enabled? */
+ if (static_branch_unlikely(&switch_to_cond_stibp))
+ hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -201,7 +206,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
ssbd_spec_ctrl_to_tif(hostval);
- speculative_store_bypass_update(tif);
+ speculation_ctrl_update(tif);
}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -216,6 +221,275 @@ static void x86_amd_ssb_disable(void)
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
+#undef pr_fmt
+#define pr_fmt(fmt) "MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
+ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void __init mds_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ return;
+ }
+
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
+
+ static_branch_enable(&mds_user_clear);
+
+ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
+}
+
+static void __init mds_print_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
+ return;
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ mds_mitigation = MDS_MITIGATION_OFF;
+ else if (!strcmp(str, "full"))
+ mds_mitigation = MDS_MITIGATION_FULL;
+ else if (!strcmp(str, "full,nosmt")) {
+ mds_mitigation = MDS_MITIGATION_FULL;
+ mds_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("mds", mds_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt) "TAA: " fmt
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
+static bool taa_nosmt __ro_after_init;
+
+static const char * const taa_strings[] = {
+ [TAA_MITIGATION_OFF] = "Vulnerable",
+ [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
+ [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
+};
+
+static void __init taa_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /* TSX previously disabled by tsx=off */
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+ taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
+ goto out;
+ }
+
+ if (cpu_mitigations_off()) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ return;
+ }
+
+ /*
+ * TAA mitigation via VERW is turned off if both
+ * tsx_async_abort=off and mds=off are specified.
+ */
+ if (taa_mitigation == TAA_MITIGATION_OFF &&
+ mds_mitigation == MDS_MITIGATION_OFF)
+ goto out;
+
+ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ taa_mitigation = TAA_MITIGATION_VERW;
+ else
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+ * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
+ * A microcode update fixes this behavior to clear CPU buffers. It also
+ * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
+ * ARCH_CAP_TSX_CTRL_MSR bit.
+ *
+ * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
+ * update is required.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
+ !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+
+ /*
+ * TSX is enabled, select alternate mitigation for TAA which is
+ * the same as MDS. Enable MDS static branch to clear CPU buffers.
+ *
+ * For guests that can't determine whether the correct microcode is
+ * present on host, enable the mitigation for UCODE_NEEDED as well.
+ */
+ static_branch_enable(&mds_user_clear);
+
+ if (taa_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
+ /*
+ * Update MDS mitigation, if necessary, as the mds_user_clear is
+ * now enabled for TAA mitigation.
+ */
+ if (mds_mitigation == MDS_MITIGATION_OFF &&
+ boot_cpu_has_bug(X86_BUG_MDS)) {
+ mds_mitigation = MDS_MITIGATION_FULL;
+ mds_select_mitigation();
+ }
+out:
+ pr_info("%s\n", taa_strings[taa_mitigation]);
+}
+
+static int __init tsx_async_abort_parse_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_TAA))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off")) {
+ taa_mitigation = TAA_MITIGATION_OFF;
+ } else if (!strcmp(str, "full")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ } else if (!strcmp(str, "full,nosmt")) {
+ taa_mitigation = TAA_MITIGATION_VERW;
+ taa_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+ SPECTRE_V1_MITIGATION_NONE,
+ SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+ SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+ [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
+ /*
+ * On CPUs which are vulnerable to Meltdown, SMAP does not
+ * prevent speculative access to user data in the L1 cache.
+ * Consider SMAP to be non-functional as a mitigation on these
+ * CPUs.
+ */
+ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+ return false;
+
+ return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return;
+ }
+
+ if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+ /*
+ * With Spectre v1, a user can speculatively control either
+ * path of a conditional swapgs with a user-controlled GS
+ * value. The mitigation is to add lfences to both code paths.
+ *
+ * If FSGSBASE is enabled, the user can put a kernel address in
+ * GS, in which case SMAP provides no protection.
+ *
+ * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+ * FSGSBASE enablement patches have been merged. ]
+ *
+ * If FSGSBASE is disabled, the user can only put a user space
+ * address in GS. That makes an attack harder, but still
+ * possible if there's no SMAP protection.
+ */
+ if (!smap_works_speculatively()) {
+ /*
+ * Mitigation can be provided from SWAPGS itself or
+ * PTI as the CR3 write in the Meltdown mitigation
+ * is serializing.
+ *
+ * If neither is there, mitigate with an LFENCE to
+ * stop speculation through swapgs.
+ */
+ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+ !boot_cpu_has(X86_FEATURE_KAISER))
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+ /*
+ * Enable lfences in the kernel entry (non-swapgs)
+ * paths, to prevent user entry from speculatively
+ * skipping swapgs.
+ */
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+ }
+ }
+
+ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+
#ifdef RETPOLINE
static bool spectre_v2_bad_module;
@@ -237,67 +511,225 @@ static inline const char *spectre_v2_module_string(void)
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
-static void __init spec2_print_if_insecure(const char *reason)
+static inline bool match_option(const char *arg, int arglen, const char *opt)
{
- if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- pr_info("%s selected on command line.\n", reason);
+ int len = strlen(opt);
+
+ return len == arglen && !strncmp(arg, opt, len);
}
-static void __init spec2_print_if_secure(const char *reason)
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_NONE,
+ SPECTRE_V2_CMD_AUTO,
+ SPECTRE_V2_CMD_FORCE,
+ SPECTRE_V2_CMD_RETPOLINE,
+ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+ SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_NONE,
+ SPECTRE_V2_USER_CMD_AUTO,
+ SPECTRE_V2_USER_CMD_FORCE,
+ SPECTRE_V2_USER_CMD_PRCTL,
+ SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+ SPECTRE_V2_USER_CMD_SECCOMP,
+ SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
+ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
+ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
+ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+ const char *option;
+ enum spectre_v2_user_cmd cmd;
+ bool secure;
+} v2_user_options[] __initconst = {
+ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
+ { "off", SPECTRE_V2_USER_CMD_NONE, false },
+ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
+ { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
+ { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
+ { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
+ { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- pr_info("%s selected on command line.\n", reason);
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+ pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
-static inline bool retp_compiler(void)
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
- return __is_defined(RETPOLINE);
+ char arg[20];
+ int ret, i;
+
+ switch (v2_cmd) {
+ case SPECTRE_V2_CMD_NONE:
+ return SPECTRE_V2_USER_CMD_NONE;
+ case SPECTRE_V2_CMD_FORCE:
+ return SPECTRE_V2_USER_CMD_FORCE;
+ default:
+ break;
+ }
+
+ ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+ arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_USER_CMD_AUTO;
+
+ for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+ if (match_option(arg, ret, v2_user_options[i].option)) {
+ spec_v2_user_print_cond(v2_user_options[i].option,
+ v2_user_options[i].secure);
+ return v2_user_options[i].cmd;
+ }
+ }
+
+ pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+ return SPECTRE_V2_USER_CMD_AUTO;
}
-static inline bool match_option(const char *arg, int arglen, const char *opt)
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
- int len = strlen(opt);
+ enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+ bool smt_possible = IS_ENABLED(CONFIG_SMP);
+ enum spectre_v2_user_cmd cmd;
- return len == arglen && !strncmp(arg, opt, len);
+ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+ return;
+
+ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ smt_possible = false;
+
+ cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+ switch (cmd) {
+ case SPECTRE_V2_USER_CMD_NONE:
+ goto set_mode;
+ case SPECTRE_V2_USER_CMD_FORCE:
+ mode = SPECTRE_V2_USER_STRICT;
+ break;
+ case SPECTRE_V2_USER_CMD_PRCTL:
+ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
+ case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_SECCOMP:
+ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ mode = SPECTRE_V2_USER_SECCOMP;
+ else
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
+ }
+
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+ switch (cmd) {
+ case SPECTRE_V2_USER_CMD_FORCE:
+ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+ case SPECTRE_V2_USER_CMD_PRCTL:
+ case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+ break;
+ }
+
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ static_key_enabled(&switch_mm_always_ibpb) ?
+ "always-on" : "conditional");
+ }
+
+ /* If enhanced IBRS is enabled, no STIBP is required */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
+ /*
+ * If SMT is not possible or STIBP is not available, clear the STIBP
+ * mode.
+ */
+ if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ mode = SPECTRE_V2_USER_NONE;
+set_mode:
+ spectre_v2_user = mode;
+ /* Only print the STIBP mode when SMT is possible */
+ if (smt_possible)
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
+static const char * const spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+ [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
+ [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
+};
+
static const struct {
const char *option;
enum spectre_v2_mitigation_cmd cmd;
bool secure;
-} mitigation_options[] = {
- { "off", SPECTRE_V2_CMD_NONE, false },
- { "on", SPECTRE_V2_CMD_FORCE, true },
- { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
- { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
- { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
- { "auto", SPECTRE_V2_CMD_AUTO, false },
+} mitigation_options[] __initconst = {
+ { "off", SPECTRE_V2_CMD_NONE, false },
+ { "on", SPECTRE_V2_CMD_FORCE, true },
+ { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
+ { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
+ { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+ { "auto", SPECTRE_V2_CMD_AUTO, false },
};
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+ pr_info("%s selected on command line.\n", reason);
+}
+
+static inline bool retp_compiler(void)
+{
+ return __is_defined(RETPOLINE);
+}
+
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
+ enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
char arg[20];
int ret, i;
- enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
- if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+ cpu_mitigations_off())
return SPECTRE_V2_CMD_NONE;
- else {
- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
- if (ret < 0)
- return SPECTRE_V2_CMD_AUTO;
- for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
- if (!match_option(arg, ret, mitigation_options[i].option))
- continue;
- cmd = mitigation_options[i].cmd;
- break;
- }
+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_CMD_AUTO;
- if (i >= ARRAY_SIZE(mitigation_options)) {
- pr_err("unknown option (%s). Switching to AUTO select\n", arg);
- return SPECTRE_V2_CMD_AUTO;
- }
+ for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+ if (!match_option(arg, ret, mitigation_options[i].option))
+ continue;
+ cmd = mitigation_options[i].cmd;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mitigation_options)) {
+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+ return SPECTRE_V2_CMD_AUTO;
}
if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -314,11 +746,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
- if (mitigation_options[i].secure)
- spec2_print_if_secure(mitigation_options[i].option);
- else
- spec2_print_if_insecure(mitigation_options[i].option);
-
+ spec_v2_print_cond(mitigation_options[i].option,
+ mitigation_options[i].secure);
return cmd;
}
@@ -400,12 +829,6 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
- /* Initialize Indirect Branch Prediction Barrier if supported */
- if (boot_cpu_has(X86_FEATURE_IBPB)) {
- setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
- pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
- }
-
/*
* Retpoline means the kernel is safe because it has no indirect
* branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -421,6 +844,107 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
+
+ /* Set up IBPB and STIBP depending on the general spectre V2 command */
+ spectre_v2_user_select_mitigation(cmd);
+}
+
+static void update_stibp_msr(void * __unused)
+{
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+ u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+ if (sched_smt_active())
+ mask |= SPEC_CTRL_STIBP;
+
+ if (mask == x86_spec_ctrl_base)
+ return;
+
+ pr_info("Update user space SMT mitigation: STIBP %s\n",
+ mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+ if (sched_smt_active())
+ static_branch_enable(&switch_to_cond_stibp);
+ else
+ static_branch_disable(&switch_to_cond_stibp);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+ *
+ * The other variants cannot be mitigated when SMT is enabled, so
+ * clearing the buffers on idle just to prevent the Store Buffer
+ * repartitioning leak would be a window dressing exercise.
+ */
+ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+ return;
+
+ if (sched_smt_active())
+ static_branch_enable(&mds_idle_clear);
+ else
+ static_branch_disable(&mds_idle_clear);
+}
+
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+
+void arch_smt_update(void)
+{
+ mutex_lock(&spec_ctrl_mutex);
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+ update_stibp_strict();
+ break;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ update_indir_branch_cond();
+ break;
+ }
+
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VMWERV:
+ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
+ break;
+ }
+
+ switch (taa_mitigation) {
+ case TAA_MITIGATION_VERW:
+ case TAA_MITIGATION_UCODE_NEEDED:
+ if (sched_smt_active())
+ pr_warn_once(TAA_MSG_SMT);
+ break;
+ case TAA_MITIGATION_TSX_DISABLED:
+ case TAA_MITIGATION_OFF:
+ break;
+ }
+
+ mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
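
As a side note, the switch_to_cond_stibp and mds_idle_clear toggles above follow the kernel's usual static-key pattern: the hot path tests a runtime-patched branch, the slow path flips it when the SMT state changes. A minimal sketch of that pattern, using hypothetical my_idle_clear / my_clear_cpu_buffers() names that are not taken from this patch:

#include <linux/jump_label.h>

void my_clear_cpu_buffers(void);	/* hypothetical helper, illustration only */

static DEFINE_STATIC_KEY_FALSE(my_idle_clear);

/* Hot path: the condition compiles to a patched jump, not a memory load. */
static inline void my_idle_hook(void)
{
	if (static_branch_likely(&my_idle_clear))
		my_clear_cpu_buffers();
}

/* Slow path: called when SMT state changes, mirroring update_mds_branch_idle(). */
static void my_smt_update(bool smt_active)
{
	if (smt_active)
		static_branch_enable(&my_idle_clear);
	else
		static_branch_disable(&my_idle_clear);
}
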
@@ -437,7 +961,7 @@ enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_SECCOMP,
};
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
[SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -447,7 +971,7 @@ static const char *ssb_strings[] = {
static const struct {
const char *option;
enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[] __initconst = {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
@@ -461,7 +985,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
char arg[20];
int ret, i;
- if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+ cpu_mitigations_off()) {
return SPEC_STORE_BYPASS_CMD_NONE;
} else {
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -523,6 +1048,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
}
/*
+ * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
+ * bit in the mask to allow guests to use the mitigation even in the
+ * case where the host does not enable it.
+ */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ }
+
+ /*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
* - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
@@ -531,18 +1066,15 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
if (mode == SPEC_STORE_BYPASS_DISABLE) {
setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
/*
- * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
- * a completely different MSR and bit dependent on family.
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+ * use a completely different MSR and bit dependent on family.
*/
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
+ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+ !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ x86_amd_ssb_disable();
+ } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
- x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
- break;
- case X86_VENDOR_AMD:
- x86_amd_ssb_disable();
- break;
}
}
@@ -560,10 +1092,25 @@ static void ssb_select_mitigation(void)
#undef pr_fmt
#define pr_fmt(fmt) "Speculation prctl: " fmt
-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+static void task_update_spec_tif(struct task_struct *tsk)
{
- bool update;
+ /* Force the update of the real TIF bits */
+ set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
+ /*
+ * Immediately update the speculation control MSRs for the current
+ * task, but for a non-current task delay setting the CPU
+ * mitigation until it is scheduled next.
+ *
+ * This can only happen for SECCOMP mitigation. For PRCTL it's
+ * always the current task.
+ */
+ if (tsk == current)
+ speculation_ctrl_update_current();
+}
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
return -ENXIO;
@@ -574,28 +1121,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
- update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
default:
return -ERANGE;
}
+ return 0;
+}
- /*
- * If being set on non-current task, delay setting the CPU
- * mitigation until it is next scheduled.
- */
- if (task == current && update)
- speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ return 0;
+ /*
+ * Indirect branch speculation is always disabled in strict
+ * mode.
+ */
+ if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+ return -EPERM;
+ task_clear_spec_ib_disable(task);
+ task_update_spec_tif(task);
+ break;
+ case PR_SPEC_DISABLE:
+ case PR_SPEC_FORCE_DISABLE:
+ /*
+ * Indirect branch speculation is always allowed when
+ * mitigation is force disabled.
+ */
+ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ return -EPERM;
+ if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+ return 0;
+ task_set_spec_ib_disable(task);
+ if (ctrl == PR_SPEC_FORCE_DISABLE)
+ task_set_spec_ib_force_disable(task);
+ task_update_spec_tif(task);
+ break;
+ default:
+ return -ERANGE;
+ }
return 0;
}
@@ -605,6 +1180,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
switch (which) {
case PR_SPEC_STORE_BYPASS:
return ssb_prctl_set(task, ctrl);
+ case PR_SPEC_INDIRECT_BRANCH:
+ return ib_prctl_set(task, ctrl);
default:
return -ENODEV;
}
@@ -615,6 +1192,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -637,11 +1216,35 @@ static int ssb_prctl_get(struct task_struct *task)
}
}
+static int ib_prctl_get(struct task_struct *task)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return PR_SPEC_NOT_AFFECTED;
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ return PR_SPEC_ENABLE;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ if (task_spec_ib_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ib_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ case SPECTRE_V2_USER_STRICT:
+ return PR_SPEC_DISABLE;
+ default:
+ return PR_SPEC_NOT_AFFECTED;
+ }
+}
+
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
switch (which) {
case PR_SPEC_STORE_BYPASS:
return ssb_prctl_get(task);
+ case PR_SPEC_INDIRECT_BRANCH:
+ return ib_prctl_get(task);
default:
return -ENODEV;
}
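
The PR_SPEC_INDIRECT_BRANCH handling added above is driven from user space via prctl(); a minimal sketch of a caller follows (constant values match include/uapi/linux/prctl.h and are guarded in case the installed headers predate them):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_INDIRECT_BRANCH	1
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Opt this task out of indirect branch speculation (maps to ib_prctl_set()). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Read the state back (maps to ib_prctl_get()). */
	printf("indirect branch speculation state: %#x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
	return 0;
}
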
@@ -656,6 +1259,9 @@ void x86_spec_ctrl_setup_ap(void)
x86_amd_ssb_disable();
}
+bool itlb_multihit_kvm_mitigation;
+EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
+
#undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt
@@ -713,6 +1319,11 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ if (cpu_mitigations_off())
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ else if (cpu_mitigations_auto_nosmt())
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
@@ -735,12 +1346,13 @@ static void __init l1tf_select_mitigation(void)
#endif
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
- if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+ if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+ e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
pr_info("However, doing so will make a part of your RAM unusable.\n");
- pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
return;
}
@@ -773,13 +1385,14 @@ static int __init l1tf_cmdline(char *str)
early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) fmt
#ifdef CONFIG_SYSFS
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_AUTO] = "auto",
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
@@ -795,21 +1408,99 @@ static ssize_t l1tf_show_state(char *buf)
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
- cpu_smt_control == CPU_SMT_ENABLED))
+ sched_smt_active())) {
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation]);
+ }
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation],
- cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ if (itlb_multihit_kvm_mitigation)
+ return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
+ else
+ return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
+
+static ssize_t itlb_multihit_show_state(char *buf)
+{
+ return sprintf(buf, "Processor vulnerable\n");
+}
#endif
+static ssize_t mds_show_state(char *buf)
+{
+#ifdef CONFIG_HYPERVISOR_GUEST
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ mds_strings[mds_mitigation]);
+ }
+#endif
+
+ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+ sched_smt_active() ? "mitigated" : "disabled"));
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
+static ssize_t tsx_async_abort_show_state(char *buf)
+{
+ if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
+ (taa_mitigation == TAA_MITIGATION_OFF))
+ return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ taa_strings[taa_mitigation]);
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
+static char *stibp_state(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return "";
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ return ", STIBP: disabled";
+ case SPECTRE_V2_USER_STRICT:
+ return ", STIBP: forced";
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ if (static_key_enabled(&switch_to_cond_stibp))
+ return ", STIBP: conditional";
+ }
+ return "";
+}
+
+static char *ibpb_state(void)
+{
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ if (static_key_enabled(&switch_mm_always_ibpb))
+ return ", IBPB: always-on";
+ if (static_key_enabled(&switch_mm_cond_ibpb))
+ return ", IBPB: conditional";
+ return ", IBPB: disabled";
+ }
+ return "";
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -824,12 +1515,14 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
break;
case X86_BUG_SPECTRE_V1:
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
case X86_BUG_SPECTRE_V2:
- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ stibp_state(),
+ boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
spectre_v2_module_string());
case X86_BUG_SPEC_STORE_BYPASS:
@@ -839,6 +1532,16 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
return l1tf_show_state(buf);
break;
+
+ case X86_BUG_MDS:
+ return mds_show_state(buf);
+
+ case X86_BUG_TAA:
+ return tsx_async_abort_show_state(buf);
+
+ case X86_BUG_ITLB_MULTIHIT:
+ return itlb_multihit_show_state(buf);
+
default:
break;
}
@@ -870,4 +1573,19 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b
{
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
+
+ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
+}
+
+ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+}
#endif
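
The cpu_show_*() handlers above back the files under /sys/devices/system/cpu/vulnerabilities/; a minimal sketch that dumps the new entries (paths assume a kernel carrying this patch):

#include <stdio.h>

int main(void)
{
	static const char * const files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		"/sys/devices/system/cpu/vulnerabilities/mds",
		"/sys/devices/system/cpu/vulnerabilities/tsx_async_abort",
		"/sys/devices/system/cpu/vulnerabilities/itlb_multihit",
	};
	char line[256];

	for (unsigned int i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* entry not present on this kernel */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);
		fclose(f);
	}
	return 0;
}
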
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3c01610c5ba9..f490a4fab2f7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -388,7 +388,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
* cpuid bit to be set. We need to ensure that we
* update that bit in this CPU's "cpu_info".
*/
- get_cpu_cap(c);
+ set_cpu_cap(c, X86_FEATURE_OSPKE);
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -752,6 +752,12 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_STIBP);
set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
}
+
+ if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+ }
}
void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -885,84 +891,134 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
c->x86_cache_bits = c->x86_phys_bits;
}
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_CENTAUR, 5 },
- { X86_VENDOR_INTEL, 5 },
- { X86_VENDOR_NSC, 5 },
- { X86_VENDOR_ANY, 4 },
- {}
-};
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
+#define MSBDS_ONLY BIT(5)
+#define NO_SWAPGS BIT(6)
+#define NO_ITLB_MULTIHIT BIT(7)
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
- { X86_VENDOR_AMD },
- {}
-};
+#define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
- { X86_VENDOR_CENTAUR, 5, },
- { X86_VENDOR_INTEL, 5, },
- { X86_VENDOR_NSC, 5, },
- { X86_VENDOR_AMD, 0x12, },
- { X86_VENDOR_AMD, 0x11, },
- { X86_VENDOR_AMD, 0x10, },
- { X86_VENDOR_AMD, 0xf, },
- { X86_VENDOR_ANY, 4, },
- {}
-};
+#define VULNWL_INTEL(model, whitelist) \
+ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist) \
+ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ /* Intel Family 6 */
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+ * being documented as such in the APM). But according to AMD, %gs is
+ * updated non-speculatively, and the issuing of %gs-relative memory
+ * operands will be blocked until the %gs update completes, which is
+ * good enough for our purposes.
+ */
+
+ /* AMD Family 0xf - 0x12 */
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
- /* in addition to cpu_no_speculation */
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
{}
};
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+static bool __init cpu_matches(unsigned long which)
+{
+ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+ return m && !!(m->driver_data & which);
+}
+
+u64 x86_read_arch_cap_msr(void)
{
u64 ia32_cap = 0;
- if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_SSB_NO))
- setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+ return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+ /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+ if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
- if (x86_match_cpu(cpu_no_speculation))
+ if (cpu_matches(NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (x86_match_cpu(cpu_no_meltdown))
+ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+ }
+
+ if (!cpu_matches(NO_SWAPGS))
+ setup_force_cpu_bug(X86_BUG_SWAPGS);
+
+ /*
+ * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
+ * - TSX is supported or
+ * - TSX_CTRL is present
+ *
+ * The TSX_CTRL check is needed for cases when TSX could be disabled before
+ * the kernel boots, e.g. by kexec.
+ * The TSX_CTRL check alone is not sufficient for cases when the microcode
+ * update is not present or when running as a guest that doesn't get TSX_CTRL.
+ */
+ if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ (cpu_has(c, X86_FEATURE_RTM) ||
+ (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ setup_force_cpu_bug(X86_BUG_TAA);
+
+ if (cpu_matches(NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -971,7 +1027,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (x86_match_cpu(cpu_no_l1tf))
+ if (cpu_matches(NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
@@ -1380,6 +1436,8 @@ void __init identify_boot_cpu(void)
enable_sep_cpu();
#endif
cpu_detect_tlb(&boot_cpu_data);
+
+ tsx_init();
}
void identify_secondary_cpu(struct cpuinfo_x86 *c)
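
For reference, the IA32_ARCH_CAPABILITIES bits consulted by cpu_set_bug_bits() above can be inspected from user space through the msr driver; a minimal sketch (assumes root and a loaded msr module; MSR 0x10a, bit positions per the ARCH_CAP_* definitions used above):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t cap = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), 0x10a) != sizeof(cap)) {
		perror("IA32_ARCH_CAPABILITIES");
		return 1;
	}
	printf("ARCH_CAPABILITIES=%#llx SSB_NO=%llu MDS_NO=%llu PSCHANGE_MC_NO=%llu TAA_NO=%llu\n",
	       (unsigned long long)cap,
	       (unsigned long long)(cap >> 4) & 1,
	       (unsigned long long)(cap >> 5) & 1,
	       (unsigned long long)(cap >> 6) & 1,
	       (unsigned long long)(cap >> 8) & 1);
	close(fd);
	return 0;
}
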
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2275900d4d1b..4350f50b5deb 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -44,6 +44,22 @@ struct _tlb_table {
extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
+#ifdef CONFIG_CPU_SUP_INTEL
+enum tsx_ctrl_states {
+ TSX_CTRL_ENABLE,
+ TSX_CTRL_DISABLE,
+ TSX_CTRL_NOT_SUPPORTED,
+};
+
+extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
+
+extern void __init tsx_init(void);
+extern void tsx_enable(void);
+extern void tsx_disable(void);
+#else
+static inline void tsx_init(void) { }
+#endif /* CONFIG_CPU_SUP_INTEL */
+
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
@@ -51,4 +67,6 @@ extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void x86_spec_ctrl_setup_ap(void);
+extern u64 x86_read_arch_cap_msr(void);
+
#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d39cfb2c6b63..a4f6e0ec4ba0 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -121,7 +121,7 @@ static void set_cx86_reorder(void)
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* Load/Store Serialize to mem access disable (=reorder it) */
- setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
+ setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
/* set load/store serialize from 1GB to 4GB */
ccr3 |= 0xe0;
setCx86(CX86_CCR3, ccr3);
@@ -132,11 +132,11 @@ static void set_cx86_memwb(void)
pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
/* CCR2 bit 2: unlock NW bit */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
write_cr0(read_cr0() | X86_CR0_NW);
/* CCR2 bit 2: lock NW bit and set WT1 */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}
/*
@@ -150,14 +150,14 @@ static void geode_configure(void)
local_irq_save(flags);
/* Suspend on halt power saving and enable #SUSP pin */
- setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
+ setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
/* FPU fast, DTE cache, Mem bypass */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
set_cx86_memwb();
@@ -293,7 +293,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
/* Enable cxMMX extensions (GX1 Datasheet 54) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
/*
* GXm : 0x30 ... 0x5f GXm datasheet 51
@@ -316,7 +316,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
if (dir1 > 7) {
dir0_msn++; /* M II */
/* Enable MMX extensions (App note 108) */
- setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
+ setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
} else {
/* A 6x86MX - it has the bug. */
set_cpu_bug(c, X86_BUG_COMA);
@@ -434,7 +434,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
/* enable MAPEN */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
/* enable cpuid */
- setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+ setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
/* disable MAPEN */
setCx86(CX86_CCR3, ccr3);
local_irq_restore(flags);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cee0fec0d232..476a9d5c2f35 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -137,14 +138,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
- unsigned lower_word;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- /* Required by the SDM */
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
- }
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
/* Now if any of them are set, check the blacklist and clear the lot */
if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
@@ -647,6 +642,11 @@ static void init_intel(struct cpuinfo_x86 *c)
detect_vmx_virtcap(c);
init_intel_energy_perf(c);
+
+ if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+ tsx_enable();
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ tsx_disable();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 3e0199ee5a2f..0372913e0134 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -148,6 +148,11 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
KERNEL
),
+ MCESEV(
+ PANIC, "Instruction fetch error in kernel",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ KERNEL
+ ),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 25310d2b8609..07188a012492 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -139,6 +139,8 @@ void mce_setup(struct mce *m)
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+ m->microcode = boot_cpu_data.microcode;
}
DEFINE_PER_CPU(struct mce, injectm);
@@ -309,7 +311,7 @@ static void print_mce(struct mce *m)
*/
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
- cpu_data(m->extcpu).microcode);
+ m->microcode);
pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
@@ -671,20 +673,50 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
+
+ /* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
continue;
/*
- * Uncorrected or signalled events are handled by the exception
- * handler when it is enabled, so don't process those here.
- *
- * TBD do the same check for MCI_STATUS_EN here?
+ * If we are logging everything (at CPU online) or this
+ * is a corrected error, then we must log it.
*/
- if (!(flags & MCP_UC) &&
- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
- continue;
+ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
+ goto log_it;
+
+ /*
+ * Newer Intel systems that support software error
+ * recovery need to make additional checks. Other
+ * CPUs should skip over uncorrected errors, but log
+ * everything else.
+ */
+ if (!mca_cfg.ser) {
+ if (m.status & MCI_STATUS_UC)
+ continue;
+ goto log_it;
+ }
+
+ /* Log "not enabled" (speculative) errors */
+ if (!(m.status & MCI_STATUS_EN))
+ goto log_it;
+ /*
+ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
+ * UC == 1 && PCC == 0 && S == 0
+ */
+ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
+ goto log_it;
+
+ /*
+ * Skip anything else. Presumption is that our read of this
+ * bank is racing with a machine check. Leave the log alone
+ * for do_machine_check() to deal with it.
+ */
+ continue;
+
+log_it:
error_seen = true;
mce_read_aux(&m, i);
@@ -750,8 +782,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
+ m->bank = i;
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
- m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
@@ -1616,36 +1648,6 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (c->x86 == 0x15 && c->x86_model <= 0xf)
mce_flags.overflow_recov = 1;
- /*
- * Turn off MC4_MISC thresholding banks on those models since
- * they're not supported there.
- */
- if (c->x86 == 0x15 &&
- (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
- int i;
- u64 hwcr;
- bool need_toggle;
- u32 msrs[] = {
- 0x00000413, /* MC4_MISC0 */
- 0xc0000408, /* MC4_MISC1 */
- };
-
- rdmsrl(MSR_K7_HWCR, hwcr);
-
- /* McStatusWrEn has to be set */
- need_toggle = !(hwcr & BIT(18));
-
- if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
-
- /* Clear CntP bit safely */
- for (i = 0; i < ARRAY_SIZE(msrs); i++)
- msr_clear_bit(msrs[i], 62);
-
- /* restore old settings */
- if (need_toggle)
- wrmsrl(MSR_K7_HWCR, hwcr);
- }
}
if (c->x86_vendor == X86_VENDOR_INTEL) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 39526e1e3132..775d5f028fe8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -499,6 +499,40 @@ out:
return offset;
}
+/*
+ * Turn off MC4_MISC thresholding banks on all family 0x15 models since
+ * they're not supported there.
+ */
+void disable_err_thresholding(struct cpuinfo_x86 *c)
+{
+ int i;
+ u64 hwcr;
+ bool need_toggle;
+ u32 msrs[] = {
+ 0x00000413, /* MC4_MISC0 */
+ 0xc0000408, /* MC4_MISC1 */
+ };
+
+ if (c->x86 != 0x15)
+ return;
+
+ rdmsrl(MSR_K7_HWCR, hwcr);
+
+ /* McStatusWrEn has to be set */
+ need_toggle = !(hwcr & BIT(18));
+
+ if (need_toggle)
+ wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
+
+ /* Clear CntP bit safely */
+ for (i = 0; i < ARRAY_SIZE(msrs); i++)
+ msr_clear_bit(msrs[i], 62);
+
+ /* restore old settings */
+ if (need_toggle)
+ wrmsrl(MSR_K7_HWCR, hwcr);
+}
+
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
@@ -506,6 +540,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
unsigned int bank, block, cpu = smp_processor_id();
int offset = -1;
+ disable_err_thresholding(c);
+
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (mce_flags.smca)
get_smca_bank_info(bank);
@@ -810,9 +846,12 @@ static const struct sysfs_ops threshold_ops = {
.store = store,
};
+static void threshold_block_release(struct kobject *kobj);
+
static struct kobj_type threshold_ktype = {
.sysfs_ops = &threshold_ops,
.default_attrs = default_attrs,
+ .release = threshold_block_release,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
@@ -843,8 +882,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return buf_mcatype;
}
-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
- unsigned int block, u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
+ unsigned int bank, unsigned int block,
+ u32 address)
{
struct threshold_block *b = NULL;
u32 low, high;
@@ -888,16 +928,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
INIT_LIST_HEAD(&b->miscj);
- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
- list_add(&b->miscj,
- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
- } else {
- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
- }
+ if (tb->blocks)
+ list_add(&b->miscj, &tb->blocks->miscj);
+ else
+ tb->blocks = b;
- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
- per_cpu(threshold_banks, cpu)[bank]->kobj,
- get_name(bank, b));
+ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
if (err)
goto out_free;
recurse:
@@ -905,7 +941,7 @@ recurse:
if (!address)
return 0;
- err = allocate_threshold_blocks(cpu, bank, block, address);
+ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
if (err)
goto out_free;
@@ -990,8 +1026,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out_free;
}
- per_cpu(threshold_banks, cpu)[bank] = b;
-
if (is_shared_bank(bank)) {
atomic_set(&b->cpus, 1);
@@ -1002,9 +1036,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
}
- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
- if (!err)
- goto out;
+ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+ if (err)
+ goto out_free;
+
+ per_cpu(threshold_banks, cpu)[bank] = b;
+
+ return 0;
out_free:
kfree(b);
@@ -1038,8 +1076,12 @@ static int threshold_create_device(unsigned int cpu)
return err;
}
-static void deallocate_threshold_block(unsigned int cpu,
- unsigned int bank)
+static void threshold_block_release(struct kobject *kobj)
+{
+ kfree(to_block(kobj));
+}
+
+static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
@@ -1049,13 +1091,11 @@ static void deallocate_threshold_block(unsigned int cpu,
return;
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
- kobject_put(&pos->kobj);
list_del(&pos->miscj);
- kfree(pos);
+ kobject_put(&pos->kobj);
}
- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+ kobject_put(&head->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index c460c91d0c8f..be2439592b0e 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level)
/* if we just entered the thermal event */
if (new_event) {
if (event == THERMAL_THROTTLING_EVENT)
- pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+ pr_warn("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
this_cpu,
level == CORE_LEVEL ? "Core" : "Package",
state->count);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 732bb03fcf91..a19fddfb6bf8 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -707,22 +707,26 @@ int apply_microcode_amd(int cpu)
return -1;
/* need to apply patch? */
- if (rev >= mc_amd->hdr.patch_id) {
- c->microcode = rev;
- uci->cpu_sig.rev = rev;
- return 0;
- }
+ if (rev >= mc_amd->hdr.patch_id)
+ goto out;
if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return -1;
}
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);
- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- c->microcode = mc_amd->hdr.patch_id;
+ rev = mc_amd->hdr.patch_id;
+
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
+
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
return 0;
}
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 79291d6fb301..1308abfc4758 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -386,15 +386,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
csig.pf = 1 << ((val[1] >> 18) & 7);
}
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
- /* As documented in the SDM: Do a CPUID 1 here */
- sync_core();
-
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
- csig.rev = val[1];
+ csig.rev = intel_get_microcode_revision();
uci->cpu_sig = csig;
uci->valid = 1;
@@ -618,29 +611,35 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
struct microcode_intel *mc;
- unsigned int val[2];
+ u32 rev;
mc = uci->mc;
if (!mc)
return 0;
+ /*
+ * Save us the MSR write below - which is a particularly expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = rev;
+ return 0;
+ }
+
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- sync_core();
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
- if (val[1] != mc->hdr.rev)
+ rev = intel_get_microcode_revision();
+ if (rev != mc->hdr.rev)
return -1;
#ifdef CONFIG_X86_64
/* Flush global tlb. This is precaution. */
flush_tlb_early();
#endif
- uci->cpu_sig.rev = val[1];
+ uci->cpu_sig.rev = rev;
if (early)
print_ucode(uci);
@@ -903,9 +902,9 @@ static int apply_microcode_intel(int cpu)
{
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
- struct cpuinfo_x86 *c;
- unsigned int val[2];
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
static int prev_rev;
+ u32 rev;
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -924,35 +923,42 @@ static int apply_microcode_intel(int cpu)
if (!get_matching_mc(mc, cpu))
return 0;
+ /*
+ * Save us the MSR write below - which is a particularly expensive
+ * operation - when the other hyperthread has updated the microcode
+ * already.
+ */
+ rev = intel_get_microcode_revision();
+ if (rev >= mc->hdr.rev)
+ goto out;
+
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- wrmsrl(MSR_IA32_UCODE_REV, 0);
- /* As documented in the SDM: Do a CPUID 1 here */
- sync_core();
+ rev = intel_get_microcode_revision();
- /* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
- if (val[1] != mc->hdr.rev) {
+ if (rev != mc->hdr.rev) {
pr_err("CPU%d update to revision 0x%x failed\n",
cpu, mc->hdr.rev);
return -1;
}
- if (val[1] != prev_rev) {
+ if (rev != prev_rev) {
pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- val[1],
+ rev,
mc->hdr.date & 0xffff,
mc->hdr.date >> 24,
(mc->hdr.date >> 16) & 0xff);
- prev_rev = val[1];
+ prev_rev = rev;
}
- c = &cpu_data(cpu);
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
- uci->cpu_sig.rev = val[1];
- c->microcode = val[1];
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
return 0;
}
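
The revision bookkeeping above (uci->cpu_sig.rev, c->microcode, boot_cpu_data.microcode) is what ends up visible in sysfs; a minimal sketch that reads it back for CPU0 (path assumes the microcode driver's sysfs group is present):

#include <stdio.h>

int main(void)
{
	char rev[32] = "";
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/microcode/version", "r");

	if (!f) {
		perror("microcode/version");
		return 1;
	}
	if (fgets(rev, sizeof(rev), f))
		printf("CPU0 microcode revision: %s", rev);
	fclose(f);
	return 0;
}
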
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 6988c74409a8..711b74e0e623 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -3,6 +3,8 @@
# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
#
+set -e
+
IN=$1
OUT=$2
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
new file mode 100644
index 000000000000..032509adf9de
--- /dev/null
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Transactional Synchronization Extensions (TSX) control.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ * Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+ */
+
+#include <linux/cpufeature.h>
+
+#include <asm/cmdline.h>
+
+#include "cpu.h"
+
+enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
+
+void tsx_disable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Force all transactions to immediately abort */
+ tsx |= TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is not enumerated in CPUID.
+ * This is visible to userspace and will ensure they
+ * do not waste resources trying TSX transactions that
+ * will always abort.
+ */
+ tsx |= TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+void tsx_enable(void)
+{
+ u64 tsx;
+
+ rdmsrl(MSR_IA32_TSX_CTRL, tsx);
+
+ /* Enable the RTM feature in the cpu */
+ tsx &= ~TSX_CTRL_RTM_DISABLE;
+
+ /*
+ * Ensure TSX support is enumerated in CPUID.
+ * This is visible to userspace and will ensure they
+ * can enumerate and use the TSX feature.
+ */
+ tsx &= ~TSX_CTRL_CPUID_CLEAR;
+
+ wrmsrl(MSR_IA32_TSX_CTRL, tsx);
+}
+
+static bool __init tsx_ctrl_is_supported(void)
+{
+ u64 ia32_cap = x86_read_arch_cap_msr();
+
+ /*
+ * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+ * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+ *
+ * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+ * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+ * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+ * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+ * tsx= cmdline requests will do nothing on CPUs without
+ * MSR_IA32_TSX_CTRL support.
+ */
+ return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
+}
+
+static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
+{
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+ return TSX_CTRL_DISABLE;
+
+ return TSX_CTRL_ENABLE;
+}
+
+void __init tsx_init(void)
+{
+ char arg[5] = {};
+ int ret;
+
+ if (!tsx_ctrl_is_supported())
+ return;
+
+ ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
+ if (ret >= 0) {
+ if (!strcmp(arg, "on")) {
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ } else if (!strcmp(arg, "off")) {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ } else if (!strcmp(arg, "auto")) {
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ } else {
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ pr_err("tsx: invalid option, defaulting to off\n");
+ }
+ } else {
+ /* tsx= not provided */
+ if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_AUTO))
+ tsx_ctrl_state = x86_get_tsx_auto_mode();
+ else if (IS_ENABLED(CONFIG_X86_INTEL_TSX_MODE_OFF))
+ tsx_ctrl_state = TSX_CTRL_DISABLE;
+ else
+ tsx_ctrl_state = TSX_CTRL_ENABLE;
+ }
+
+ if (tsx_ctrl_state == TSX_CTRL_DISABLE) {
+ tsx_disable();
+
+ /*
+ * tsx_disable() will change the state of the RTM and HLE CPUID
+ * bits. Clear them here since they are now expected not to
+ * be set.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ setup_clear_cpu_cap(X86_FEATURE_HLE);
+ } else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
+
+ /*
+ * Hardware enables TSX by default at boot.
+ * The TSX enable path may still be needed
+ * during init for special cases such as
+ * kexec after TSX has been disabled.
+ */
+ tsx_enable();
+
+ /*
+ * tsx_enable() will change the state of the RTM and HLE CPUID
+ * bits. Force them here since they are now expected to be set.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RTM);
+ setup_force_cpu_cap(X86_FEATURE_HLE);
+ }
+}
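
Whether the tsx_init() decision above took effect can be checked from user space by looking at the HLE/RTM enumeration that TSX_CTRL_CPUID_CLEAR hides; a minimal sketch using CPUID.(EAX=7,ECX=0):EBX bits 4 and 11:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Structured extended feature flags: leaf 7, subleaf 0. */
	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("HLE: %s, RTM: %s\n",
	       (ebx & (1u << 4)) ? "enumerated" : "hidden",
	       (ebx & (1u << 11)) ? "enumerated" : "hidden");
	return 0;
}
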
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 756634f14df6..775c23d4021a 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -914,6 +914,8 @@ int __init hpet_enable(void)
return 0;
hpet_set_mapping();
+ if (!hpet_virt_address)
+ return 0;
/*
* Read the period and check for a sane value:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 8771766d46b6..9954a604a822 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -352,6 +352,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
#endif
default:
WARN_ON_ONCE(1);
+ return -EINVAL;
}
/*
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index bcd1b82c86e8..005e9a77a664 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -25,9 +25,18 @@ int sysctl_panic_on_stackoverflow;
/*
* Probabilistic stack overflow check:
*
- * Only check the stack in process context, because everything else
- * runs on the big interrupt stacks. Checking reliably is too expensive,
- * so we just check from interrupts.
+ * Regular device interrupts can enter on the following stacks:
+ *
+ * - User stack
+ *
+ * - Kernel task stack
+ *
+ * - Interrupt stack if a device driver reenables interrupts
+ * which should only happen in really old drivers.
+ *
+ * - Debug IST stack
+ *
+ * All other contexts are invalid.
*/
static inline void stack_overflow_check(struct pt_regs *regs)
{
@@ -52,8 +61,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
return;
oist = this_cpu_ptr(&orig_ist);
- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
+ estack_bottom = (u64)oist->ist[DEBUG_STACK];
+ estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
if (regs->sp >= estack_top && regs->sp <= estack_bottom)
return;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8e36f249646e..904e18bb38c5 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -438,7 +438,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
*/
void kgdb_roundup_cpus(unsigned long flags)
{
- apic->send_IPI_allbutself(APIC_DM_NMI);
+ apic->send_IPI_allbutself(NMI_VECTOR);
}
#endif
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 64a70b2e2285..dcd6df5943d6 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -376,6 +376,10 @@ int __copy_instruction(u8 *dest, u8 *src)
return 0;
memcpy(dest, insn.kaddr, length);
+ /* We should not singlestep on the exception masking instructions */
+ if (insn_masking_exception(&insn))
+ return 0;
+
#ifdef CONFIG_X86_64
if (insn_rip_relative(&insn)) {
s64 newdisp;
@@ -545,6 +549,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
unsigned long *sara = stack_addr(regs);
ri->ret_addr = (kprobe_opcode_t *) *sara;
+ ri->fp = sara;
/* Replace the return addr with trampoline addr */
*sara = (unsigned long) &kretprobe_trampoline;
@@ -746,15 +751,21 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
+ void *frame_pointer;
+ bool skipped = false;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/* fixup registers */
#ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;
+ /* On x86-64, we use pt_regs->sp for return address holder. */
+ frame_pointer = &regs->sp;
#else
regs->cs = __KERNEL_CS | get_kernel_rpl();
regs->gs = 0;
+ /* On x86-32, we use pt_regs->flags for return address holder. */
+ frame_pointer = &regs->flags;
#endif
regs->ip = trampoline_address;
regs->orig_ax = ~0UL;
@@ -776,8 +787,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+ /*
+ * Return probes must be pushed on this hash list in the correct
+ * order (same as return order) so that they can be popped
+ * correctly. However, if we find one pushed in the wrong
+ * order, it means we have hit a function which should not be
+ * probed, because the out-of-order entry was pushed while
+ * processing another kretprobe itself.
+ */
+ if (ri->fp != frame_pointer) {
+ if (!skipped)
+ pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
+ skipped = true;
+ continue;
+ }
orig_ret_address = (unsigned long)ri->ret_addr;
+ if (skipped)
+ pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
+ ri->rp->kp.addr);
if (orig_ret_address != trampoline_address)
/*
@@ -795,6 +823,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
+ if (ri->fp != frame_pointer)
+ continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
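/*
 * Illustrative userspace sketch (not part of the patch above): each kretprobe
 * instance now records the stack address of its return slot (ri->fp), and the
 * trampoline skips instances whose recorded slot does not match the current
 * frame, so entries pushed in the wrong order are never popped.  The toy below
 * applies the same "match the recorded frame" rule to a plain array of pending
 * entries; struct ret_instance and pop_for_frame() are hypothetical names.
 */
#include <stdio.h>

struct ret_instance {
	void *frame;			/* stand-in for ri->fp */
	unsigned long ret_addr;
};

static unsigned long pop_for_frame(struct ret_instance *list, int n,
				   void *frame)
{
	for (int i = 0; i < n; i++) {
		if (list[i].frame != frame)
			continue;	/* stacked incorrectly, skip it */
		return list[i].ret_addr;
	}
	return 0;
}

int main(void)
{
	int slot_a, slot_b;
	struct ret_instance pending[] = {
		{ &slot_a, 0x1111 },
		{ &slot_b, 0x2222 },
	};

	printf("%#lx\n", pop_for_frame(pending, 2, &slot_b));	/* 0x2222 */
	return 0;
}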
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index bfe4d6c96fbd..6b7b35d80264 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -32,6 +32,7 @@
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -544,6 +545,9 @@ nmi_restart:
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
+
+ if (user_mode(regs))
+ mds_user_clear_cpu_buffers();
}
NOKPROBE_SYMBOL(do_nmi);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e9195a139d4e..f1f3c471438f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -35,6 +35,8 @@
#include <asm/switch_to.h>
#include <asm/spec-ctrl.h>
+#include "process.h"
+
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -170,11 +172,12 @@ int set_tsc_mode(unsigned int val)
return 0;
}
-static inline void switch_to_bitmap(struct tss_struct *tss,
- struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
struct thread_struct *next,
unsigned long tifp, unsigned long tifn)
{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss);
+
if (tifn & _TIF_IO_BITMAP) {
/*
* Copy the relevant range of the IO bitmap.
@@ -308,32 +311,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+/*
+ * Update the MSRs managing speculation control, during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+ unsigned long tifn)
{
- u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+ unsigned long tif_diff = tifp ^ tifn;
+ u64 msr = x86_spec_ctrl_base;
+ bool updmsr = false;
+
+ /*
+ * If TIF_SSBD is different, select the proper mitigation
+ * method. Note that if SSBD mitigation is disabled or permanently
+ * enabled this branch can't be taken because nothing can set
+ * TIF_SSBD.
+ */
+ if (tif_diff & _TIF_SSBD) {
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ amd_set_ssb_virt_state(tifn);
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ amd_set_core_ssb_state(tifn);
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
+ updmsr = true;
+ }
+ }
+
+ /*
+ * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+ * otherwise avoid the MSR write.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp)) {
+ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+ }
- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ if (updmsr)
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
- amd_set_ssb_virt_state(tifn);
- else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
- amd_set_core_ssb_state(tifn);
- else
- intel_set_ssb_state(tifn);
+ if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
+ if (task_spec_ssb_disable(tsk))
+ set_tsk_thread_flag(tsk, TIF_SSBD);
+ else
+ clear_tsk_thread_flag(tsk, TIF_SSBD);
+
+ if (task_spec_ib_disable(tsk))
+ set_tsk_thread_flag(tsk, TIF_SPEC_IB);
+ else
+ clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+ }
+ /* Return the updated threadinfo flags */
+ return task_thread_info(tsk)->flags;
}
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
{
+ /* Forced update. Make sure all relevant TIF flags are different */
preempt_disable();
- __speculative_store_bypass_update(tif);
+ __speculation_ctrl_update(~tif, tif);
preempt_enable();
}
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss)
+/* Called from seccomp/prctl update */
+void speculation_ctrl_update_current(void)
+{
+ preempt_disable();
+ speculation_ctrl_update(speculation_ctrl_update_tif(current));
+ preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev, *next;
unsigned long tifp, tifn;
@@ -343,7 +399,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
tifn = READ_ONCE(task_thread_info(next_p)->flags);
tifp = READ_ONCE(task_thread_info(prev_p)->flags);
- switch_to_bitmap(tss, prev, next, tifp, tifn);
+ switch_to_bitmap(prev, next, tifp, tifn);
propagate_user_return_notify(prev_p, next_p);
@@ -361,8 +417,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOTSC)
cr4_toggle_bits(X86_CR4_TSD);
- if ((tifp ^ tifn) & _TIF_SSBD)
- __speculative_store_bypass_update(tifn);
+ if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+ __speculation_ctrl_update(tifp, tifn);
+ } else {
+ speculation_ctrl_update_tif(prev_p);
+ tifn = speculation_ctrl_update_tif(next_p);
+
+ /* Enforce MSR update to ensure consistent state */
+ __speculation_ctrl_update(~tifn, tifn);
+ }
}
/*
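/*
 * Illustrative userspace sketch (not part of the patch above): the rewritten
 * __speculation_ctrl_update() only touches the SPEC_CTRL MSR when the SSBD or
 * STIBP thread flags actually differ between the outgoing and incoming task.
 * Only the SPEC_CTRL path is modelled here; the AMD VIRT_SPEC/LS_CFG branches
 * are omitted and the bit values below are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TIF_SSBD	(1UL << 0)	/* hypothetical bit positions */
#define TIF_SPEC_IB	(1UL << 1)

struct msr_update {
	bool write_needed;
	uint64_t value;
};

static struct msr_update speculation_update(unsigned long tifp,
					    unsigned long tifn,
					    bool cond_stibp)
{
	unsigned long tif_diff = tifp ^ tifn;
	struct msr_update u = { .write_needed = false, .value = 0 };

	/* SSBD changed between prev and next: MSR must be rewritten */
	if (tif_diff & TIF_SSBD) {
		if (tifn & TIF_SSBD)
			u.value |= 0x4;	/* stands in for SPEC_CTRL_SSBD */
		u.write_needed = true;
	}

	/* STIBP only matters when conditional STIBP is enabled */
	if (cond_stibp) {
		u.write_needed |= !!(tif_diff & TIF_SPEC_IB);
		if (tifn & TIF_SPEC_IB)
			u.value |= 0x2;	/* stands in for SPEC_CTRL_STIBP */
	}

	return u;
}

int main(void)
{
	struct msr_update u = speculation_update(0, TIF_SSBD, false);

	printf("write=%d value=%#lx\n", u.write_needed, (unsigned long)u.value);
	return 0;
}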
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..898e97cf6629
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+#include <asm/spec-ctrl.h>
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+ struct task_struct *next)
+{
+ unsigned long next_tif = task_thread_info(next)->flags;
+ unsigned long prev_tif = task_thread_info(prev)->flags;
+
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /*
+ * Avoid __switch_to_xtra() invocation when conditional
+ * STIPB is disabled and the only different bit is
+ * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+ * in the TIF_WORK_CTXSW masks.
+ */
+ if (!static_branch_likely(&switch_to_cond_stibp)) {
+ prev_tif &= ~_TIF_SPEC_IB;
+ next_tif &= ~_TIF_SPEC_IB;
+ }
+ }
+
+ /*
+ * __switch_to_xtra() handles debug registers, i/o bitmaps,
+ * speculation mitigations etc.
+ */
+ if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+ prev_tif & _TIF_WORK_CTXSW_PREV))
+ __switch_to_xtra(prev, next);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..4ca26fc7aa89 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,6 +55,8 @@
#include <asm/switch_to.h>
#include <asm/vm86.h>
+#include "process.h"
+
void __show_regs(struct pt_regs *regs, int all)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -127,6 +129,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct task_struct *tsk;
int err;
+ /*
+ * For a new task use the RESET flags value since there is no before.
+ * All the status flags are zero; DF and all the system flags must also
+ * be 0, specifically IF must be 0 because we context switch to the new
+ * task with interrupts disabled.
+ */
+ frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
@@ -264,12 +273,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
set_iopl_mask(next->iopl);
- /*
- * Now maybe handle debug registers and/or IO bitmaps
- */
- if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
+ switch_to_extra(prev_p, next_p);
/*
* Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a2661814bde0..6d6c15cd9b9a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,6 +51,8 @@
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
+#include "process.h"
+
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
@@ -266,6 +268,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
+
+ /*
+ * For a new task use the RESET flags value since there is no before.
+ * All the status flags are zero; DF and all the system flags must also
+ * be 0, specifically IF must be 0 because we context switch to the new
+ * task with interrupts disabled.
+ */
+ frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
@@ -454,12 +464,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Reload esp0 and ss1. This changes current_thread_info(). */
load_sp0(tss, next);
- /*
- * Now maybe reload the debug registers and handle I/O bitmaps
- */
- if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
- task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
- __switch_to_xtra(prev_p, next_p, tss);
+ switch_to_extra(prev_p, next_p);
#ifdef CONFIG_XEN
/*
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e497d374412a..7f377f8792aa 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -23,6 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
+#include <linux/nospec.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -652,7 +653,8 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
unsigned long val = 0;
if (n < HBP_NUM) {
- struct perf_event *bp = thread->ptrace_bps[n];
+ int index = array_index_nospec(n, HBP_NUM);
+ struct perf_event *bp = thread->ptrace_bps[index];
if (bp)
val = bp->hw.info.address;
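/*
 * Illustrative userspace sketch (not part of the patch above): the hunk clamps
 * the debug-register index with array_index_nospec() before it is used, so a
 * mispredicted bounds check cannot leak out-of-bounds data speculatively.
 * Userspace has no exact equivalent, but the branchless clamp below mimics the
 * idea: the index collapses to 0 whenever it is out of range, without a
 * data-dependent branch.  clamp_index_nospec() is a hypothetical helper.
 */
#include <stddef.h>
#include <stdio.h>

static size_t clamp_index_nospec(size_t idx, size_t size)
{
	/* mask is all-ones when idx < size, zero otherwise */
	size_t mask = (size_t)0 - (size_t)(idx < size);

	return idx & mask;
}

int main(void)
{
	unsigned long regs[4] = { 1, 2, 3, 4 };
	size_t idx = 7;		/* attacker-influenced, out of range */

	printf("%lu\n", regs[clamp_index_nospec(idx, 4)]);	/* prints 1 */
	return 0;
}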
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4a12362a194a..c55b11fe8e9f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -82,6 +82,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+/*
+ * Some machines don't handle the default ACPI reboot method and
+ * require the EFI reboot method:
+ */
+static int __init set_efi_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
+ reboot_type = BOOT_EFI;
+ pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
void __noreturn machine_real_restart(unsigned int type)
{
local_irq_disable();
@@ -167,6 +180,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
},
},
+ { /* Handle reboot issue on Acer TravelMate X514-51T */
+ .callback = set_efi_reboot,
+ .ident = "Acer TravelMate X514-51T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
+ },
+ },
/* Apple */
{ /* Handle problems with rebooting on Apple MacBook5 */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b1a5d252d482..ca010dfb9682 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -129,16 +129,6 @@ static int restore_sigcontext(struct pt_regs *regs,
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
-#ifdef CONFIG_X86_64
- /*
- * Fix up SS if needed for the benefit of old DOSEMU and
- * CRIU.
- */
- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
- user_64bit_mode(regs)))
- force_valid_ss(regs);
-#endif
-
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
@@ -147,6 +137,15 @@ static int restore_sigcontext(struct pt_regs *regs,
buf = (void __user *)buf_val;
} get_user_catch(err);
+#ifdef CONFIG_X86_64
+ /*
+ * Fix up SS if needed for the benefit of old DOSEMU and
+ * CRIU.
+ */
+ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+ force_valid_ss(regs);
+#endif
+
err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
@@ -458,6 +457,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
void __user *fp = NULL;
+ unsigned long uc_flags;
int err = 0;
frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -470,9 +470,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
save_altstack_ex(&frame->uc.uc_stack, regs->sp);
@@ -538,6 +540,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
{
#ifdef CONFIG_X86_X32_ABI
struct rt_sigframe_x32 __user *frame;
+ unsigned long uc_flags;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
@@ -552,9 +555,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 2863ad306692..33ba47c44816 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
irq_exit();
}
+static int register_stop_handler(void)
+{
+ return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+ NMI_FLAG_FIRST, "smp_stop");
+}
+
static void native_stop_other_cpus(int wait)
{
unsigned long flags;
@@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
apic->send_IPI_allbutself(REBOOT_VECTOR);
/*
- * Don't wait longer than a second if the caller
- * didn't ask us to wait.
+ * Don't wait longer than a second for IPI completion. The
+ * wait request is not checked here because that would
+ * prevent an NMI shutdown attempt if not all
+ * CPUs reach shutdown state.
*/
timeout = USEC_PER_SEC;
- while (num_online_cpus() > 1 && (wait || timeout--))
+ while (num_online_cpus() > 1 && timeout--)
udelay(1);
}
-
- /* if the REBOOT_VECTOR didn't work, try with the NMI */
- if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
- if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
- NMI_FLAG_FIRST, "smp_stop"))
- /* Note: we ignore failures here */
- /* Hope the REBOOT_IRQ is good enough */
- goto finish;
-
- /* sync above data before sending IRQ */
- wmb();
- pr_emerg("Shutting down cpus with NMI\n");
+ /* if the REBOOT_VECTOR didn't work, try with the NMI */
+ if (num_online_cpus() > 1) {
+ /*
+ * If NMI IPI is enabled, try to register the stop handler
+ * and send the IPI. In any case try to wait for the other
+ * CPUs to stop.
+ */
+ if (!smp_no_nmi_ipi && !register_stop_handler()) {
+ /* Sync above data before sending IRQ */
+ wmb();
- apic->send_IPI_allbutself(NMI_VECTOR);
+ pr_emerg("Shutting down cpus with NMI\n");
+ apic->send_IPI_allbutself(NMI_VECTOR);
+ }
/*
- * Don't wait longer than a 10 ms if the caller
- * didn't ask us to wait.
+ * Don't wait longer than 10 ms if the caller didn't
+ * request it. If wait is true, the machine hangs here if
+ * one or more CPUs do not reach shutdown state.
*/
timeout = USEC_PER_MSEC * 10;
while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
-finish:
local_irq_save(flags);
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
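/*
 * Illustrative userspace sketch (not part of the patch above): the reworked
 * native_stop_other_cpus() always bounds the first (REBOOT vector) wait so
 * that the NMI fallback can still be attempted, and only honours the caller's
 * "wait" request in the second phase.  The model keeps just that control flow;
 * others_still_running() is a fake poll standing in for num_online_cpus().
 */
#include <stdbool.h>
#include <stdio.h>

static int cpus_left = 4;		/* stand-in for num_online_cpus() */

static bool others_still_running(void)
{
	if (cpus_left > 1)
		cpus_left--;		/* pretend one CPU stops per poll */
	return cpus_left > 1;
}

static void stop_other_cpus(bool wait)
{
	long timeout = 1000;		/* phase 1: always bounded */

	while (others_still_running() && timeout--)
		;

	if (cpus_left > 1) {
		timeout = 10;		/* phase 2: NMI fallback */
		while (others_still_running() && (wait || timeout--))
			;
	}
	printf("cpus left after shutdown attempt: %d\n", cpus_left);
}

int main(void)
{
	stop_other_cpus(false);
	return 0;
}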
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 623965e86b65..897da526e40e 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
{},
};
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ {
+ /*
+ * Lenovo MIIX310-10ICR, only some batches have the troublesome
+ * 800x1280 portrait screen. Luckily the portrait version has
+ * its own BIOS version, so we match on that.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+ },
+ },
+ {
+ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo MIIX 320-10ICR"),
+ },
+ },
+ {
+ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo ideapad D330-10IGM"),
+ },
+ },
+ {},
+};
+
__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
dmi_check_system(efifb_dmi_system_table);
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ dmi_check_system(efifb_dmi_swap_width_height)) {
+ u16 temp = screen_info.lfb_width;
+
+ screen_info.lfb_width = screen_info.lfb_height;
+ screen_info.lfb_height = temp;
+ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ }
}
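/*
 * Illustrative userspace sketch (not part of the patch above): for the
 * DMI-matched portrait panels, the quirk swaps the advertised width and
 * height and recomputes the line length for 32bpp.  The helper below does
 * the same arithmetic on a stand-alone struct; struct fb_info here is a
 * local stand-in, not the kernel screen_info layout.
 */
#include <stdint.h>
#include <stdio.h>

struct fb_info {
	uint16_t width;
	uint16_t height;
	uint32_t linelength;	/* bytes per scanline */
};

static void swap_width_height(struct fb_info *fb)
{
	uint16_t tmp = fb->width;

	fb->width = fb->height;
	fb->height = tmp;
	fb->linelength = 4u * fb->width;	/* assumes 4 bytes per pixel */
}

int main(void)
{
	struct fb_info fb = { .width = 1280, .height = 800, .linelength = 5120 };

	swap_width_height(&fb);
	printf("%ux%u, stride %u\n", (unsigned)fb.width, (unsigned)fb.height,
	       fb.linelength);
	return 0;
}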
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 85195d447a92..f3215346e47f 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -94,11 +94,11 @@ __init int create_simplefb(const struct screen_info *si,
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
size <<= 16;
length = mode->height * mode->stride;
- length = PAGE_ALIGN(length);
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
+ length = PAGE_ALIGN(length);
/* setup IORESOURCE_MEM as framebuffer memory */
memset(&res, 0, sizeof(res));
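/*
 * Illustrative userspace sketch (not part of the patch above): the simplefb
 * hunk compares the raw framebuffer length against the advertised VRAM size
 * before rounding it up to a page, so a framebuffer that genuinely fits is no
 * longer rejected just because its page-rounded size exceeds the VRAM figure.
 * resource_len() below is a hypothetical helper showing that ordering.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(uint32_t)(PAGE_SIZE - 1))

static uint32_t resource_len(uint32_t height, uint32_t stride, uint32_t vram)
{
	uint32_t length = height * stride;

	if (length > vram)
		return 0;		/* genuinely larger than VRAM */

	return PAGE_ALIGN(length);	/* rounded only after the check */
}

int main(void)
{
	/* 600 * 3000 = 1800000 bytes of framebuffer, exactly the VRAM size;  */
	/* the old order (align first, then compare) would have rejected it.  */
	printf("%u\n", resource_len(600, 3000, 1800000));
	return 0;
}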
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 9692a5e9fdab..b95693a73f12 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -4,6 +4,7 @@
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>
+#include <linux/nospec.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
@@ -219,6 +220,7 @@ int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *u_info)
{
struct user_desc info;
+ int index;
if (idx == -1 && get_user(idx, &u_info->entry_number))
return -EFAULT;
@@ -226,8 +228,11 @@ int do_get_thread_area(struct task_struct *p, int idx,
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
- fill_user_desc(&info, idx,
- &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
+ index = idx - GDT_ENTRY_TLS_MIN;
+ index = array_index_nospec(index,
+ GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);
+
+ fill_user_desc(&info, idx, &p->thread.tls_array[index]);
if (copy_to_user(u_info, &info, sizeof(info)))
return -EFAULT;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 769c370011d6..cb768417429d 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -713,7 +713,7 @@ unsigned long native_calibrate_tsc(void)
case INTEL_FAM6_KABYLAKE_DESKTOP:
crystal_khz = 24000; /* 24.0 MHz */
break;
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index a2456d4d286a..9b8b3cb2e934 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+
unsigned long unwind_get_return_address(struct unwind_state *state)
{
unsigned long addr;
@@ -14,7 +29,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (unwind_done(state))
return 0;
- addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+ addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+ addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
addr_p);
return __kernel_text_address(addr) ? addr : 0;
@@ -48,7 +64,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (unwind_done(state))
return false;
- next_bp = (unsigned long *)*state->bp;
+ next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
/* make sure the next frame's data is accessible */
if (!update_stack_state(state, next_bp, FRAME_HEADER_SIZE))
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index e78a6b1db74b..73391c1bd2a9 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
if (is_prefix_bad(insn))
return -ENOTSUPP;
+ /* We should not singlestep on the exception masking instructions */
+ if (insn_masking_exception(insn))
+ return -ENOTSUPP;
+
if (x86_64)
good_insns = good_insns_64;
else
@@ -514,9 +518,12 @@ struct uprobe_xol_ops {
void (*abort)(struct arch_uprobe *, struct pt_regs *);
};
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
{
- return in_ia32_syscall() ? 4 : 8;
+ /*
+ * Check registers for mode as in_xxx_syscall() does not apply here.
+ */
+ return user_64bit_mode(regs) ? 8 : 4;
}
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -527,9 +534,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
static int push_ret_address(struct pt_regs *regs, unsigned long ip)
{
- unsigned long new_sp = regs->sp - sizeof_long();
+ unsigned long new_sp = regs->sp - sizeof_long(regs);
- if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
+ if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
return -EFAULT;
regs->sp = new_sp;
@@ -562,7 +569,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
long correction = utask->vaddr - utask->xol_vaddr;
regs->ip += correction;
} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
- regs->sp += sizeof_long(); /* Pop incorrect return address */
+ regs->sp += sizeof_long(regs); /* Pop incorrect return address */
if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
return -ERESTART;
}
@@ -671,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
* "call" insn was executed out-of-line. Just restore ->sp and restart.
* We could also restore ->ip and try to call branch_emulate_op() again.
*/
- regs->sp += sizeof_long();
+ regs->sp += sizeof_long(regs);
return -ERESTART;
}
@@ -962,7 +969,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
- int rasize = sizeof_long(), nleft;
+ int rasize = sizeof_long(regs), nleft;
unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
@@ -980,7 +987,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
"%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
- force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
+ force_sig(SIGSEGV, current);
}
return -1;
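/*
 * Illustrative userspace sketch (not part of the patch above): sizeof_long()
 * now derives the word size from the interrupted register state rather than
 * the current syscall flavour, because a uprobe can fire outside any syscall.
 * struct regs_snapshot and its user_64bit flag are hypothetical stand-ins for
 * pt_regs and user_64bit_mode().
 */
#include <stdbool.h>
#include <stdio.h>

struct regs_snapshot {
	bool user_64bit;	/* stand-in for user_64bit_mode(regs) */
	unsigned long sp;
};

static int sizeof_long(const struct regs_snapshot *regs)
{
	return regs->user_64bit ? 8 : 4;
}

int main(void)
{
	struct regs_snapshot r32 = { .user_64bit = false, .sp = 0xffffd000 };
	struct regs_snapshot r64 = { .user_64bit = true,  .sp = 0x7ffdf000 };

	printf("%d %d\n", sizeof_long(&r32), sizeof_long(&r64));
	return 0;
}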
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index e783a5daaab2..55f04875293f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -367,7 +367,7 @@ SECTIONS
* Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor.
*/
-#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index c17d3893ae60..63c3ff9e74d4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -279,13 +279,18 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
{
switch (func) {
case 0:
- entry->eax = 1; /* only one leaf currently */
+ entry->eax = 7;
++*nent;
break;
case 1:
entry->ecx = F(MOVBE);
++*nent;
break;
+ case 7:
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ if (index == 0)
+ entry->ecx = F(RDPID);
+ ++*nent;
default:
break;
}
@@ -355,7 +360,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO) | F(AMD_STIBP);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -380,14 +386,15 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
- F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+ F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) |
+ F(INTEL_STIBP) | F(MD_CLEAR);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
r = -E2BIG;
- if (*nent >= maxnent)
+ if (WARN_ON(*nent >= maxnent))
goto out;
do_cpuid_1_ent(entry, function, index);
@@ -464,8 +471,17 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU);
+
entry->edx &= kvm_cpuid_7_0_edx_x86_features;
cpuid_mask(&entry->edx, CPUID_7_EDX);
+ if (boot_cpu_has(X86_FEATURE_IBPB) &&
+ boot_cpu_has(X86_FEATURE_IBRS))
+ entry->edx |= F(SPEC_CTRL);
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ entry->edx |= F(INTEL_STIBP);
+ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ entry->edx |= F(SPEC_CTRL_SSBD);
/*
* We emulate ARCH_CAPABILITIES in software even
* if the host doesn't support it.
@@ -633,7 +649,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ebx |= F(VIRT_SSBD);
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ /*
+ * The preference is to use SPEC CTRL MSR instead of the
+ * VIRT_SPEC MSR.
+ */
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_AMD_SSBD))
entry->ebx |= F(VIRT_SSBD);
break;
}
@@ -676,6 +697,9 @@ out:
static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
u32 idx, int *nent, int maxnent, unsigned int type)
{
+ if (*nent >= maxnent)
+ return -E2BIG;
+
if (type == KVM_GET_EMULATED_CPUID)
return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 8a841b9d8f84..b2bf8e1d5782 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -176,7 +176,7 @@ static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
- if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+ if (best && (best->ebx & (bit(X86_FEATURE_AMD_IBRS) | bit(X86_FEATURE_AMD_SSBD))))
return true;
best = kvm_find_cpuid_entry(vcpu, 7, 0);
return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 510cfc06701a..da3cd734dee1 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -21,6 +21,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
@@ -2579,15 +2580,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
* supports long mode.
*/
- cr4 = ctxt->ops->get_cr(ctxt, 4);
if (emulator_has_longmode(ctxt)) {
struct desc_struct cs_desc;
/* Zero CR4.PCIDE before CR0.PG. */
- if (cr4 & X86_CR4_PCIDE) {
+ cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (cr4 & X86_CR4_PCIDE)
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
- cr4 &= ~X86_CR4_PCIDE;
- }
/* A 32-bit code segment is required to clear EFER.LMA. */
memset(&cs_desc, 0, sizeof(cs_desc));
@@ -2601,13 +2600,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
if (cr0 & X86_CR0_PE)
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
- /* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
- if (cr4 & X86_CR4_PAE)
- ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+ if (emulator_has_longmode(ctxt)) {
+ /* Clear CR4.PAE before clearing EFER.LME. */
+ cr4 = ctxt->ops->get_cr(ctxt, 4);
+ if (cr4 & X86_CR4_PAE)
+ ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
- /* And finally go back to 32-bit mode. */
- efer = 0;
- ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+ /* And finally go back to 32-bit mode. */
+ efer = 0;
+ ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+ }
smbase = ctxt->ops->get_smbase(ctxt);
if (emulator_has_longmode(ctxt))
@@ -3529,6 +3531,16 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+ u64 tsc_aux = 0;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+ return emulate_gp(ctxt, 0);
+ ctxt->dst.val = tsc_aux;
+ return X86EMUL_CONTINUE;
+}
+
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
@@ -4389,10 +4401,20 @@ static const struct opcode group8[] = {
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
+/*
+ * The "memory" destination is actually always a register, since we come
+ * from the register case of group9.
+ */
+static const struct gprefix pfx_0f_c7_7 = {
+ N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+};
+
+
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
- N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N,
+ GP(0, &pfx_0f_c7_7),
} };
static const struct opcode group11[] = {
@@ -5000,6 +5022,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
+ ctxt->intercept = x86_intercept_none;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
@@ -5052,16 +5075,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
+ has_seg_override = true;
+ ctxt->seg_override = VCPU_SREG_ES;
+ break;
case 0x2e: /* CS override */
+ has_seg_override = true;
+ ctxt->seg_override = VCPU_SREG_CS;
+ break;
case 0x36: /* SS override */
+ has_seg_override = true;
+ ctxt->seg_override = VCPU_SREG_SS;
+ break;
case 0x3e: /* DS override */
has_seg_override = true;
- ctxt->seg_override = (ctxt->b >> 3) & 3;
+ ctxt->seg_override = VCPU_SREG_DS;
break;
case 0x64: /* FS override */
+ has_seg_override = true;
+ ctxt->seg_override = VCPU_SREG_FS;
+ break;
case 0x65: /* GS override */
has_seg_override = true;
- ctxt->seg_override = ctxt->b & 7;
+ ctxt->seg_override = VCPU_SREG_GS;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
@@ -5145,10 +5180,15 @@ done_prefixes:
}
break;
case Escape:
- if (ctxt->modrm > 0xbf)
- opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
- else
+ if (ctxt->modrm > 0xbf) {
+ size_t size = ARRAY_SIZE(opcode.u.esc->high);
+ u32 index = array_index_nospec(
+ ctxt->modrm - 0xc0, size);
+
+ opcode = opcode.u.esc->high[index];
+ } else {
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
+ }
break;
case InstrDual:
if ((ctxt->modrm >> 6) == 3)
@@ -5256,6 +5296,8 @@ done_prefixes:
ctxt->memopp->addr.mem.ea + ctxt->_eip);
done:
+ if (rc == X86EMUL_PROPAGATE_FAULT)
+ ctxt->have_exception = true;
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
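/*
 * Illustrative userspace sketch (not part of the patch above): the decoder
 * hunk replaces the "(ctxt->b >> 3) & 3" arithmetic with an explicit mapping
 * from prefix byte to segment register, so no array index is derived from
 * guest-controlled instruction bytes.  The switch below mirrors that mapping
 * with a local enum; the names are illustrative, not the KVM VCPU_SREG_* set.
 */
#include <stdio.h>

enum seg { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS, SEG_NONE };

static enum seg seg_override(unsigned char prefix)
{
	switch (prefix) {
	case 0x26: return SEG_ES;
	case 0x2e: return SEG_CS;
	case 0x36: return SEG_SS;
	case 0x3e: return SEG_DS;
	case 0x64: return SEG_FS;
	case 0x65: return SEG_GS;
	default:   return SEG_NONE;	/* not a segment-override prefix */
	}
}

int main(void)
{
	printf("%d %d\n", seg_override(0x64), seg_override(0x90));
	return 0;
}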
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 42b1c83741c8..5e837c96e93f 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -28,6 +28,7 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
+#include <linux/nospec.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>
@@ -719,11 +720,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
u32 index, u64 *pdata)
{
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ size_t size = ARRAY_SIZE(hv->hv_crash_param);
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+ if (WARN_ON_ONCE(index >= size))
return -EINVAL;
- *pdata = hv->hv_crash_param[index];
+ *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
return 0;
}
@@ -762,11 +764,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
u32 index, u64 data)
{
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ size_t size = ARRAY_SIZE(hv->hv_crash_param);
- if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+ if (WARN_ON_ONCE(index >= size))
return -EINVAL;
- hv->hv_crash_param[index] = data;
+ hv->hv_crash_param[array_index_nospec(index, size)] = data;
return 0;
}
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 5f810bb80802..aa34b16e62c2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -73,13 +74,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
default:
{
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
- u64 redir_content;
+ u64 redir_content = ~0ULL;
- if (redir_index < IOAPIC_NUM_PINS)
- redir_content =
- ioapic->redirtbl[redir_index].bits;
- else
- redir_content = ~0ULL;
+ if (redir_index < IOAPIC_NUM_PINS) {
+ u32 index = array_index_nospec(
+ redir_index, IOAPIC_NUM_PINS);
+
+ redir_content = ioapic->redirtbl[index].bits;
+ }
result = (ioapic->ioregsel & 0x1) ?
(redir_content >> 32) & 0xffffffff :
@@ -299,6 +301,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
ioapic_debug("change redir index %x val %x\n", index, val);
if (index >= IOAPIC_NUM_PINS)
return;
+ index = array_index_nospec(index, IOAPIC_NUM_PINS);
e = &ioapic->redirtbl[index];
mask_before = e->fields.mask;
/* Preserve read-only fields */
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 6c0191615f23..cf8b3c17657a 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -436,7 +436,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
- if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
+ if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
irq.dest_id, irq.dest_mode))
__set_bit(irq.vector, ioapic_handled_vectors);
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c8630569e392..3988e26af3b5 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -28,6 +28,7 @@
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
@@ -531,9 +532,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
- if (pv_eoi_get_user(vcpu, &val) < 0)
+ if (pv_eoi_get_user(vcpu, &val) < 0) {
apic_debug("Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
+ return false;
+ }
return val & 0x1;
}
@@ -1587,15 +1590,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
case APIC_LVTTHMR:
case APIC_LVTPC:
case APIC_LVT1:
- case APIC_LVTERR:
+ case APIC_LVTERR: {
/* TODO: Check vector */
+ size_t size;
+ u32 index;
+
if (!kvm_apic_sw_enabled(apic))
val |= APIC_LVT_MASKED;
-
- val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
+ size = ARRAY_SIZE(apic_lvt_mask);
+ index = array_index_nospec(
+ (reg - APIC_LVTT) >> 4, size);
+ val &= apic_lvt_mask[index];
kvm_lapic_set_reg(apic, reg, val);
-
break;
+ }
case APIC_LVTT:
if (!kvm_apic_sw_enabled(apic))
@@ -1992,7 +2000,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
int highest_irr;
- if (!apic_enabled(apic))
+ if (!kvm_apic_hw_enabled(apic))
return -1;
apic_update_ppr(apic);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 676edfc19a95..3a281a2decde 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -37,6 +37,7 @@
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/kthread.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
@@ -44,6 +45,30 @@
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
+extern bool itlb_multihit_kvm_mitigation;
+
+static int __read_mostly nx_huge_pages = -1;
+static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
+
+static struct kernel_param_ops nx_huge_pages_ops = {
+ .set = set_nx_huge_pages,
+ .get = param_get_bool,
+};
+
+static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
+ .set = set_nx_huge_pages_recovery_ratio,
+ .get = param_get_uint,
+};
+
+module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages, "bool");
+module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
+ &nx_huge_pages_recovery_ratio, 0644);
+__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
+
/*
* When setting this variable to true it enables Two-Dimensional-Paging
* where the hardware walks 2 page tables:
@@ -131,9 +156,6 @@ module_param(dbg, bool, 0644);
#include <trace/events/kvm.h>
-#define CREATE_TRACE_POINTS
-#include "mmutrace.h"
-
#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
@@ -142,6 +164,20 @@ module_param(dbg, bool, 0644);
/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3
+/*
+ * Return values of handle_mmio_page_fault and mmu.page_fault:
+ * RET_PF_RETRY: let CPU fault again on the address.
+ * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ *
+ * For handle_mmio_page_fault only:
+ * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
+ */
+enum {
+ RET_PF_RETRY = 0,
+ RET_PF_EMULATE = 1,
+ RET_PF_INVALID = 2,
+};
+
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
@@ -179,14 +215,23 @@ static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_present_mask;
static void mmu_spte_set(u64 *sptep, u64 spte);
+static bool is_executable_pte(u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);
+#define CREATE_TRACE_POINTS
+#include "mmutrace.h"
+
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
shadow_mmio_mask = mmio_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+static bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
/*
* the low bit of the generation number is always presumed to be zero.
* This disables mmio caching during memslot updates. The concept is
@@ -324,6 +369,11 @@ static int is_last_spte(u64 pte, int level)
return 0;
}
+static bool is_executable_pte(u64 spte)
+{
+ return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
+}
+
static kvm_pfn_t spte_to_pfn(u64 pte)
{
return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -767,10 +817,16 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
- if (sp->role.direct)
- BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
- else
+ if (!sp->role.direct) {
sp->gfns[index] = gfn;
+ return;
+ }
+
+ if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
+ pr_err_ratelimited("gfn mismatch under direct page %llx "
+ "(expected %llx, got %llx)\n",
+ sp->gfn,
+ kvm_mmu_page_get_gfn(sp, index), gfn);
}
/*
@@ -829,6 +885,17 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_disallow_lpage(slot, gfn);
}
+static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ if (sp->lpage_disallowed)
+ return;
+
+ ++kvm->stat.nx_lpage_splits;
+ list_add_tail(&sp->lpage_disallowed_link,
+ &kvm->arch.lpage_disallowed_mmu_pages);
+ sp->lpage_disallowed = true;
+}
+
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
@@ -846,6 +913,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
+static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ --kvm->stat.nx_lpage_splits;
+ sp->lpage_disallowed = false;
+ list_del(&sp->lpage_disallowed_link);
+}
+
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
@@ -2382,6 +2456,9 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm_reload_remote_mmus(kvm);
}
+ if (sp->lpage_disallowed)
+ unaccount_huge_nx_page(kvm, sp);
+
sp->role.invalid = 1;
return ret;
}
@@ -2533,6 +2610,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (!speculative)
spte |= shadow_accessed_mask;
+ if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
+ is_nx_huge_page_enabled()) {
+ pte_access &= ~ACC_EXEC_MASK;
+ }
+
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
else
@@ -2598,13 +2680,13 @@ done:
return ret;
}
-static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
- int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
- bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
+ int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
+ bool speculative, bool host_writable)
{
int was_rmapped = 0;
int rmap_count;
- bool emulate = false;
+ int ret = RET_PF_RETRY;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
@@ -2634,18 +2716,15 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
true, host_writable)) {
if (write_fault)
- emulate = true;
+ ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
if (unlikely(is_mmio_spte(*sptep)))
- emulate = true;
+ ret = RET_PF_EMULATE;
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
- pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
- is_large_pte(*sptep)? "2MB" : "4kB",
- *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
- *sptep, sptep);
+ trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
@@ -2657,9 +2736,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
}
}
- kvm_release_pfn_clean(pfn);
-
- return emulate;
+ return ret;
}
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2693,9 +2770,11 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
if (ret <= 0)
return -1;
- for (i = 0; i < ret; i++, gfn++, start++)
+ for (i = 0; i < ret; i++, gfn++, start++) {
mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
page_to_pfn(pages[i]), true, true);
+ put_page(pages[i]);
+ }
return 0;
}
@@ -2743,40 +2822,71 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}
-static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
- int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
+static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
+ gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
{
- struct kvm_shadow_walk_iterator iterator;
+ int level = *levelp;
+ u64 spte = *it.sptep;
+
+ if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
+ is_nx_huge_page_enabled() &&
+ is_shadow_present_pte(spte) &&
+ !is_large_pte(spte)) {
+ /*
+ * A small SPTE exists for this pfn, but FNAME(fetch)
+ * and __direct_map would like to create a large PTE
+ * instead: just force them to go down another level,
+ * patching back for them into pfn the next 9 bits of
+ * the address.
+ */
+ u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
+ *pfnp |= gfn & page_mask;
+ (*levelp)--;
+ }
+}
+
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
+ int map_writable, int level, kvm_pfn_t pfn,
+ bool prefault, bool lpage_disallowed)
+{
+ struct kvm_shadow_walk_iterator it;
struct kvm_mmu_page *sp;
- int emulate = 0;
- gfn_t pseudo_gfn;
+ int ret;
+ gfn_t gfn = gpa >> PAGE_SHIFT;
+ gfn_t base_gfn = gfn;
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
- return 0;
+ return RET_PF_RETRY;
- for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
- if (iterator.level == level) {
- emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
- write, level, gfn, pfn, prefault,
- map_writable);
- direct_pte_prefetch(vcpu, iterator.sptep);
- ++vcpu->stat.pf_fixed;
- break;
- }
+ trace_kvm_mmu_spte_requested(gpa, level, pfn);
+ for_each_shadow_entry(vcpu, gpa, it) {
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &level);
- drop_large_spte(vcpu, iterator.sptep);
- if (!is_shadow_present_pte(*iterator.sptep)) {
- u64 base_addr = iterator.addr;
+ base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+ if (it.level == level)
+ break;
- base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
- pseudo_gfn = base_addr >> PAGE_SHIFT;
- sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
- iterator.level - 1, 1, ACC_ALL);
+ drop_large_spte(vcpu, it.sptep);
+ if (!is_shadow_present_pte(*it.sptep)) {
+ sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
+ it.level - 1, true, ACC_ALL);
- link_shadow_page(vcpu, iterator.sptep, sp);
+ link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
}
}
- return emulate;
+
+ ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
+ write, level, base_gfn, pfn, prefault,
+ map_writable);
+ direct_pte_prefetch(vcpu, it.sptep);
+ ++vcpu->stat.pf_fixed;
+ return ret;
}
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
@@ -2798,25 +2908,23 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
* Do not cache the mmio info caused by writing the readonly gfn
* into the spte otherwise read access on readonly gfn also can
* caused mmio page fault and treat it as mmio access.
- * Return 1 to tell kvm to emulate it.
*/
if (pfn == KVM_PFN_ERR_RO_FAULT)
- return 1;
+ return RET_PF_EMULATE;
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
- return 0;
+ return RET_PF_RETRY;
}
return -EFAULT;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
- gfn_t *gfnp, kvm_pfn_t *pfnp,
+ gfn_t gfn, kvm_pfn_t *pfnp,
int *levelp)
{
kvm_pfn_t pfn = *pfnp;
- gfn_t gfn = *gfnp;
int level = *levelp;
/*
@@ -2826,7 +2934,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* here.
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
- level == PT_PAGE_TABLE_LEVEL &&
+ !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
@@ -2843,8 +2951,6 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
mask = KVM_PAGES_PER_HPAGE(level) - 1;
VM_BUG_ON((gfn & mask) != (pfn & mask));
if (pfn & mask) {
- gfn &= ~mask;
- *gfnp = gfn;
kvm_release_pfn_clean(pfn);
pfn &= ~mask;
kvm_get_pfn(pfn);
@@ -3012,11 +3118,14 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
{
int r;
int level;
- bool force_pt_level = false;
+ bool force_pt_level;
kvm_pfn_t pfn;
unsigned long mmu_seq;
bool map_writable, write = error_code & PFERR_WRITE_MASK;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ force_pt_level = lpage_disallowed;
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
/*
@@ -3031,32 +3140,30 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
}
if (fast_page_fault(vcpu, v, level, error_code))
- return 0;
+ return RET_PF_RETRY;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
return r;
+ r = RET_PF_RETRY;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
make_mmu_pages_available(vcpu);
if (likely(!force_pt_level))
- transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
- r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
- spin_unlock(&vcpu->kvm->mmu_lock);
-
- return r;
-
+ transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+ r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+ prefault, false);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return r;
}
@@ -3383,38 +3490,38 @@ exit:
return reserved;
}
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
u64 spte;
bool reserved;
if (mmio_info_in_cache(vcpu, addr, direct))
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;
reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
if (WARN_ON(reserved))
- return RET_MMIO_PF_BUG;
+ return -EINVAL;
if (is_mmio_spte(spte)) {
gfn_t gfn = get_mmio_spte_gfn(spte);
unsigned access = get_mmio_spte_access(spte);
if (!check_mmio_spte(vcpu, spte))
- return RET_MMIO_PF_INVALID;
+ return RET_PF_INVALID;
if (direct)
addr = 0;
trace_handle_mmio_page_fault(addr, gfn, access);
vcpu_cache_mmio_info(vcpu, addr, gfn, access);
- return RET_MMIO_PF_EMULATE;
+ return RET_PF_EMULATE;
}
/*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
- return RET_MMIO_PF_RETRY;
+ return RET_PF_RETRY;
}
EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
@@ -3464,7 +3571,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;
r = mmu_topup_memory_caches(vcpu);
if (r)
@@ -3548,18 +3655,21 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
unsigned long mmu_seq;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
if (page_fault_handle_page_track(vcpu, error_code, gfn))
- return 1;
+ return RET_PF_EMULATE;
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
- force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
- PT_DIRECTORY_LEVEL);
+ force_pt_level =
+ lpage_disallowed ||
+ !check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
level = mapping_level(vcpu, gfn, &force_pt_level);
if (likely(!force_pt_level)) {
if (level > PT_DIRECTORY_LEVEL &&
@@ -3569,32 +3679,30 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
}
if (fast_page_fault(vcpu, gpa, level, error_code))
- return 0;
+ return RET_PF_RETRY;
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
- return 0;
+ return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
return r;
+ r = RET_PF_RETRY;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
make_mmu_pages_available(vcpu);
if (likely(!force_pt_level))
- transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
- r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
- spin_unlock(&vcpu->kvm->mmu_lock);
-
- return r;
-
+ transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+ r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
+ prefault, lpage_disallowed);
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return r;
}
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
@@ -4510,23 +4618,24 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
enum emulation_result er;
bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
+ r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_MMIO_PF_EMULATE) {
+ if (r == RET_PF_EMULATE) {
emulation_type = 0;
goto emulate;
}
- if (r == RET_MMIO_PF_RETRY)
- return 1;
- if (r < 0)
- return r;
}
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+ if (r == RET_PF_INVALID) {
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+ WARN_ON(r == RET_PF_INVALID);
+ }
+
+ if (r == RET_PF_RETRY)
+ return 1;
if (r < 0)
return r;
- if (!r)
- return 1;
if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
@@ -4781,9 +4890,9 @@ restart:
* the guest, and the guest page table is using 4K page size
* mapping if the indirect sp has level = 1.
*/
- if (sp->role.direct &&
- !kvm_is_reserved_pfn(pfn) &&
- PageTransCompoundMap(pfn_to_page(pfn))) {
+ if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
+ !kvm_is_zone_device_pfn(pfn) &&
+ PageTransCompoundMap(pfn_to_page(pfn))) {
drop_spte(kvm, sptep);
need_tlb_flush = 1;
goto restart;
@@ -4965,7 +5074,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
int nr_to_scan = sc->nr_to_scan;
unsigned long freed = 0;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
int idx;
@@ -5015,7 +5124,7 @@ unlock:
break;
}
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
return freed;
}
@@ -5039,8 +5148,58 @@ static void mmu_destroy_caches(void)
kmem_cache_destroy(mmu_page_header_cache);
}
+static bool get_nx_auto_mode(void)
+{
+ /* Return true when CPU has the bug, and mitigations are ON */
+ return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
+}
+
+static void __set_nx_huge_pages(bool val)
+{
+ nx_huge_pages = itlb_multihit_kvm_mitigation = val;
+}
+
+static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
+{
+ bool old_val = nx_huge_pages;
+ bool new_val;
+
+ /* In "auto" mode deploy workaround only if CPU has the bug. */
+ if (sysfs_streq(val, "off"))
+ new_val = 0;
+ else if (sysfs_streq(val, "force"))
+ new_val = 1;
+ else if (sysfs_streq(val, "auto"))
+ new_val = get_nx_auto_mode();
+ else if (strtobool(val, &new_val) < 0)
+ return -EINVAL;
+
+ __set_nx_huge_pages(new_val);
+
+ if (new_val != old_val) {
+ struct kvm *kvm;
+ int idx;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ idx = srcu_read_lock(&kvm->srcu);
+ kvm_mmu_invalidate_zap_all_pages(kvm);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+ }
+ mutex_unlock(&kvm_lock);
+ }
+
+ return 0;
+}
+
int kvm_mmu_module_init(void)
{
+ if (nx_huge_pages == -1)
+ __set_nx_huge_pages(get_nx_auto_mode());
+
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
@@ -5104,3 +5263,116 @@ void kvm_mmu_module_exit(void)
unregister_shrinker(&mmu_shrinker);
mmu_audit_disable();
}
+
+static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
+{
+ unsigned int old_val;
+ int err;
+
+ old_val = nx_huge_pages_recovery_ratio;
+ err = param_set_uint(val, kp);
+ if (err)
+ return err;
+
+ if (READ_ONCE(nx_huge_pages) &&
+ !old_val && nx_huge_pages_recovery_ratio) {
+ struct kvm *kvm;
+
+ mutex_lock(&kvm_lock);
+
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ wake_up_process(kvm->arch.nx_lpage_recovery_thread);
+
+ mutex_unlock(&kvm_lock);
+ }
+
+ return err;
+}
+
+static void kvm_recover_nx_lpages(struct kvm *kvm)
+{
+ int rcu_idx;
+ struct kvm_mmu_page *sp;
+ unsigned int ratio;
+ LIST_HEAD(invalid_list);
+ ulong to_zap;
+
+ rcu_idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+
+ ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+ to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
+ /*
+ * We use a separate list instead of just using active_mmu_pages
+ * because the number of lpage_disallowed pages is expected to
+ * be relatively small compared to the total.
+ */
+ sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
+ struct kvm_mmu_page,
+ lpage_disallowed_link);
+ WARN_ON_ONCE(!sp->lpage_disallowed);
+ kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+ WARN_ON_ONCE(sp->lpage_disallowed);
+
+ if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+ kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ if (to_zap)
+ cond_resched_lock(&kvm->mmu_lock);
+ }
+ }
+
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, rcu_idx);
+}
+
+static long get_nx_lpage_recovery_timeout(u64 start_time)
+{
+ return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
+ ? start_time + 60 * HZ - get_jiffies_64()
+ : MAX_SCHEDULE_TIMEOUT;
+}
+
+static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
+{
+ u64 start_time;
+ long remaining_time;
+
+ while (true) {
+ start_time = get_jiffies_64();
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop() && remaining_time > 0) {
+ schedule_timeout(remaining_time);
+ remaining_time = get_nx_lpage_recovery_timeout(start_time);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ return 0;
+
+ kvm_recover_nx_lpages(kvm);
+ }
+}
+
+int kvm_mmu_post_init_vm(struct kvm *kvm)
+{
+ int err;
+
+ err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
+ "kvm-nx-lpage-recovery",
+ &kvm->arch.nx_lpage_recovery_thread);
+ if (!err)
+ kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
+
+ return err;
+}
+
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
+{
+ if (kvm->arch.nx_lpage_recovery_thread)
+ kthread_stop(kvm->arch.nx_lpage_recovery_thread);
+}
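Editor's note: the kvm_recover_nx_lpages() hunk above zaps a fixed fraction of the NX-split huge pages on every pass, to_zap = DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio), and get_nx_lpage_recovery_timeout() reschedules the worker roughly every 60*HZ jiffies while the mitigation is active. A stand-alone C sketch of that arithmetic; the split count and ratio below are made-up inputs, only the DIV_ROUND_UP rounding mirrors the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long nx_lpage_splits = 1000; /* hypothetical VM_STAT(nx_lpage_splits) */
    unsigned int ratio = 60;              /* nx_huge_pages_recovery_ratio */
    unsigned long to_zap;

    /* Zap 1/ratio of the split huge pages per pass; 0 disables recovery. */
    to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;

    printf("zap %lu of %lu split huge pages this pass\n", to_zap, nx_lpage_splits);
    return 0;
}

With 1000 splits and a ratio of 60 the worker reclaims 17 mappings per pass instead of zapping everything at once.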
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c92834c55c59..e584689e7d46 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -56,23 +56,6 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- * directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- * fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
- RET_MMIO_PF_EMULATE = 1,
- RET_MMIO_PF_INVALID = 2,
- RET_MMIO_PF_RETRY = 0,
- RET_MMIO_PF_BUG = -1
-};
-
-int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
@@ -202,4 +185,8 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
+
+int kvm_mmu_post_init_vm(struct kvm *kvm);
+void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+
#endif
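Editor's note: the hunk above removes the old RET_MMIO_PF_* constants from mmu.h; their RET_PF_* replacements are defined in mmu.c, outside the hunks quoted in this diff. Purely as a reading aid, and judging from how the callers above use them (RET_PF_RETRY is translated to "return 1" in kvm_mmu_page_fault(), negative values propagate as errors, RET_PF_INVALID marks "not handled yet"), the replacement presumably looks roughly like:

/* Assumed reconstruction; the real definition is not part of these hunks. */
enum {
    RET_PF_RETRY   = 0, /* let the vCPU fault again on the address   */
    RET_PF_EMULATE = 1, /* MMIO-style fault, emulate the instruction */
    RET_PF_INVALID = 2, /* stale MMIO spte, take the full fault path */
};

This reading also matches handle_ept_misconfig() further below, which now treats any r >= 0 from kvm_mmu_page_fault() as a final verdict and only r < 0 as an error.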
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 5a24b846a1cb..756b14ecc957 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -322,6 +322,65 @@ TRACE_EVENT(
__entry->kvm_gen == __entry->spte_gen
)
);
+
+TRACE_EVENT(
+ kvm_mmu_set_spte,
+ TP_PROTO(int level, gfn_t gfn, u64 *sptep),
+ TP_ARGS(level, gfn, sptep),
+
+ TP_STRUCT__entry(
+ __field(u64, gfn)
+ __field(u64, spte)
+ __field(u64, sptep)
+ __field(u8, level)
+ /* These depend on page entry type, so compute them now. */
+ __field(bool, r)
+ __field(bool, x)
+ __field(u8, u)
+ ),
+
+ TP_fast_assign(
+ __entry->gfn = gfn;
+ __entry->spte = *sptep;
+ __entry->sptep = virt_to_phys(sptep);
+ __entry->level = level;
+ __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
+ __entry->x = is_executable_pte(__entry->spte);
+ __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
+ ),
+
+ TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
+ __entry->gfn, __entry->spte,
+ __entry->r ? "r" : "-",
+ __entry->spte & PT_PRESENT_MASK ? "w" : "-",
+ __entry->x ? "x" : "-",
+ __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
+ __entry->level, __entry->sptep
+ )
+);
+
+TRACE_EVENT(
+ kvm_mmu_spte_requested,
+ TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
+ TP_ARGS(addr, level, pfn),
+
+ TP_STRUCT__entry(
+ __field(u64, gfn)
+ __field(u64, pfn)
+ __field(u8, level)
+ ),
+
+ TP_fast_assign(
+ __entry->gfn = addr >> PAGE_SHIFT;
+ __entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+ __entry->level = level;
+ ),
+
+ TP_printk("gfn %llx pfn %llx level %d",
+ __entry->gfn, __entry->pfn, __entry->level
+ )
+);
+
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 0149ac59c273..3e3016411020 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -17,6 +17,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
#include <asm/mtrr.h>
#include "cpuid.h"
@@ -202,11 +203,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
break;
case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
*seg = 1;
- *unit = msr - MSR_MTRRfix16K_80000;
+ *unit = array_index_nospec(
+ msr - MSR_MTRRfix16K_80000,
+ MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
break;
case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
*seg = 2;
- *unit = msr - MSR_MTRRfix4K_C0000;
+ *unit = array_index_nospec(
+ msr - MSR_MTRRfix4K_C0000,
+ MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
break;
default:
return false;
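Editor's note: fixed_msr_to_seg_unit() now clamps the MSR-derived unit index with array_index_nospec(), so a bounds check that the CPU mispredicts cannot be turned into a speculative out-of-bounds read. A minimal kernel-style sketch of the same pattern; the table and lookup function are invented, only array_index_nospec() and its header are the real API:

#include <linux/kernel.h>
#include <linux/nospec.h>
#include <linux/types.h>

/* Hypothetical table indexed by an untrusted, guest-controlled value. */
static u32 example_table[16];

static u32 example_lookup(unsigned int idx)
{
    if (idx >= ARRAY_SIZE(example_table))
        return 0;

    /* Clamp idx to the valid range even under branch misprediction. */
    idx = array_index_nospec(idx, ARRAY_SIZE(example_table));
    return example_table[idx];
}

The same construct appears again below in pmu.h, pmu_intel.c and the MCE MSR handlers in x86.c.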
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 37363900297d..e03225e707b2 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -499,6 +499,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
true, true);
+ kvm_release_pfn_clean(pfn);
return true;
}
@@ -572,12 +573,14 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
struct guest_walker *gw,
int write_fault, int hlevel,
- kvm_pfn_t pfn, bool map_writable, bool prefault)
+ kvm_pfn_t pfn, bool map_writable, bool prefault,
+ bool lpage_disallowed)
{
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
unsigned direct_access, access = gw->pt_access;
- int top_level, emulate;
+ int top_level, ret;
+ gfn_t gfn, base_gfn;
direct_access = gw->pte_access;
@@ -622,36 +625,49 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
link_shadow_page(vcpu, it.sptep, sp);
}
- for (;
- shadow_walk_okay(&it) && it.level > hlevel;
- shadow_walk_next(&it)) {
- gfn_t direct_gfn;
+ /*
+ * FNAME(page_fault) might have clobbered the bottom bits of
+ * gw->gfn, restore them from the virtual address.
+ */
+ gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
+ base_gfn = gfn;
+ trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+
+ for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
clear_sp_write_flooding_count(it.sptep);
- validate_direct_spte(vcpu, it.sptep, direct_access);
- drop_large_spte(vcpu, it.sptep);
+ /*
+ * We cannot overwrite existing page tables with an NX
+ * large page, as the leaf could be executable.
+ */
+ disallowed_hugepage_adjust(it, gfn, &pfn, &hlevel);
- if (is_shadow_present_pte(*it.sptep))
- continue;
+ base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+ if (it.level == hlevel)
+ break;
- direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+ validate_direct_spte(vcpu, it.sptep, direct_access);
- sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
- true, direct_access);
- link_shadow_page(vcpu, it.sptep, sp);
+ drop_large_spte(vcpu, it.sptep);
+
+ if (!is_shadow_present_pte(*it.sptep)) {
+ sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
+ it.level - 1, true, direct_access);
+ link_shadow_page(vcpu, it.sptep, sp);
+ if (lpage_disallowed)
+ account_huge_nx_page(vcpu->kvm, sp);
+ }
}
- clear_sp_write_flooding_count(it.sptep);
- emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
- it.level, gw->gfn, pfn, prefault, map_writable);
+ ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
+ it.level, base_gfn, pfn, prefault, map_writable);
FNAME(pte_prefetch)(vcpu, gw, it.sptep);
-
- return emulate;
+ ++vcpu->stat.pf_fixed;
+ return ret;
out_gpte_changed:
- kvm_release_pfn_clean(pfn);
- return 0;
+ return RET_PF_RETRY;
}
/*
@@ -717,9 +733,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
int r;
kvm_pfn_t pfn;
int level = PT_PAGE_TABLE_LEVEL;
- bool force_pt_level = false;
unsigned long mmu_seq;
bool map_writable, is_self_change_mapping;
+ bool lpage_disallowed = (error_code & PFERR_FETCH_MASK) &&
+ is_nx_huge_page_enabled();
+ bool force_pt_level = lpage_disallowed;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -746,12 +764,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (!prefault)
inject_page_fault(vcpu, &walker.fault);
- return 0;
+ return RET_PF_RETRY;
}
if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
shadow_page_table_clear_flood(vcpu, addr);
- return 1;
+ return RET_PF_EMULATE;
}
vcpu->arch.write_fault_to_shadow_pgtable = false;
@@ -773,7 +791,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
&map_writable))
- return 0;
+ return RET_PF_RETRY;
if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
walker.gfn, pfn, walker.pte_access, &r))
@@ -799,6 +817,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
walker.pte_access &= ~ACC_EXEC_MASK;
}
+ r = RET_PF_RETRY;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
goto out_unlock;
@@ -806,19 +825,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
make_mmu_pages_available(vcpu);
if (!force_pt_level)
- transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
+ transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
- level, pfn, map_writable, prefault);
- ++vcpu->stat.pf_fixed;
+ level, pfn, map_writable, prefault, lpage_disallowed);
kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
- spin_unlock(&vcpu->kvm->mmu_lock);
-
- return r;
out_unlock:
spin_unlock(&vcpu->kvm->mmu_lock);
kvm_release_pfn_clean(pfn);
- return 0;
+ return r;
}
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
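Editor's note: in the FNAME(fetch) hunk above the faulting gfn is first restored from the virtual address and then re-aligned at every level walked, base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1). A stand-alone demo of that masking with illustrative numbers (512 4 KiB pages per 2 MiB level on x86):

#include <stdio.h>

int main(void)
{
    unsigned long long gfn = 0x12345;         /* hypothetical guest frame number */
    unsigned long long pages_per_hpage = 512; /* 2 MiB level: 512 x 4 KiB pages */
    unsigned long long base_gfn;

    /* Align the gfn down to the start of the huge-page-sized region. */
    base_gfn = gfn & ~(pages_per_hpage - 1);

    printf("gfn 0x%llx -> base_gfn 0x%llx\n", gfn, base_gfn);
    return 0;
}

Here 0x12345 & ~0x1ff yields 0x12200, the first frame of the 2 MiB-aligned region containing the fault.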
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 06ce377dcbc9..0827ee7d0e9b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -124,8 +124,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
intr ? kvm_perf_overflow_intr :
kvm_perf_overflow, pmc);
if (IS_ERR(event)) {
- printk_once("kvm_pmu: event creation failed %ld\n",
- PTR_ERR(event));
+ pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
+ PTR_ERR(event), pmc->idx);
return;
}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index f96e1f962587..fbf3d25af765 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -1,6 +1,8 @@
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H
+#include <linux/nospec.h>
+
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -80,8 +82,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
{
- if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
- return &pmu->gp_counters[msr - base];
+ if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
+ u32 index = array_index_nospec(msr - base,
+ pmu->nr_arch_gp_counters);
+
+ return &pmu->gp_counters[index];
+ }
return NULL;
}
@@ -91,8 +97,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
int base = MSR_CORE_PERF_FIXED_CTR0;
- if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
- return &pmu->fixed_counters[msr - base];
+ if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+ u32 index = array_index_nospec(msr - base,
+ pmu->nr_arch_fixed_counters);
+
+ return &pmu->fixed_counters[index];
+ }
return NULL;
}
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 5ab4a364348e..84ae4dd261ca 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -87,10 +87,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
static unsigned intel_find_fixed_event(int idx)
{
- if (idx >= ARRAY_SIZE(fixed_pmc_events))
+ u32 event;
+ size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+ if (idx >= size)
return PERF_COUNT_HW_MAX;
- return intel_arch_events[fixed_pmc_events[idx]].event_type;
+ event = fixed_pmc_events[array_index_nospec(idx, size)];
+ return intel_arch_events[event].event_type;
}
/* check if a PMC is enabled by comparing it with global_ctrl bits. */
@@ -131,15 +135,19 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
bool fixed = idx & (1u << 30);
struct kvm_pmc *counters;
+ unsigned int num_counters;
idx &= ~(3u << 30);
- if (!fixed && idx >= pmu->nr_arch_gp_counters)
- return NULL;
- if (fixed && idx >= pmu->nr_arch_fixed_counters)
+ if (fixed) {
+ counters = pmu->fixed_counters;
+ num_counters = pmu->nr_arch_fixed_counters;
+ } else {
+ counters = pmu->gp_counters;
+ num_counters = pmu->nr_arch_gp_counters;
+ }
+ if (idx >= num_counters)
return NULL;
- counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-
- return &counters[idx];
+ return &counters[array_index_nospec(idx, num_counters)];
}
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -235,11 +243,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
default:
- if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
- (pmc = get_fixed_pmc(pmu, msr))) {
- if (!msr_info->host_initiated)
- data = (s64)(s32)data;
- pmc->counter += data - pmc_read_counter(pmc);
+ if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
+ if (msr_info->host_initiated)
+ pmc->counter = data;
+ else
+ pmc->counter = (s32)data;
+ return 0;
+ } else if ((pmc = get_fixed_pmc(pmu, msr))) {
+ pmc->counter = data;
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
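Editor's note: intel_msr_idx_to_pmc() above decodes the raw RDPMC index, where bit 30 selects the fixed-counter array, the low bits select the counter, and the final access is clamped with array_index_nospec(). A small stand-alone decode of that encoding; the counter counts are illustrative only:

#include <stdio.h>

int main(void)
{
    unsigned int idx = (1u << 30) | 1; /* hypothetical RDPMC index: fixed counter 1 */
    unsigned int num_counters;
    int fixed = !!(idx & (1u << 30));

    idx &= ~(3u << 30);                /* strip the type/"fast" bits */

    num_counters = fixed ? 3 : 4;      /* illustrative counter counts */
    if (idx >= num_counters)
        printf("index out of range\n");
    else
        printf("%s counter %u selected\n", fixed ? "fixed" : "gp", idx);
    return 0;
}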
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 01eb0451b96d..1079228e4fef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -590,8 +590,14 @@ static int get_npt_level(void)
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
vcpu->arch.efer = efer;
- if (!npt_enabled && !(efer & EFER_LMA))
- efer &= ~EFER_LME;
+
+ if (!npt_enabled) {
+ /* Shadow paging assumes NX to be available. */
+ efer |= EFER_NX;
+
+ if (!(efer & EFER_LMA))
+ efer &= ~EFER_LME;
+ }
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -1518,7 +1524,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!kvm_vcpu_apicv_active(vcpu))
return;
- if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
+ /*
+ * Since the host physical APIC id is 8 bits,
+ * we can support host APIC ID up to 255.
+ */
+ if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
entry = READ_ONCE(*(svm->avic_physical_id_cache));
@@ -3704,7 +3714,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
return 1;
/* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
return 1;
svm->spec_ctrl = data;
@@ -3940,14 +3950,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
+ int i;
+ struct kvm_vcpu *vcpu;
+ struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
/*
- * Update ICR high and low, then emulate sending IPI,
- * which is handled when writing APIC_ICR.
+ * At this point, we expect that the AVIC HW has already
+ * set the appropriate IRR bits on the valid target
+ * vcpus. So, we just need to kick the appropriate vcpu.
*/
- kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
- kvm_lapic_reg_write(apic, APIC_ICR, icrl);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ bool m = kvm_apic_match_dest(vcpu, apic,
+ icrl & KVM_APIC_SHORT_MASK,
+ GET_APIC_DEST_FIELD(icrh),
+ icrl & KVM_APIC_DEST_MASK);
+
+ if (m && !avic_vcpu_is_running(vcpu))
+ kvm_vcpu_wake_up(vcpu);
+ }
break;
}
case AVIC_IPI_FAILURE_INVALID_TARGET:
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0a6cc6754ec5..ea618b713b6f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -434,13 +434,13 @@ TRACE_EVENT(kvm_apic_ipi,
);
TRACE_EVENT(kvm_apic_accept_irq,
- TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
+ TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
TP_ARGS(apicid, dm, tm, vec),
TP_STRUCT__entry(
__field( __u32, apicid )
__field( __u16, dm )
- __field( __u8, tm )
+ __field( __u16, tm )
__field( __u8, vec )
),
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a34fb7284024..2ad59d8553a5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -714,7 +714,6 @@ struct vcpu_vmx {
u64 msr_guest_kernel_gs_base;
#endif
- u64 arch_capabilities;
u64 spec_ctrl;
u32 vm_entry_controls_shadow;
@@ -1548,7 +1547,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
@@ -1562,7 +1561,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
: : "a"(&operand), "c"(ext) : "cc", "memory");
}
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
@@ -1620,43 +1619,15 @@ static void vmcs_load(struct vmcs *vmcs)
}
#ifdef CONFIG_KEXEC_CORE
-/*
- * This bitmap is used to indicate whether the vmclear
- * operation is enabled on all cpus. All disabled by
- * default.
- */
-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
-
-static inline void crash_enable_local_vmclear(int cpu)
-{
- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline void crash_disable_local_vmclear(int cpu)
-{
- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline int crash_local_vmclear_enabled(int cpu)
-{
- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
static void crash_vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
- if (!crash_local_vmclear_enabled(cpu))
- return;
-
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
}
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
@@ -1668,19 +1639,24 @@ static void __loaded_vmcs_clear(void *arg)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
- crash_disable_local_vmclear(cpu);
+
+ vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
- * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
- * is before setting loaded_vmcs->vcpu to -1 which is done in
- * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
- * then adds the vmcs into percpu list before it is deleted.
+ * Ensure all writes to loaded_vmcs, including deleting it from its
+ * current percpu list, complete before setting loaded_vmcs->vcpu to
+ * -1, otherwise a different cpu can see vcpu == -1 first and add
+ * loaded_vmcs to its percpu list before it's deleted from this cpu's
+ * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
*/
smp_wmb();
- loaded_vmcs_init(loaded_vmcs);
- crash_enable_local_vmclear(cpu);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
}
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
@@ -2220,17 +2196,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
u64 guest_efer = vmx->vcpu.arch.efer;
u64 ignore_bits = 0;
- if (!enable_ept) {
- /*
- * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
- * host CPUID is more efficient than testing guest CPUID
- * or CR4. Host SMEP is anyway a requirement for guest SMEP.
- */
- if (boot_cpu_has(X86_FEATURE_SMEP))
- guest_efer |= EFER_NX;
- else if (!(guest_efer & EFER_NX))
- ignore_bits |= EFER_NX;
- }
+ /* Shadow paging assumes NX to be available. */
+ if (!enable_ept)
+ guest_efer |= EFER_NX;
/*
* LMA and LME handled by hardware; SCE meaningless outside long mode.
@@ -2480,18 +2448,17 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (!already_loaded) {
local_irq_disable();
- crash_disable_local_vmclear(cpu);
/*
- * Read loaded_vmcs->cpu should be before fetching
- * loaded_vmcs->loaded_vmcss_on_cpu_link.
- * See the comments in __loaded_vmcs_clear().
+ * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
+ * this cpu's percpu list, otherwise it may not yet be deleted
+ * from its previous cpu's percpu list. Pairs with the
+ * smp_wmb() in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
- crash_enable_local_vmclear(cpu);
local_irq_enable();
}
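Editor's note: the two comments above describe an smp_wmb()/smp_rmb() pairing: __loaded_vmcs_clear() must make its list_del() visible before it publishes cpu == -1, and vmx_vcpu_load() must observe that deletion before adding the VMCS to the new CPU's list. A minimal kernel-style sketch of the same publish/consume ordering, with invented names standing in for loaded_vmcs:

#include <linux/list.h>
#include <asm/barrier.h>

/* Invented stand-in for loaded_vmcs; cpu == -1 means "not on any list". */
struct item {
    int cpu;
    struct list_head node;
};

static void clear_item(struct item *it)        /* runs on the old CPU */
{
    list_del(&it->node);
    smp_wmb();         /* make the list removal visible before cpu = -1 */
    it->cpu = -1;
}

static void load_item(struct item *it, struct list_head *list, int cpu)
{
    if (it->cpu == -1) {       /* saw the "cleared" state ...            */
        smp_rmb();             /* ... so the list_del() above is visible */
        list_add(&it->node, list);
        it->cpu = cpu;
    }
}

Without the barriers another CPU could observe cpu == -1, add the entry to its own list, and race with the still-pending deletion.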
@@ -3209,12 +3176,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = to_vmx(vcpu)->spec_ctrl;
break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has_arch_capabilities(vcpu))
- return 1;
- msr_info->data = to_vmx(vcpu)->arch_capabilities;
- break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
@@ -3376,11 +3337,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
MSR_TYPE_W);
break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated)
- return 1;
- vmx->arch_capabilities = data;
- break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -3526,21 +3482,6 @@ static int hardware_enable(void)
if (cr4_read_shadow() & X86_CR4_VMXE)
return -EBUSY;
- INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-
- /*
- * Now we can enable the vmclear operation in kdump
- * since the loaded_vmcss_on_cpu list on this cpu
- * has been initialized.
- *
- * Though the cpu is not in VMX operation now, there
- * is no problem to enable the vmclear operation
- * for the loaded_vmcss_on_cpu list is empty!
- */
- crash_enable_local_vmclear(cpu);
-
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
@@ -4661,6 +4602,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
(ss.selector & SEGMENT_RPL_MASK));
}
+static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
+ unsigned int port, int size);
+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification;
+ unsigned short port;
+ int size;
+
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
+
+ return nested_vmx_check_io_bitmaps(vcpu, port, size);
+}
+
/*
* Check if guest state is valid. Returns true if valid, false if
* not.
@@ -5468,8 +5429,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- vmx->arch_capabilities = kvm_get_arch_capabilities();
-
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
@@ -6566,20 +6525,13 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
return 1;
}
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}
- ret = handle_mmio_page_fault(vcpu, gpa, true);
- if (likely(ret == RET_MMIO_PF_EMULATE))
- return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
- EMULATE_DONE;
-
- if (unlikely(ret == RET_MMIO_PF_INVALID))
- return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
-
- if (unlikely(ret == RET_MMIO_PF_RETRY))
- return 1;
+ ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+ if (ret >= 0)
+ return ret;
/* It is the real ept misconfig */
WARN_ON(1);
@@ -7653,6 +7605,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
+ struct x86_exception e;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
@@ -7679,8 +7632,12 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
vmx_instruction_info, true, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
- kvm_write_guest_virt_system(vcpu, gva, &field_value,
- (is_long_mode(vcpu) ? 8 : 4), NULL);
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4),
+ &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
}
nested_vmx_succeed(vcpu);
@@ -8050,23 +8007,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+/*
+ * Return true if an IO instruction with the specified port and size should cause
+ * a VM-exit into L1.
+ */
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+ int size)
{
- unsigned long exit_qualification;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gpa_t bitmap, last_bitmap;
- unsigned int port;
- int size;
u8 b;
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- port = exit_qualification >> 16;
- size = (exit_qualification & 7) + 1;
-
last_bitmap = (gpa_t)-1;
b = -1;
@@ -9220,8 +9171,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->__launched = vmx->loaded_vmcs->launched;
+ /* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
+ else if (static_branch_unlikely(&mds_user_clear))
+ mds_clear_cpu_buffers();
asm(
/* Store host registers */
@@ -9580,8 +9534,8 @@ free_vcpu:
return ERR_PTR(err);
}
-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
static int vmx_vm_init(struct kvm *kvm)
{
@@ -11356,11 +11310,71 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}
+static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned short port;
+ bool intercept;
+ int size;
+
+ if (info->intercept == x86_intercept_in ||
+ info->intercept == x86_intercept_ins) {
+ port = info->src_val;
+ size = info->dst_bytes;
+ } else {
+ port = info->dst_val;
+ size = info->src_bytes;
+ }
+
+ /*
+ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
+ * VM-exits depend on the 'unconditional IO exiting' VM-execution
+ * control.
+ *
+ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
+ */
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ intercept = nested_cpu_has(vmcs12,
+ CPU_BASED_UNCOND_IO_EXITING);
+ else
+ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+
+ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+}
+
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
- return X86EMUL_CONTINUE;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
+ switch (info->intercept) {
+ /*
+ * RDPID causes #UD if disabled through secondary execution controls.
+ * Because it is marked as EmulateOnUD, we need to intercept it here.
+ */
+ case x86_intercept_rdtscp:
+ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+ ctxt->exception.vector = UD_VECTOR;
+ ctxt->exception.error_code_valid = false;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ break;
+
+ case x86_intercept_in:
+ case x86_intercept_ins:
+ case x86_intercept_out:
+ case x86_intercept_outs:
+ return vmx_check_intercept_io(vcpu, info);
+
+ /* TODO: check more intercepts... */
+ default:
+ break;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64
@@ -11842,7 +11856,7 @@ module_exit(vmx_exit)
static int __init vmx_init(void)
{
- int r;
+ int r, cpu;
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
@@ -11864,6 +11878,12 @@ static int __init vmx_init(void)
}
}
+ for_each_possible_cpu(cpu) {
+ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ }
+
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a29df9ccbfde..314eb954bdee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -54,6 +54,7 @@
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
+#include <linux/nospec.h>
#include <trace/events/kvm.h>
#include <asm/debugreg.h>
@@ -191,6 +192,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages) },
+ { "nx_largepages_splitted", VM_STAT(nx_lpage_splits) },
{ NULL }
};
@@ -272,13 +274,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
int err;
- if (((value ^ smsr->values[slot].curr) & mask) == 0)
+ value = (value & mask) | (smsr->values[slot].host & ~mask);
+ if (value == smsr->values[slot].curr)
return 0;
- smsr->values[slot].curr = value;
err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
if (err)
return 1;
+ smsr->values[slot].curr = value;
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
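Editor's note: kvm_set_shared_msr() above now merges the host value into the bits outside the caller's mask before comparing, and only records ->curr once the WRMSR has succeeded. A stand-alone demo of the mask merge; all values are fabricated:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t host  = 0xff00; /* hypothetical host MSR value            */
    uint64_t value = 0x00ab; /* guest-requested value                  */
    uint64_t mask  = 0x00ff; /* only the low byte may be changed       */
    uint64_t merged;

    /* Bits outside the mask always come from the host value. */
    merged = (value & mask) | (host & ~mask);

    printf("merged MSR value: 0x%llx\n", (unsigned long long)merged);
    return 0;
}

With mask 0x00ff the low byte comes from the guest-requested value and everything above it stays at the host value, giving 0xffab here.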
@@ -535,8 +538,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
data, offset, len, access);
}
+static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
+{
+ return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
+ rsvd_bits(1, 2);
+}
+
/*
- * Load the pae pdptrs. Return true is they are all valid.
+ * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
*/
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
@@ -555,8 +564,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if ((pdpte[i] & PT_PRESENT_MASK) &&
- (pdpte[i] &
- vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+ (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
ret = 0;
goto out;
}
@@ -582,7 +590,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
gfn_t gfn;
int r;
- if (is_long_mode(vcpu) || !is_pae(vcpu))
+ if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
@@ -882,9 +890,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
+ size_t size = ARRAY_SIZE(vcpu->arch.db);
+
switch (dr) {
case 0 ... 3:
- vcpu->arch.db[dr] = val;
+ vcpu->arch.db[array_index_nospec(dr, size)] = val;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;
@@ -921,9 +931,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
+ size_t size = ARRAY_SIZE(vcpu->arch.db);
+
switch (dr) {
case 0 ... 3:
- *val = vcpu->arch.db[dr];
+ *val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
/* fall through */
@@ -1027,6 +1039,14 @@ u64 kvm_get_arch_capabilities(void)
rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
/*
+ * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
+ * the nested hypervisor runs with NX huge pages. If it is not,
+ * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
+ * L1 guests, so it need not worry about its own (L2) guests.
+ */
+ data |= ARCH_CAP_PSCHANGE_MC_NO;
+
+ /*
* If we're doing cache flushes (either "always" or "cond")
* we will do one whenever the guest does a vmlaunch/vmresume.
* If an outer hypervisor is doing the cache flush for us
@@ -1038,8 +1058,40 @@ u64 kvm_get_arch_capabilities(void)
if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ data |= ARCH_CAP_RDCL_NO;
+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ data |= ARCH_CAP_SSB_NO;
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ data |= ARCH_CAP_MDS_NO;
+
+ /*
+ * On TAA affected systems, export MDS_NO=0 when:
+ * - TSX is enabled on the host, i.e. X86_FEATURE_RTM=1.
+ * - Updated microcode is present. This is detected by
+ * the presence of ARCH_CAP_TSX_CTRL_MSR and ensures
+ * that VERW clears CPU buffers.
+ *
+ * When MDS_NO=0 is exported, guests deploy clear CPU buffer
+ * mitigation and don't complain:
+ *
+ * "Vulnerable: Clear CPU buffers attempted, no microcode"
+ *
+ * If TSX is disabled on the system, guests are also mitigated against
+ * TAA and clear CPU buffer mitigation is not required for guests.
+ */
+ if (!boot_cpu_has(X86_FEATURE_RTM))
+ data &= ~ARCH_CAP_TAA_NO;
+ else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ data |= ARCH_CAP_TAA_NO;
+ else if (data & ARCH_CAP_TSX_CTRL_MSR)
+ data &= ~ARCH_CAP_MDS_NO;
+
+ /* KVM does not emulate MSR_IA32_TSX_CTRL. */
+ data &= ~ARCH_CAP_TSX_CTRL_MSR;
return data;
}
+
EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
@@ -1073,11 +1125,8 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return 0;
}
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits)
- return false;
-
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
@@ -1095,19 +1144,33 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
}
return true;
+
+}
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+ if (efer & efer_reserved_bits)
+ return false;
+
+ return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 old_efer = vcpu->arch.efer;
+ u64 efer = msr_info->data;
- if (!kvm_valid_efer(vcpu, efer))
+ if (efer & efer_reserved_bits)
return 1;
- if (is_paging(vcpu)
- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
- return 1;
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+ return 1;
+
+ if (is_paging(vcpu) &&
+ (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+ return 1;
+ }
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
@@ -1354,7 +1417,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
vcpu->arch.tsc_always_catchup = 1;
return 0;
} else {
- WARN(1, "user requested TSC rate below hardware speed\n");
+ pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
return -1;
}
}
@@ -1364,8 +1427,8 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
user_tsc_khz, tsc_khz);
if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
- WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
- user_tsc_khz);
+ pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+ user_tsc_khz);
return -1;
}
@@ -2067,7 +2130,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
- u32 offset = msr - MSR_IA32_MC0_CTL;
+ u32 offset = array_index_nospec(
+ msr - MSR_IA32_MC0_CTL,
+ MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
/* only 0 or all 1s can be written to IA32_MCi_CTL
* some Linux kernels though clear bit 10 in bank 4 to
* workaround a BIOS/GART TBL issue on AMD K8s, ignore
@@ -2197,8 +2263,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated)
vcpu->arch.microcode_version = data;
break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vcpu->arch.arch_capabilities = data;
+ break;
case MSR_EFER:
- return set_efer(vcpu, data);
+ return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
@@ -2430,7 +2501,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
- u32 offset = msr - MSR_IA32_MC0_CTL;
+ u32 offset = array_index_nospec(
+ msr - MSR_IA32_MC0_CTL,
+ MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
data = vcpu->arch.mce_banks[offset];
break;
}
@@ -2473,6 +2547,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_UCODE_REV:
msr_info->data = vcpu->arch.microcode_version;
break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_arch_capabilities(vcpu))
+ return 1;
+ msr_info->data = vcpu->arch.arch_capabilities;
+ break;
case MSR_MTRRcap:
case 0x200 ... 0x2ff:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -4598,6 +4678,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -5735,8 +5822,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
- if (ctxt->have_exception && inject_emulated_exception(vcpu))
+ if (ctxt->have_exception) {
+ /*
+ * #UD should result in just EMULATION_FAILED, and trap-like
+ * exception should not be encountered during decode.
+ */
+ WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP);
+ inject_emulated_exception(vcpu);
return EMULATE_DONE;
+ }
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
@@ -5801,12 +5896,13 @@ restart:
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE && ctxt->tf)
- kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
- exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+ kvm_rip_write(vcpu, ctxt->eip);
+ if (r == EMULATE_DONE && ctxt->tf)
+ kvm_vcpu_do_singlestep(vcpu, &r);
__kvm_set_rflags(vcpu, ctxt->eflags);
+ }
/*
* For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -5908,17 +6004,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
- if (vcpu->cpu != smp_processor_id())
+ if (vcpu->cpu != raw_smp_processor_id())
send_ipi = 1;
}
}
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
@@ -6036,14 +6132,12 @@ static void kvm_set_mmio_spte_mask(void)
/* Set the present bit. */
mask |= 1ull;
-#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
-#endif
kvm_mmu_set_mmio_spte_mask(mask);
}
@@ -6056,12 +6150,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
struct kvm_vcpu *vcpu;
int i;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
atomic_set(&kvm_guest_has_master_clock, 0);
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
}
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
@@ -7448,7 +7542,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
@@ -7672,6 +7766,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
+ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
kvm_vcpu_mtrr_init(vcpu);
r = vcpu_load(vcpu);
if (r)
@@ -7712,7 +7807,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
- kvm_x86_ops->vcpu_free(vcpu);
+ kvm_arch_vcpu_free(vcpu);
}
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -8028,6 +8123,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
+ INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -8056,6 +8152,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}
+int kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return kvm_mmu_post_init_vm(kvm);
+}
+
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
int r;
@@ -8162,6 +8263,11 @@ int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+ kvm_mmu_pre_destroy_vm(kvm);
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
if (current->mm == kvm->mm) {
@@ -8213,6 +8319,13 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
{
int i;
+ /*
+ * Clear out the previous array pointers for the KVM_MR_MOVE case. The
+ * old arrays will be freed by __kvm_set_memory_region() if installing
+ * the new memslot is successful.
+ */
+ memset(&slot->arch, 0, sizeof(slot->arch));
+
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
@@ -8286,6 +8399,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
+ if (change == KVM_MR_MOVE)
+ return kvm_arch_create_memslot(kvm, memslot,
+ mem->memory_size >> PAGE_SHIFT);
+
return 0;
}
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index 2dd1fe13a37b..19f707992db2 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,5 +1,6 @@
#include <linux/types.h>
#include <linux/export.h>
+#include <asm/cpu.h>
unsigned int x86_family(unsigned int sig)
{
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 9758524ee99f..71a3759a2d4e 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -112,8 +112,8 @@ static void delay_mwaitx(unsigned long __loops)
__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
/*
- * AMD, like Intel, supports the EAX hint and EAX=0xf
- * means, do not enter any deep C-state and we use it
+ * AMD, like Intel's MWAIT version, supports the EAX hint, and
+ * EAX=0xf0 means do not enter any deep C-state; we use it
* here in delay() to minimize wakeup latency.
*/
__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 1754e094bc28..82e105b284e0 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@ AVXcode: 1
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@ AVXcode: 1
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -907,7 +909,7 @@ EndTable
GrpTable: Grp3_2
0: TEST Ev,Iz
-1:
+1: TEST Ev,Iz
2: NOT Ev
3: NEG Ev
4: MUL rAX,Ev
@@ -943,9 +945,9 @@ GrpTable: Grp6
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1011,7 +1013,7 @@ GrpTable: Grp15
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1042,6 +1044,10 @@ GrpTable: Grp19
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index afbc4d805d66..df5aee5402c4 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -176,7 +176,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
#define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
((y) + EXTENDED_Ebias) & 0x7fff; }
#define exponent16(x) (*(short *)&((x)->exp))
-#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (y); }
+#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (u16)(y); }
#define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); }
#define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
index 00548354912f..382093c5072b 100644
--- a/arch/x86/math-emu/reg_constant.c
+++ b/arch/x86/math-emu/reg_constant.c
@@ -17,7 +17,7 @@
#include "control_w.h"
#define MAKE_REG(s, e, l, h) { l, h, \
- ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+ (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5c419b8f99a0..102b4e78f4e6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -273,18 +273,19 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- return NULL;
- if (!pmd_present(*pmd))
+ if (pmd_present(*pmd) != pmd_present(*pmd_k))
set_pmd(pmd, *pmd_k);
+
+ if (!pmd_present(*pmd_k))
+ return NULL;
else
- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
return pmd_k;
}
-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
{
unsigned long address;
@@ -299,22 +300,28 @@ void vmalloc_sync_all(void)
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
- pmd_t *ret;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
- ret = vmalloc_sync_one(page_address(page), address);
+ vmalloc_sync_one(page_address(page), address);
spin_unlock(pgt_lock);
-
- if (!ret)
- break;
}
spin_unlock(&pgd_lock);
}
}
+void vmalloc_sync_mappings(void)
+{
+ vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+ vmalloc_sync();
+}
+
/*
* 32-bit:
*
@@ -409,11 +416,23 @@ out:
#else /* CONFIG_X86_64: */
-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
{
+ /*
+ * 64-bit mappings might allocate new p4d/pud pages
+ * that need to be propagated to all tasks' PGDs.
+ */
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}
+void vmalloc_sync_unmappings(void)
+{
+ /*
+ * Unmappings never allocate or free p4d/pud pages.
+ * No work is required here.
+ */
+}
+
/*
* 64-bit:
*
@@ -430,8 +449,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the later
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 1680768d392c..82f727fbbbd2 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -98,6 +98,20 @@ static inline int pte_allows_gup(unsigned long pteval, int write)
}
/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+ struct page *head = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
+ return NULL;
+ if (unlikely(!page_cache_add_speculative(head, refs)))
+ return NULL;
+ return head;
+}
+
+/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
@@ -112,7 +126,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
- struct page *page;
+ struct page *head, *page;
/* Similar to the PMD case, NUMA hinting must take slow path */
if (pte_protnone(pte)) {
@@ -138,7 +152,21 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- get_page(page);
+
+ head = try_get_compound_head(page, 1);
+ if (!head) {
+ put_dev_pagemap(pgmap);
+ pte_unmap(ptep);
+ return 0;
+ }
+
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ put_page(head);
+ put_dev_pagemap(pgmap);
+ pte_unmap(ptep);
+ return 0;
+ }
+
put_dev_pagemap(pgmap);
SetPageReferenced(page);
pages[*nr] = page;
@@ -174,9 +202,12 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
+ if (unlikely(!try_get_page(page))) {
+ put_dev_pagemap(pgmap);
+ return 0;
+ }
SetPageReferenced(page);
pages[*nr] = page;
- get_page(page);
put_dev_pagemap(pgmap);
(*nr)++;
pfn++;
@@ -202,6 +233,8 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
refs = 0;
head = pmd_page(pmd);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
@@ -261,6 +294,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
refs = 0;
head = pud_page(pud);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
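Editor's note: the gup.c hunks above stop taking page references blindly: gup_pte_range() goes through try_get_compound_head(), which bails out if the head page's refcount is already negative (overflowed) and then re-checks the PTE, and __gup_device_huge_pmd() switches from get_page() to try_get_page(). try_get_page() itself is defined outside these hunks (in include/linux/mm.h in this series); as an assumption-labelled sketch, its shape is roughly:

#include <linux/mm.h>

/*
 * Assumed shape of try_get_page(); not part of the quoted hunks.  The point
 * is to refuse a new reference once the refcount is saturated or has
 * overflowed, instead of silently wrapping it.
 */
static inline bool try_get_page_sketch(struct page *page)
{
    if (WARN_ON_ONCE(page_ref_count(page) <= 0))
        return false;
    page_ref_inc(page);
    return true;
}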
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 90801a8f19c9..ce092a62fc5d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -790,7 +790,7 @@ unsigned long max_swapfile_size(void)
pages = generic_max_swapfile_size();
- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
unsigned long long l1tf_limit = l1tf_pfn_limit();
/*
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 3f729e20f0e3..12522dbae615 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/cpu.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
@@ -297,7 +298,8 @@ void __init kaiser_check_boottime_disable(void)
goto skip;
}
- if (cmdline_find_option_bool(boot_command_line, "nopti"))
+ if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+ cpu_mitigations_off())
goto disable;
skip:
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e30baa8ad94f..08e0380414a9 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -251,7 +251,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
if (pgd_val(pgd) != 0) {
pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
- pgdp[i] = native_make_pgd(0);
+ pgd_clear(&pgdp[i]);
paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
pmd_free(mm, pmd);
@@ -419,7 +419,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(*ptep, entry);
if (changed && dirty) {
- *ptep = entry;
+ set_pte(ptep, entry);
pte_update(vma->vm_mm, address, ptep);
}
@@ -436,7 +436,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
if (changed && dirty) {
- *pmdp = entry;
+ set_pmd(pmdp, entry);
/*
* We had a write-protection fault here and changed the pmd
* to be more permissive. No need to flush the TLB for that,
@@ -544,8 +544,8 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
fixmaps_set++;
}
-void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
- pgprot_t flags)
+void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
{
__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index eac92e2d171b..a112bb175dd4 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,12 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
+/*
+ * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * stored in cpu_tlbstate.last_user_mm_ibpb.
+ */
+#define LAST_USER_MM_IBPB 0x1UL
+
atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
struct flush_tlb_info {
@@ -101,33 +107,101 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_restore(flags);
}
+static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+{
+ unsigned long next_tif = task_thread_info(next)->flags;
+ unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+
+ return (unsigned long)next->mm | ibpb;
+}
+
+static void cond_ibpb(struct task_struct *next)
+{
+ if (!next || !next->mm)
+ return;
+
+ /*
+ * Both the conditional and the always-IBPB mode use the mm
+ * pointer to avoid the IBPB when switching between tasks of the
+ * same process. Using the mm pointer instead of mm->context.ctx_id
+ * opens a hypothetical hole vs. mm_struct reuse, which is more or
+ * less impossible for an attacker to control. Aside from that it
+ * would only affect the first schedule so the theoretically
+ * exposed data is not really interesting.
+ */
+ if (static_branch_likely(&switch_mm_cond_ibpb)) {
+ unsigned long prev_mm, next_mm;
+
+ /*
+ * This is a bit more complex than the always mode because
+ * it has to handle two cases:
+ *
+ * 1) Switch from a user space task (potential attacker)
+ * which has TIF_SPEC_IB set to a user space task
+ * (potential victim) which has TIF_SPEC_IB not set.
+ *
+ * 2) Switch from a user space task (potential attacker)
+ * which has TIF_SPEC_IB not set to a user space task
+ * (potential victim) which has TIF_SPEC_IB set.
+ *
+ * This could be done by unconditionally issuing IBPB when
+ * a task which has TIF_SPEC_IB set is either scheduled in
+ * or out. Though that results in two flushes when:
+ *
+ * - the same user space task is scheduled out and later
+ * scheduled in again and only a kernel thread ran in
+ * between.
+ *
+ * - a user space task belonging to the same process is
+ * scheduled in after a kernel thread ran in between
+ *
+ * - a user space task belonging to the same process is
+ * scheduled in immediately.
+ *
+ * Optimize this with reasonably small overhead for the
+ * above cases. Mangle the TIF_SPEC_IB bit into the mm
+ * pointer of the incoming task which is stored in
+ * cpu_tlbstate.last_user_mm_ibpb for comparison.
+ */
+ next_mm = mm_mangle_tif_spec_ib(next);
+ prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
+
+ /*
+ * Issue IBPB only if the mm's are different and one or
+ * both have the IBPB bit set.
+ */
+ if (next_mm != prev_mm &&
+ (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+ indirect_branch_prediction_barrier();
+
+ this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
+ }
+
+ if (static_branch_unlikely(&switch_mm_always_ibpb)) {
+ /*
+ * Only flush when switching to a user space task with a
+ * different context than the user space task which ran
+ * last on this CPU.
+ */
+ if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+ indirect_branch_prediction_barrier();
+ this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
+ }
+ }
+}
+
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
- u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
-
/*
* Avoid user/user BTB poisoning by flushing the branch
* predictor when switching between processes. This stops
* one process from doing Spectre-v2 attacks on another.
- *
- * As an optimization, flush indirect branches only when
- * switching into processes that disable dumping. This
- * protects high value processes like gpg, without having
- * too high performance overhead. IBPB is *expensive*!
- *
- * This will not flush branches when switching into kernel
- * threads. It will also not flush if we switch to idle
- * thread and back to the same process. It will flush if we
- * switch to a different non-dumpable process.
*/
- if (tsk && tsk->mm &&
- tsk->mm->context.ctx_id != last_ctx_id &&
- get_dumpable(tsk->mm) != SUID_DUMP_USER)
- indirect_branch_prediction_barrier();
+ cond_ibpb(tsk);
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
@@ -143,14 +217,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
}
- /*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
- */
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
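
The cond_ibpb() comment above describes when the barrier actually fires: the incoming task's TIF_SPEC_IB bit is folded into bit 0 of its mm pointer and compared against the value cached per CPU, so an IBPB is issued only when the mangled pointers differ and at least one side carries the bit. A minimal userspace sketch of that decision, using hypothetical stand-ins for the kernel's task and per-CPU state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 0 of the cached mm pointer carries the previous task's IBPB hint. */
#define LAST_USER_MM_IBPB 0x1UL

/* Hypothetical stand-in for task_thread_info(next)->flags and next->mm. */
struct task_sketch { uintptr_t mm; bool spec_ib; };

static uintptr_t mangle(const struct task_sketch *t)
{
	return t->mm | (t->spec_ib ? LAST_USER_MM_IBPB : 0);
}

/* True when a switch from the cached value to 'next' would issue an IBPB. */
static bool needs_ibpb(uintptr_t prev_mangled, const struct task_sketch *next)
{
	uintptr_t next_mangled = mangle(next);

	return next_mangled != prev_mangled &&
	       ((next_mangled | prev_mangled) & LAST_USER_MM_IBPB);
}

int main(void)
{
	struct task_sketch attacker = { .mm = 0x1000, .spec_ib = true };
	struct task_sketch victim   = { .mm = 0x2000, .spec_ib = false };

	/* attacker -> victim: flush, one side has the IBPB bit set */
	printf("%d\n", needs_ibpb(mangle(&attacker), &victim));
	/* victim -> victim, e.g. after only a kernel thread ran: no flush */
	printf("%d\n", needs_ibpb(mangle(&victim), &victim));
	return 0;
}
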
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index cd9764520851..d9dabd0c31fc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,8 +15,6 @@
#include <asm/nospec-branch.h>
#include <linux/bpf.h>
-int bpf_jit_enable __read_mostly;
-
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 20fa7c84109d..62950ef7f84e 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -573,6 +573,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
/*
+ * Device [1022:7914]
+ * When in D0, PME# doesn't get asserted when plugging in a USB 2.0 device.
+ */
+static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
+
+/*
* Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
*
* Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 9bd115484745..5f0e596b0519 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1117,6 +1117,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
void __init pcibios_irq_init(void)
{
+ struct irq_routing_table *rtable = NULL;
+
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
@@ -1127,8 +1129,10 @@ void __init pcibios_irq_init(void)
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
+ rtable = pirq_table;
+ }
#endif
if (pirq_table) {
pirq_peer_trick();
@@ -1143,8 +1147,10 @@ void __init pcibios_irq_init(void)
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
- if (io_apic_assign_pci_irqs)
+ if (io_apic_assign_pci_irqs) {
+ kfree(rtable);
pirq_table = NULL;
+ }
}
x86_init.pci.fixup_irqs();
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index d49d3be81953..ecb5866aaf84 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -154,8 +154,8 @@ static void punit_dbgfs_unregister(void)
(kernel_ulong_t)&drv_data }
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng),
ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
{}
};
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index a0e85f2aff7d..f08abdf8bb67 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -478,7 +478,6 @@ void __init efi_init(void)
efi_char16_t *c16;
char vendor[100] = "unknown";
int i = 0;
- void *tmp;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@@ -503,14 +502,16 @@ void __init efi_init(void)
/*
* Show what we know for posterity
*/
- c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
+ c16 = early_memremap_ro(efi.systab->fw_vendor,
+ sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
- for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
- vendor[i] = *c16++;
+ for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
+ vendor[i] = c16[i];
vendor[i] = '\0';
- } else
+ early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ } else {
pr_err("Could not map the firmware vendor!\n");
- early_memunmap(tmp, 2);
+ }
pr_info("EFI v%u.%.02u by %s\n",
efi.systab->hdr.revision >> 16,
@@ -896,9 +897,6 @@ static void __init kexec_enter_virtual_mode(void)
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
-
- /* clean DUMMY object */
- efi_delete_dummy_variable();
#endif
}
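
The efi_init() hunk above maps enough of the firmware vendor string for the whole destination buffer and copies it by index, so the mapping is unmapped with the same length it was created with. Stripped of the EFI specifics, the bounded UCS-2-to-char copy looks roughly like this sketch (the helper name and sample string are made up):

#include <stdint.h>
#include <stdio.h>

/*
 * Copy at most dstlen - 1 characters from a NUL-terminated UCS-2 string,
 * truncating each 16-bit code unit to a char; a rough ASCII rendering is
 * all the log message needs.
 */
static void ucs2_to_char(char *dst, unsigned int dstlen, const uint16_t *src)
{
	unsigned int i;

	for (i = 0; i < dstlen - 1 && src[i]; i++)
		dst[i] = (char)src[i];
	dst[i] = '\0';
}

int main(void)
{
	uint16_t fw_vendor[] = { 'A', 'c', 'm', 'e', ' ', 'E', 'F', 'I', 0 };
	char vendor[100] = "unknown";

	ucs2_to_char(vendor, sizeof(vendor), fw_vendor);
	printf("EFI by %s\n", vendor);
	return 0;
}
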
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 53cace2ec0e2..c8f947a4aaf2 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -24,7 +25,7 @@
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -82,12 +83,8 @@ static void __save_processor_state(struct saved_context *ctxt)
/*
* descriptor tables
*/
-#ifdef CONFIG_X86_32
store_idt(&ctxt->idt);
-#else
-/* CONFIG_X86_64 */
- store_idt((struct desc_ptr *)&ctxt->idt_limit);
-#endif
+
/*
* We save it here, but restore it only in the hibernate case.
* For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +100,18 @@ static void __save_processor_state(struct saved_context *ctxt)
/*
* segment registers
*/
-#ifdef CONFIG_X86_32
- savesegment(es, ctxt->es);
- savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
savesegment(gs, ctxt->gs);
- savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
- asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
- asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
- asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
- asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
- asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+ savesegment(gs, ctxt->gs);
+ savesegment(fs, ctxt->fs);
+ savesegment(ds, ctxt->ds);
+ savesegment(es, ctxt->es);
rdmsrl(MSR_FS_BASE, ctxt->fs_base);
- rdmsrl(MSR_GS_BASE, ctxt->gs_base);
- rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+ rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
mtrr_save_fixed_ranges(NULL);
rdmsrl(MSR_EFER, ctxt->efer);
@@ -178,6 +171,9 @@ static void fix_processor_context(void)
write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
syscall_init(); /* This sets MSR_*STAR and related */
+#else
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ enable_sep_cpu();
#endif
load_TR_desc(); /* This does ltr */
load_mm_ldt(current->active_mm); /* This does lldt */
@@ -186,9 +182,12 @@ static void fix_processor_context(void)
}
/**
- * __restore_processor_state - restore the contents of CPU registers saved
- * by __save_processor_state()
- * @ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ * by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
*/
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
@@ -211,46 +210,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
write_cr2(ctxt->cr2);
write_cr0(ctxt->cr0);
+ /* Restore the IDT. */
+ load_idt(&ctxt->idt);
+
/*
- * now restore the descriptor tables to their proper values
- * ltr is done i fix_processor_context().
+ * Just in case the asm code got us here with the SS, DS, or ES
+ * out of sync with the GDT, update them.
*/
-#ifdef CONFIG_X86_32
- load_idt(&ctxt->idt);
+ loadsegment(ss, __KERNEL_DS);
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+
+ /*
+ * Restore percpu access. Percpu access can happen in exception
+ * handlers or in complicated helpers like load_gs_index().
+ */
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
-/* CONFIG_X86_64 */
- load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+ loadsegment(fs, __KERNEL_PERCPU);
+ loadsegment(gs, __KERNEL_STACK_CANARY);
#endif
+ /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
+ fix_processor_context();
+
/*
- * segment registers
+ * Now that we have descriptor tables fully restored and working
+ * exception handling, restore the usermode segments.
*/
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+ loadsegment(ds, ctxt->es);
loadsegment(es, ctxt->es);
loadsegment(fs, ctxt->fs);
- loadsegment(gs, ctxt->gs);
- loadsegment(ss, ctxt->ss);
+ load_gs_index(ctxt->gs);
/*
- * sysenter MSRs
+ * Restore FSBASE and GSBASE after restoring the selectors, since
+ * restoring the selectors clobbers the bases. Keep in mind
+ * that MSR_KERNEL_GS_BASE is horribly misnamed.
*/
- if (boot_cpu_has(X86_FEATURE_SEP))
- enable_sep_cpu();
-#else
-/* CONFIG_X86_64 */
- asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
- asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
- asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
- load_gs_index(ctxt->gs);
- asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
-
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
- wrmsrl(MSR_GS_BASE, ctxt->gs_base);
- wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+ wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+ loadsegment(gs, ctxt->gs);
#endif
- fix_processor_context();
-
do_fpu_end();
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
@@ -288,7 +293,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
* address in its instruction pointer may not be possible to resolve
* any more at that point (the page tables used by it previously may
* have been overwritten by hibernate image data).
+ *
+ * First, make sure that we wake up all the potentially disabled SMT
+ * threads which have been initially brought up and then put into
+ * mwait/cpuidle sleep.
+ * Those will be put to proper (not interfering with hibernation
+ * resume) sleep afterwards, and the resumed kernel will decide for itself
+ * what to do with them.
*/
+ ret = cpuhp_smt_enable();
+ if (ret)
+ return ret;
smp_ops.play_dead = resume_play_dead;
ret = disable_nonboot_cpus();
smp_ops.play_dead = play_dead;
@@ -377,15 +392,14 @@ static int __init bsp_pm_check_init(void)
core_initcall(bsp_pm_check_init);
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
{
- int i = 0;
+ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
struct saved_msr *msr_array;
+ int total_num;
+ int i, j;
- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
- return -EINVAL;
- }
+ total_num = saved_msrs->num + num;
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
if (!msr_array) {
@@ -393,19 +407,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
return -ENOMEM;
}
- for (i = 0; i < total_num; i++) {
- msr_array[i].info.msr_no = msr_id[i];
+ if (saved_msrs->array) {
+ /*
+ * Multiple callbacks can invoke this function, so copy any
+ * MSR save requests from previous invocations.
+ */
+ memcpy(msr_array, saved_msrs->array,
+ sizeof(struct saved_msr) * saved_msrs->num);
+
+ kfree(saved_msrs->array);
+ }
+
+ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+ msr_array[i].info.msr_no = msr_id[j];
msr_array[i].valid = false;
msr_array[i].info.reg.q = 0;
}
- saved_context.saved_msrs.num = total_num;
- saved_context.saved_msrs.array = msr_array;
+ saved_msrs->num = total_num;
+ saved_msrs->array = msr_array;
return 0;
}
/*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
* Sometimes MSRs are modified by the BIOSen after suspended to
* RAM, this might cause unexpected behavior after wakeup.
* Thus we save/restore these specified MSRs across suspend/resume
@@ -420,7 +445,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static struct dmi_system_id msr_save_dmi_table[] = {
@@ -435,9 +460,58 @@ static struct dmi_system_id msr_save_dmi_table[] = {
{}
};
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+ u32 cpuid_msr_id[] = {
+ MSR_AMD64_CPUID_FN_1,
+ };
+
+ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+ c->family);
+
+ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x15,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x16,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+ const struct x86_cpu_id *m;
+ int ret = 0;
+
+ m = x86_match_cpu(msr_save_cpu_table);
+ if (m) {
+ pm_cpu_match_t fn;
+
+ fn = (pm_cpu_match_t)m->driver_data;
+ ret = fn(m);
+ }
+
+ return ret;
+}
+
static int pm_check_save_msr(void)
{
dmi_check_system(msr_save_dmi_table);
+ pm_cpu_check(msr_save_cpu_table);
+
return 0;
}
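
msr_build_context() above was reworked so that several quirk callbacks can each contribute MSRs to save: the entries recorded by earlier invocations are copied into a larger array and the new ids are appended after them. A rough userspace sketch of that append pattern (the struct and function names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct saved_msr_sketch { unsigned int msr_no; int valid; };
struct msr_list { struct saved_msr_sketch *array; int num; };

/* Append 'num' new MSR ids, keeping whatever earlier callers recorded. */
static int msr_list_append(struct msr_list *l, const unsigned int *ids, int num)
{
	int total = l->num + num;
	struct saved_msr_sketch *arr = calloc(total, sizeof(*arr));
	int i, j;

	if (!arr)
		return -1;

	if (l->array) {
		/* copy save requests from previous invocations */
		memcpy(arr, l->array, sizeof(*arr) * l->num);
		free(l->array);
	}

	for (i = l->num, j = 0; i < total; i++, j++)
		arr[i].msr_no = ids[j];

	l->array = arr;
	l->num = total;
	return 0;
}

int main(void)
{
	struct msr_list list = { NULL, 0 };
	unsigned int dmi_ids[] = { 0x19a };        /* e.g. MSR_IA32_THERM_CONTROL */
	unsigned int cpu_ids[] = { 0xc0011004 };   /* e.g. MSR_AMD64_CPUID_FN_1 */

	msr_list_append(&list, dmi_ids, 1);
	msr_list_append(&list, cpu_ids, 1);
	printf("saved %d MSRs\n", list.num);
	free(list.array);
	return 0;
}
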
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index fef485b789ca..6120046bb7dd 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -11,6 +11,7 @@
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
+#include <linux/cpu.h>
#include <asm/init.h>
#include <asm/proto.h>
@@ -218,3 +219,35 @@ int arch_hibernation_header_restore(void *addr)
restore_cr3 = rdr->cr3;
return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
+
+int arch_resume_nosmt(void)
+{
+ int ret = 0;
+ /*
+ * We reached this while coming out of hibernation. This means
+ * that SMT siblings are sleeping in hlt, as mwait is not safe
+ * against control transition during resume (see comment in
+ * hibernate_resume_nonboot_cpu_disable()).
+ *
+ * If the resumed kernel has SMT disabled, we have to take all the
+ * SMT siblings out of hlt, and offline them again so that they
+ * end up in mwait proper.
+ *
+ * Called with hotplug disabled.
+ */
+ cpu_hotplug_enable();
+ if (cpu_smt_control == CPU_SMT_DISABLED ||
+ cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
+ enum cpuhp_smt_control old = cpu_smt_control;
+
+ ret = cpuhp_smt_enable();
+ if (ret)
+ goto out;
+ ret = cpuhp_smt_disable(old);
+ if (ret)
+ goto out;
+ }
+out:
+ cpu_hotplug_disable();
+ return ret;
+}
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 25012abc3409..ce5f431e6823 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
targets += realmode.lds
$(obj)/realmode.lds: $(obj)/pasyms.h
-LDFLAGS_realmode.elf := --emit-relocs -T
+LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
targets += realmode.elf
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index a3d2c62fd805..0a3ad5dd1e8b 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -68,7 +68,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@@ -256,7 +256,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod)
}
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
if (NR == 1)
next
# get index
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 00f54a91bb4b..3c423dfcd78b 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -5,6 +5,7 @@
*/
#include <errno.h>
+#include <stdlib.h>
#include <sys/ptrace.h>
#ifdef __i386__
#include <sys/user.h>
@@ -26,17 +27,18 @@ int save_i387_registers(int pid, unsigned long *fp_regs)
int save_fp_registers(int pid, unsigned long *fp_regs)
{
+#ifdef PTRACE_GETREGSET
struct iovec iov;
if (have_xstate_support) {
iov.iov_base = fp_regs;
- iov.iov_len = sizeof(struct _xstate);
+ iov.iov_len = FP_SIZE * sizeof(unsigned long);
if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
return -errno;
return 0;
- } else {
+ } else
+#endif
return save_i387_registers(pid, fp_regs);
- }
}
int restore_i387_registers(int pid, unsigned long *fp_regs)
@@ -48,17 +50,17 @@ int restore_i387_registers(int pid, unsigned long *fp_regs)
int restore_fp_registers(int pid, unsigned long *fp_regs)
{
+#ifdef PTRACE_SETREGSET
struct iovec iov;
-
if (have_xstate_support) {
iov.iov_base = fp_regs;
- iov.iov_len = sizeof(struct _xstate);
+ iov.iov_len = FP_SIZE * sizeof(unsigned long);
if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
return -errno;
return 0;
- } else {
+ } else
+#endif
return restore_i387_registers(pid, fp_regs);
- }
}
#ifdef __i386__
@@ -122,13 +124,21 @@ int put_fp_registers(int pid, unsigned long *regs)
void arch_init_registers(int pid)
{
- struct _xstate fp_regs;
+#ifdef PTRACE_GETREGSET
+ void * fp_regs;
struct iovec iov;
- iov.iov_base = &fp_regs;
- iov.iov_len = sizeof(struct _xstate);
+ fp_regs = malloc(FP_SIZE * sizeof(unsigned long));
+ if(fp_regs == NULL)
+ return;
+
+ iov.iov_base = fp_regs;
+ iov.iov_len = FP_SIZE * sizeof(unsigned long);
if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
have_xstate_support = 1;
+
+ free(fp_regs);
+#endif
}
#endif
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index cb3c22370cf5..7bcd10614f8b 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,11 @@ void foo(void)
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
- DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
+#ifdef FP_XSTATE_MAGIC1
+ DEFINE_LONGS(HOST_FP_SIZE, 2696);
+#else
+ DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
+#endif
DEFINE_LONGS(HOST_BX, RBX);
DEFINE_LONGS(HOST_CX, RCX);
DEFINE_LONGS(HOST_DI, RDI);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index a45d32abea26..b9beae798d72 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -626,6 +626,7 @@ void cpu_reset(void)
"add %2, %2, %7\n\t"
"addi %0, %0, -1\n\t"
"bnez %0, 1b\n\t"
+ "isync\n\t"
/* Jump to identity mapping */
"jx %3\n"
"2:\n\t"
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 7538d802b65a..483593068139 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -272,10 +272,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
return 1;
}
+/*
+ * level == 0 is for the return address from the caller of this function,
+ * not from this function itself.
+ */
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
- .skip = level + 1,
+ .skip = level,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index a71d2739fa82..9210b9cc4ec9 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -114,13 +114,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
// FIXME EXPORT_SYMBOL(screen_info);
#endif
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
extern long common_exception_return;
EXPORT_SYMBOL(common_exception_return);
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 35c822286bbe..3ce5ccdb054d 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -218,6 +218,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+ unsigned r1 = dtlb ?
+ read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
@@ -233,8 +235,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
}
if (tlb_asid == mm_asid) {
- unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
- read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);
diff --git a/block/bio.c b/block/bio.c
index 68972e3d3f5c..4c18a68913de 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1214,8 +1214,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
}
}
- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+ if (!map_data)
+ __free_page(page);
break;
+ }
len -= bytes;
offset = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index 864f38c676dd..3dbaef75d1b2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -908,6 +908,7 @@ out_exit_flush_rq:
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
+ q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
diff --git a/block/blk-map.c b/block/blk-map.c
index 171841a961fe..e1c7fdd96e48 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -144,7 +144,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return 0;
unmap_rq:
- __blk_rq_unmap_user(bio);
+ blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return ret;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index fda6a12fc776..61503b98e8a8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -305,13 +305,7 @@ void blk_recalc_rq_segments(struct request *rq)
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- unsigned short seg_cnt;
-
- /* estimate segment number by bi_vcnt for non-cloned bio */
- if (bio_flagged(bio, BIO_CLONED))
- seg_cnt = bio_segments(bio);
- else
- seg_cnt = bio->bi_vcnt;
+ unsigned short seg_cnt = bio_segments(bio);
if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
(seg_cnt < queue_max_segments(q)))
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 8c0894e0713b..5b64d9d7d147 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -243,20 +243,25 @@ static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *pag
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
+ const size_t size = PAGE_SIZE - 1;
unsigned int i, first = 1;
- ssize_t ret = 0;
+ int ret = 0, pos = 0;
for_each_cpu(i, hctx->cpumask) {
if (first)
- ret += sprintf(ret + page, "%u", i);
+ ret = snprintf(pos + page, size - pos, "%u", i);
else
- ret += sprintf(ret + page, ", %u", i);
+ ret = snprintf(pos + page, size - pos, ", %u", i);
+
+ if (ret >= size - pos)
+ break;
first = 0;
+ pos += ret;
}
- ret += sprintf(ret + page, "\n");
- return ret;
+ ret = snprintf(pos + page, size + 1 - pos, "\n");
+ return pos + ret;
}
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
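
The sysfs show routine above switches from open-ended sprintf() arithmetic to snprintf() with an explicit running position, so a very large CPU mask can no longer write past the page buffer. The same bounded-append pattern as a small standalone sketch, with a deliberately tiny buffer:

#include <stdio.h>

/* Append a comma-separated list of ids into buf without overrunning it. */
static int format_ids(char *buf, size_t size, const unsigned int *ids, int n)
{
	size_t limit = size - 1;	/* leave room for the trailing newline */
	int pos = 0, ret, i;

	for (i = 0; i < n; i++) {
		if (i == 0)
			ret = snprintf(buf + pos, limit - pos, "%u", ids[i]);
		else
			ret = snprintf(buf + pos, limit - pos, ", %u", ids[i]);

		if (ret >= (int)(limit - pos))
			break;		/* no room left: stop appending */
		pos += ret;
	}

	ret = snprintf(buf + pos, size - pos, "\n");
	return pos + ret;
}

int main(void)
{
	unsigned int cpus[] = { 0, 1, 2, 3 };
	char page[16];

	format_ids(page, sizeof(page), cpus, 4);
	fputs(page, stdout);
	return 0;
}
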
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 4bc701b32ce2..89bb6250633d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -336,6 +336,13 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
struct blk_mq_hw_ctx *hctx;
int i;
+ /*
+ * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+ * queue_hw_ctx after freezing the queue, so we use q_usage_counter
+ * to avoid racing with it.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter))
+ return;
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
@@ -351,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
-
+ blk_queue_exit(q);
}
static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6fd9e3c8dcb6..3a7504c4f91d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2339,6 +2339,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
+ /*
+ * Sync with blk_mq_queue_tag_busy_iter.
+ */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f679ae122843..0d644f37e3c6 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -349,7 +349,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size);
* storage device can address. The default of 512 covers most
* hardware.
**/
-void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
q->limits.logical_block_size = size;
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 556826ac7cb4..b6e5447d563e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -4,8 +4,8 @@
#include <linux/cdrom.h>
#include <linux/compat.h>
#include <linux/elevator.h>
-#include <linux/fd.h>
#include <linux/hdreg.h>
+#include <linux/pr.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/types.h>
@@ -209,318 +209,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
#define BLKBSZSET_32 _IOW(0x12, 113, int)
#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
-struct compat_floppy_drive_params {
- char cmos;
- compat_ulong_t max_dtr;
- compat_ulong_t hlt;
- compat_ulong_t hut;
- compat_ulong_t srt;
- compat_ulong_t spinup;
- compat_ulong_t spindown;
- unsigned char spindown_offset;
- unsigned char select_delay;
- unsigned char rps;
- unsigned char tracks;
- compat_ulong_t timeout;
- unsigned char interleave_sect;
- struct floppy_max_errors max_errors;
- char flags;
- char read_track;
- short autodetect[8];
- compat_int_t checkfreq;
- compat_int_t native_format;
-};
-
-struct compat_floppy_drive_struct {
- signed char flags;
- compat_ulong_t spinup_date;
- compat_ulong_t select_date;
- compat_ulong_t first_read_date;
- short probed_format;
- short track;
- short maxblock;
- short maxtrack;
- compat_int_t generation;
- compat_int_t keep_data;
- compat_int_t fd_ref;
- compat_int_t fd_device;
- compat_int_t last_checked;
- compat_caddr_t dmabuf;
- compat_int_t bufblocks;
-};
-
-struct compat_floppy_fdc_state {
- compat_int_t spec1;
- compat_int_t spec2;
- compat_int_t dtr;
- unsigned char version;
- unsigned char dor;
- compat_ulong_t address;
- unsigned int rawcmd:2;
- unsigned int reset:1;
- unsigned int need_configure:1;
- unsigned int perp_mode:2;
- unsigned int has_fifo:1;
- unsigned int driver_version;
- unsigned char track[4];
-};
-
-struct compat_floppy_write_errors {
- unsigned int write_errors;
- compat_ulong_t first_error_sector;
- compat_int_t first_error_generation;
- compat_ulong_t last_error_sector;
- compat_int_t last_error_generation;
- compat_uint_t badness;
-};
-
-#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
-#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
-#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
-#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
-#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
-#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
-#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
-#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
-
-static struct {
- unsigned int cmd32;
- unsigned int cmd;
-} fd_ioctl_trans_table[] = {
- { FDSETPRM32, FDSETPRM },
- { FDDEFPRM32, FDDEFPRM },
- { FDGETPRM32, FDGETPRM },
- { FDSETDRVPRM32, FDSETDRVPRM },
- { FDGETDRVPRM32, FDGETDRVPRM },
- { FDGETDRVSTAT32, FDGETDRVSTAT },
- { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
- { FDGETFDCSTAT32, FDGETFDCSTAT },
- { FDWERRORGET32, FDWERRORGET }
-};
-
-#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
-
-static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs = get_fs();
- void *karg = NULL;
- unsigned int kcmd = 0;
- int i, err;
-
- for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
- if (cmd == fd_ioctl_trans_table[i].cmd32) {
- kcmd = fd_ioctl_trans_table[i].cmd;
- break;
- }
- if (!kcmd)
- return -EINVAL;
-
- switch (cmd) {
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- {
- compat_uptr_t name;
- struct compat_floppy_struct __user *uf;
- struct floppy_struct *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETPRM32)
- break;
- err = __get_user(f->size, &uf->size);
- err |= __get_user(f->sect, &uf->sect);
- err |= __get_user(f->head, &uf->head);
- err |= __get_user(f->track, &uf->track);
- err |= __get_user(f->stretch, &uf->stretch);
- err |= __get_user(f->gap, &uf->gap);
- err |= __get_user(f->rate, &uf->rate);
- err |= __get_user(f->spec1, &uf->spec1);
- err |= __get_user(f->fmt_gap, &uf->fmt_gap);
- err |= __get_user(name, &uf->name);
- f->name = compat_ptr(name);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- {
- struct compat_floppy_drive_params __user *uf;
- struct floppy_drive_params *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETDRVPRM32)
- break;
- err = __get_user(f->cmos, &uf->cmos);
- err |= __get_user(f->max_dtr, &uf->max_dtr);
- err |= __get_user(f->hlt, &uf->hlt);
- err |= __get_user(f->hut, &uf->hut);
- err |= __get_user(f->srt, &uf->srt);
- err |= __get_user(f->spinup, &uf->spinup);
- err |= __get_user(f->spindown, &uf->spindown);
- err |= __get_user(f->spindown_offset, &uf->spindown_offset);
- err |= __get_user(f->select_delay, &uf->select_delay);
- err |= __get_user(f->rps, &uf->rps);
- err |= __get_user(f->tracks, &uf->tracks);
- err |= __get_user(f->timeout, &uf->timeout);
- err |= __get_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
- err |= __get_user(f->flags, &uf->flags);
- err |= __get_user(f->read_track, &uf->read_track);
- err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
- err |= __get_user(f->checkfreq, &uf->checkfreq);
- err |= __get_user(f->native_format, &uf->native_format);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDGETFDCSTAT32:
- karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDWERRORGET32:
- karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- default:
- return -EINVAL;
- }
- set_fs(KERNEL_DS);
- err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
- set_fs(old_fs);
- if (err)
- goto out;
- switch (cmd) {
- case FDGETPRM32:
- {
- struct floppy_struct *f = karg;
- struct compat_floppy_struct __user *uf = compat_ptr(arg);
-
- err = __put_user(f->size, &uf->size);
- err |= __put_user(f->sect, &uf->sect);
- err |= __put_user(f->head, &uf->head);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->stretch, &uf->stretch);
- err |= __put_user(f->gap, &uf->gap);
- err |= __put_user(f->rate, &uf->rate);
- err |= __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->fmt_gap, &uf->fmt_gap);
- err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
- break;
- }
- case FDGETDRVPRM32:
- {
- struct compat_floppy_drive_params __user *uf;
- struct floppy_drive_params *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->cmos, &uf->cmos);
- err |= __put_user(f->max_dtr, &uf->max_dtr);
- err |= __put_user(f->hlt, &uf->hlt);
- err |= __put_user(f->hut, &uf->hut);
- err |= __put_user(f->srt, &uf->srt);
- err |= __put_user(f->spinup, &uf->spinup);
- err |= __put_user(f->spindown, &uf->spindown);
- err |= __put_user(f->spindown_offset, &uf->spindown_offset);
- err |= __put_user(f->select_delay, &uf->select_delay);
- err |= __put_user(f->rps, &uf->rps);
- err |= __put_user(f->tracks, &uf->tracks);
- err |= __put_user(f->timeout, &uf->timeout);
- err |= __put_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
- err |= __put_user(f->flags, &uf->flags);
- err |= __put_user(f->read_track, &uf->read_track);
- err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
- err |= __put_user(f->checkfreq, &uf->checkfreq);
- err |= __put_user(f->native_format, &uf->native_format);
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- {
- struct compat_floppy_drive_struct __user *uf;
- struct floppy_drive_struct *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->flags, &uf->flags);
- err |= __put_user(f->spinup_date, &uf->spinup_date);
- err |= __put_user(f->select_date, &uf->select_date);
- err |= __put_user(f->first_read_date, &uf->first_read_date);
- err |= __put_user(f->probed_format, &uf->probed_format);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->maxblock, &uf->maxblock);
- err |= __put_user(f->maxtrack, &uf->maxtrack);
- err |= __put_user(f->generation, &uf->generation);
- err |= __put_user(f->keep_data, &uf->keep_data);
- err |= __put_user(f->fd_ref, &uf->fd_ref);
- err |= __put_user(f->fd_device, &uf->fd_device);
- err |= __put_user(f->last_checked, &uf->last_checked);
- err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
- err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
- break;
- }
- case FDGETFDCSTAT32:
- {
- struct compat_floppy_fdc_state __user *uf;
- struct floppy_fdc_state *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->spec2, &uf->spec2);
- err |= __put_user(f->dtr, &uf->dtr);
- err |= __put_user(f->version, &uf->version);
- err |= __put_user(f->dor, &uf->dor);
- err |= __put_user(f->address, &uf->address);
- err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
- (char *)&f->address + sizeof(f->address), sizeof(int));
- err |= __put_user(f->driver_version, &uf->driver_version);
- err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
- break;
- }
- case FDWERRORGET32:
- {
- struct compat_floppy_write_errors __user *uf;
- struct floppy_write_errors *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->write_errors, &uf->write_errors);
- err |= __put_user(f->first_error_sector, &uf->first_error_sector);
- err |= __put_user(f->first_error_generation, &uf->first_error_generation);
- err |= __put_user(f->last_error_sector, &uf->last_error_sector);
- err |= __put_user(f->last_error_generation, &uf->last_error_generation);
- err |= __put_user(f->badness, &uf->badness);
- break;
- }
- default:
- break;
- }
- if (err)
- err = -EFAULT;
-
-out:
- kfree(karg);
- return err;
-}
-
static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -537,16 +225,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case HDIO_GET_ADDRESS:
case HDIO_GET_BUSSTATE:
return compat_hdio_ioctl(bdev, mode, cmd, arg);
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- case FDGETFDCSTAT32:
- case FDWERRORGET32:
- return compat_fd_ioctl(bdev, mode, cmd, arg);
case CDROMREADAUDIO:
return compat_cdrom_read_audio(bdev, mode, cmd, arg);
case CDROM_SEND_PACKET:
@@ -566,23 +244,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case HDIO_DRIVE_CMD:
/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
case 0x330:
- /* 0x02 -- Floppy ioctls */
- case FDMSGON:
- case FDMSGOFF:
- case FDSETEMSGTRESH:
- case FDFLUSH:
- case FDWERRORCLR:
- case FDSETMAXERRS:
- case FDGETMAXERRS:
- case FDGETDRVTYP:
- case FDEJECT:
- case FDCLRPRM:
- case FDFMTBEG:
- case FDFMTEND:
- case FDRESET:
- case FDTWADDLE:
- case FDFMTTRK:
- case FDRAWCMD:
/* CDROM stuff */
case CDROMPAUSE:
case CDROMRESUME:
@@ -746,6 +407,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKTRACETEARDOWN: /* compatible */
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
return ret;
+ case IOC_PR_REGISTER:
+ case IOC_PR_RESERVE:
+ case IOC_PR_RELEASE:
+ case IOC_PR_PREEMPT:
+ case IOC_PR_PREEMPT_ABORT:
+ case IOC_PR_CLEAR:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
default:
if (disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index b5953f1d1a18..cf3975ee4fd8 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -136,11 +136,13 @@ void af_alg_release_parent(struct sock *sk)
sk = ask->parent;
ask = alg_sk(sk);
- lock_sock(sk);
+ local_bh_disable();
+ bh_lock_sock(sk);
ask->nokey_refcnt -= nokey;
if (!last)
last = !--ask->refcnt;
- release_sock(sk);
+ bh_unlock_sock(sk);
+ local_bh_enable();
if (last)
sock_put(sk);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5c098ffa7d3d..9e5b24329b41 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -652,11 +652,9 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
- if (!spawn->alg)
- return;
-
down_write(&crypto_alg_sem);
- list_del(&spawn->list);
+ if (spawn->alg)
+ list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
@@ -664,22 +662,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
- struct crypto_alg *alg2;
down_read(&crypto_alg_sem);
alg = spawn->alg;
- alg2 = alg;
- if (alg2)
- alg2 = crypto_mod_get(alg2);
- up_read(&crypto_alg_sem);
-
- if (!alg2) {
- if (alg)
- crypto_shoot_alg(alg);
- return ERR_PTR(-EAGAIN);
+ if (alg && !crypto_mod_get(alg)) {
+ alg->cra_flags |= CRYPTO_ALG_DYING;
+ alg = NULL;
}
+ up_read(&crypto_alg_sem);
- return alg;
+ return alg ?: ERR_PTR(-EAGAIN);
}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index aaf2f810d170..b28f45aca2ef 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -538,7 +538,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg))
+ if (unlikely(ZERO_OR_NULL_PTR(sreq->tsg)))
goto unlock;
sg_init_table(sreq->tsg, tx_nents);
memcpy(iv, ctx->iv, ivsize);
diff --git a/crypto/api.c b/crypto/api.c
index abf53e67e3d8..b273b3a726a9 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -355,13 +355,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}
-void crypto_shoot_alg(struct crypto_alg *alg)
+static void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
-EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 331f6baf2df8..13f3de68b479 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
+ select CRYPTO_HASH
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
@@ -33,6 +34,7 @@ config X509_CERTIFICATE_PARSER
config PKCS7_MESSAGE_PARSER
tristate "PKCS#7 message parser"
depends on X509_CERTIFICATE_PARSER
+ select CRYPTO_HASH
select ASN1
select OID_REGISTRY
help
@@ -55,6 +57,7 @@ config SIGNED_PE_FILE_VERIFICATION
bool "Support for PE file signature verification"
depends on PKCS7_MESSAGE_PARSER=y
depends on SYSTEM_DATA_VERIFICATION
+ select CRYPTO_HASH
select ASN1
select OID_REGISTRY
help
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index cb1c3a3287b0..96d842a13ffc 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -67,6 +67,8 @@ struct chachapoly_req_ctx {
unsigned int cryptlen;
/* Actual AD, excluding IV */
unsigned int assoclen;
+ /* request flags, with MAY_SLEEP cleared if needed */
+ u32 flags;
union {
struct poly_req poly;
struct chacha_req chacha;
@@ -76,8 +78,12 @@ struct chachapoly_req_ctx {
static inline void async_done_continue(struct aead_request *req, int err,
int (*cont)(struct aead_request *))
{
- if (!err)
+ if (!err) {
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = cont(req);
+ }
if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
@@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_decrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -188,7 +194,7 @@ static int poly_tail(struct aead_request *req)
memcpy(&preq->tail.cryptlen, &len, sizeof(len));
sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_tail_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src,
@@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, &preq->pad, padlen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipherpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -250,7 +256,7 @@ static int poly_cipher(struct aead_request *req)
sg_init_table(rctx->src, 2);
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipher_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
@@ -280,7 +286,7 @@ static int poly_adpad(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, preq->pad, padlen);
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_adpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
@@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_ad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
@@ -331,7 +337,7 @@ static int poly_setkey(struct aead_request *req)
sg_init_table(preq->src, 1);
sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_setkey_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
@@ -355,7 +361,7 @@ static int poly_init(struct aead_request *req)
struct poly_req *preq = &rctx->u.poly;
int err;
- ahash_request_set_callback(&preq->req, aead_request_flags(req),
+ ahash_request_set_callback(&preq->req, rctx->flags,
poly_init_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
@@ -393,7 +399,7 @@ static int poly_genkey(struct aead_request *req)
chacha_iv(creq->iv, req, 0);
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
poly_genkey_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
@@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_request *req)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
}
- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
+ skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_encrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
@@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen;
+ rctx->flags = aead_request_flags(req);
/* encrypt call chain:
* - chacha_encrypt/done()
@@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aead_request *req)
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
+ rctx->flags = aead_request_flags(req);
/* decrypt call chain:
* - poly_genkey/done()
@@ -647,8 +655,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha_name,
- poly_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,%s)", name, chacha->base.cra_name,
+ poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc..d08048ae5552 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 1c5705481c69..810be7a9e3c4 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -55,6 +55,9 @@ static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
list_for_each_entry(q, &crypto_alg_list, cra_list) {
int match = 0;
+ if (crypto_is_larval(q))
+ continue;
+
if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
continue;
@@ -266,38 +269,43 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
drop_alg:
crypto_mod_put(alg);
- if (err)
+ if (err) {
+ kfree_skb(skb);
return err;
+ }
return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct crypto_alg *alg;
+ const size_t start_pos = cb->args[0];
+ size_t pos = 0;
struct crypto_dump_info info;
- int err;
-
- if (cb->args[0])
- goto out;
-
- cb->args[0] = 1;
+ struct crypto_alg *alg;
+ int res;
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
+ down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list) {
- err = crypto_report_alg(alg, &info);
- if (err)
- goto out_err;
+ if (pos >= start_pos) {
+ res = crypto_report_alg(alg, &info);
+ if (res == -EMSGSIZE)
+ break;
+ if (res)
+ goto out;
+ }
+ pos++;
}
-
+ cb->args[0] = pos;
+ res = skb->len;
out:
- return skb->len;
-out_err:
- return err;
+ up_read(&crypto_alg_sem);
+ return res;
}
static int crypto_dump_report_done(struct netlink_callback *cb)
@@ -480,7 +488,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
struct crypto_alg *alg;
- u16 dump_alloc = 0;
+ unsigned long dump_alloc = 0;
if (link->dump == NULL)
return -EINVAL;
@@ -488,16 +496,16 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
+ up_read(&crypto_alg_sem);
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
- .min_dump_alloc = dump_alloc,
+ .min_dump_alloc = min(dump_alloc, 65535UL),
};
err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
- up_read(&crypto_alg_sem);
return err;
}
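
crypto_dump_report() now records how far it got in cb->args[0] and resumes from that position on the next dump callback, instead of trying to report every algorithm in a single oversized message. Reduced to its essentials, the cursor-based resume looks like this sketch (the item list and per-pass budget stand in for the netlink machinery):

#include <stdio.h>

/*
 * Emit items starting at *cursor until the per-pass budget is used up;
 * *cursor is advanced so the next call resumes where this one stopped.
 */
static size_t dump_some(const char *const *items, size_t n,
			size_t *cursor, size_t budget)
{
	size_t emitted = 0;

	while (*cursor < n && emitted < budget) {
		printf("%s\n", items[(*cursor)++]);
		emitted++;
	}
	return emitted;
}

int main(void)
{
	const char *algs[] = { "aes", "sha256", "ghash", "ctr(aes)", "gcm(aes)" };
	size_t cursor = 0, done;

	/* two items per pass, like repeated dump callbacks */
	while ((done = dump_some(algs, 5, &cursor, 2)) > 0)
		printf("-- emitted %zu, next start %zu --\n", done, cursor);
	return 0;
}
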
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 414c78a9c214..7cf6c3e4825c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -897,10 +897,11 @@ static void ecc_point_mult(struct ecc_point *result,
static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits)
{
+ const __be64 *src = (__force __be64 *)in;
int i;
for (i = 0; i < ndigits; i++)
- out[i] = __swab64(in[ndigits - 1 - i]);
+ out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8d335ec6dfc5..ce502f7b15f2 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -616,7 +616,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
- const char *full_name,
const char *ctr_name,
const char *ghash_name)
{
@@ -657,7 +656,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
err = -EINVAL;
- if (ghash->digestsize != 16)
+ if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+ ghash->digestsize != 16)
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -669,24 +669,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
- /* We only support 16-byte blocks. */
- if (crypto_skcipher_alg_ivsize(ctr) != 16)
- goto out_put_ctr;
-
- /* Not a stream cipher? */
+ /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
- if (ctr->base.cra_blocksize != 1)
+ if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+ crypto_skcipher_alg_ivsize(ctr) != 16 ||
+ ctr->base.cra_blocksize != 1)
goto out_put_ctr;
err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+ goto out_put_ctr;
+
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
inst->alg.base.cra_flags = (ghash->base.cra_flags |
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -728,7 +728,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -738,12 +737,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
- CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, "ghash");
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
}
static struct crypto_template crypto_gcm_tmpl = {
@@ -757,7 +751,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
{
const char *ctr_name;
const char *ghash_name;
- char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
@@ -767,12 +760,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
- ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, ghash_name);
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a84e3..73b56f2f44f1 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 k;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
- ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
+
+ BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
+ memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
+ ctx->gf128 = gf128mul_init_4k_lle(&k);
+ memzero_explicit(&k, GHASH_BLOCK_SIZE);
+
if (!ctx->gf128)
return -ENOMEM;
diff --git a/crypto/internal.h b/crypto/internal.h
index 7eefcdb00227..6184c4226a8f 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -87,7 +87,6 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
-void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index f8ec3d4ba4a8..85082574c515 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -130,7 +130,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
struct padata_priv *padata = pcrypt_request_padata(preq);
padata->info = err;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
padata_do_serial(padata);
}
@@ -394,7 +393,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
int ret;
pinst->kobj.kset = pcrypt_kset;
- ret = kobject_add(&pinst->kobj, NULL, name);
+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
if (!ret)
kobject_uevent(&pinst->kobj, KOBJ_ADD);
@@ -505,11 +504,12 @@ err:
static void __exit pcrypt_exit(void)
{
+ crypto_unregister_template(&pcrypt_tmpl);
+
pcrypt_fini_padata(&pencrypt);
pcrypt_fini_padata(&pdecrypt);
kset_unregister(pcrypt_kset);
- crypto_unregister_template(&pcrypt_tmpl);
}
module_init(pcrypt_init);
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 7830d304dff6..d58224d80867 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -267,15 +267,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
- req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
- if (!req_ctx->out_buf) {
- kfree(req_ctx->in_buf);
- return -ENOMEM;
- }
-
- pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size, NULL);
-
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0eea5622..319d9962552e 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -186,7 +186,7 @@ static int encrypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 64);
- salsa20_ivsetup(ctx, walk.iv);
+ salsa20_ivsetup(ctx, desc->info);
while (walk.nbytes >= 64) {
salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 9033088ca231..ebff33765ac3 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -4527,7 +4527,49 @@ static struct hash_testvec poly1305_tv_template[] = {
.psize = 80,
.digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
- },
+ }, { /* Regression test for overflow in AVX2 implementation */
+ .plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff",
+ .psize = 300,
+ .digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
+ "\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
+ }
};
/*
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 321bc6ff2a9d..904c8444aa0a 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -25,8 +25,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20
@@ -468,10 +469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
u64 a, b, c, aa, bb, cc;
u64 x[8];
int i;
- const __le64 *ptr = (const __le64 *)data;
for (i = 0; i < 8; i++)
- x[i] = le64_to_cpu(ptr[i]);
+ x[i] = get_unaligned_le64(data + i * sizeof(__le64));
/* save */
a = aa = tctx->a;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 957d3fa3b543..a9158858f54c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -243,7 +243,7 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
@@ -448,12 +448,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
* have _PS0 and _PS3 without _PSC (and no power resources), so
* acpi_bus_init_power() will assume that the BIOS has put them into D0.
*/
- ret = acpi_device_fix_up_power(adev);
- if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
- }
+ acpi_device_fix_up_power(adev);
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 6b0d3ef7309c..2ccfbb61ca89 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
if (node < 0)
node = memory_add_physaddr_to_nid(info->start_addr);
- result = add_memory(node, info->start_addr, info->length);
+ result = __add_memory(node, info->start_addr, info->length);
/*
* If the memory block has been used by the kernel, add_memory()
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 667dc5c86fef..ea0573176894 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2069,21 +2069,29 @@ static int __init intel_opregion_present(void)
return opregion;
}
+/* Check if the chassis-type indicates there is no builtin LCD panel */
static bool dmi_is_desktop(void)
{
const char *chassis_type;
+ unsigned long type;
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
if (!chassis_type)
return false;
- if (!strcmp(chassis_type, "3") || /* 3: Desktop */
- !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
- !strcmp(chassis_type, "5") || /* 5: Pizza Box */
- !strcmp(chassis_type, "6") || /* 6: Mini Tower */
- !strcmp(chassis_type, "7") || /* 7: Tower */
- !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
+ if (kstrtoul(chassis_type, 10, &type) != 0)
+ return false;
+
+ switch (type) {
+ case 0x03: /* Desktop */
+ case 0x04: /* Low Profile Desktop */
+ case 0x05: /* Pizza Box */
+ case 0x06: /* Mini Tower */
+ case 0x07: /* Tower */
+ case 0x10: /* Lunch Box */
+ case 0x11: /* Main Server Chassis */
return true;
+ }
return false;
}
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 396e358c2cee..4296f4932294 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -58,12 +58,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
}
#endif
+static bool acpi_no_watchdog;
+
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
- if (acpi_disabled)
+ if (acpi_disabled || acpi_no_watchdog)
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
@@ -91,6 +93,14 @@ bool acpi_has_watchdog(void)
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
+/* ACPI watchdog can be disabled on boot command line */
+static int __init disable_acpi_watchdog(char *str)
+{
+ acpi_no_watchdog = true;
+ return 1;
+}
+__setup("acpi_no_watchdog", disable_acpi_watchdog);
+
void __init acpi_watchdog_init(void)
{
const struct acpi_wdat_entry *entries;
@@ -129,12 +139,11 @@ void __init acpi_watchdog_init(void)
gas = &entries[i].register_region;
res.start = gas->address;
+ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
- res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
- res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 92fa47c6498c..20fd17aaa918 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -247,6 +247,8 @@ acpi_status
acpi_ev_initialize_region(union acpi_operand_object *region_obj,
u8 acpi_ns_locked);
+u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+
/*
* evsci - SCI (System Control Interrupt) handling/dispatch
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index dff1207a6078..219bc576d127 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -428,9 +428,9 @@ struct acpi_simple_repair_info {
/* Info for running the _REG methods */
struct acpi_reg_walk_info {
- acpi_adr_space_type space_id;
u32 function;
u32 reg_run_count;
+ acpi_adr_space_type space_id;
};
/*****************************************************************************
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 6a4b603d0e83..10bbf6ca082a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -272,7 +272,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
- * ` walk_state - Current method state
+ * walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index fd34040d4f44..9c41d2153d0f 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -440,6 +440,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
+ /*
+ * Disassembler: handle create field operators here.
+ *
+ * create_buffer_field is a deferred op that is typically processed in load
+ * pass 2. However, disassembly of control method contents walk the parse
+ * tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed
+ * in a later walk. This is a problem when there is a control method that
+ * has the same name as the AML_CREATE object. In this case, any use of the
+ * name segment will be detected as a method call rather than a reference
+ * to a buffer field.
+ *
+ * This earlier creation during disassembly solves this issue by inserting
+ * the named object in the ACPI namespace so that references to this name
+ * would be a name string rather than a method call.
+ */
+ if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
+ (walk_state->op_info->flags & AML_CREATE)) {
+ status = acpi_ds_create_buffer_field(op, walk_state);
+ return_ACPI_STATUS(status);
+ }
+
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 4c6f79514040..9cb60fdc77e5 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -677,6 +677,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+ /*
+ * These address spaces do not need a call to _REG, since the ACPI
+ * specification defines them as: "must always be accessible". Since
+ * they never change state (never become unavailable), no need to ever
+ * call _REG on them. Also, a data_table is not a "real" address space,
+ * so do not call _REG. September 2018.
+ */
+ if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ||
+ (space_id == ACPI_ADR_SPACE_SYSTEM_IO) ||
+ (space_id == ACPI_ADR_SPACE_DATA_TABLE)) {
+ return_VOID;
+ }
+
info.space_id = space_id;
info.function = function;
info.reg_run_count = 0;
@@ -738,8 +751,8 @@ acpi_ev_reg_run(acpi_handle obj_handle,
}
/*
- * We only care about regions.and objects that are allowed to have address
- * space handlers
+ * We only care about regions and objects that are allowed to have
+ * address space handlers
*/
if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
return (AE_OK);
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 75ddd160a716..c8646c397786 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -50,9 +50,6 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
-/* Local prototypes */
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
-
/*******************************************************************************
*
* FUNCTION: acpi_ev_system_memory_region_setup
@@ -67,7 +64,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
* DESCRIPTION: Setup a system_memory operation region
*
******************************************************************************/
-
acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle,
u32 function,
@@ -347,7 +343,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*
******************************************************************************/
-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
struct acpi_pnp_device_id *hid;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index d2743067126a..f87d59a05c68 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -227,7 +227,6 @@ acpi_remove_address_space_handler(acpi_handle device,
*/
region_obj =
handler_obj->address_space.region_list;
-
}
/* Remove this Handler object from the list */
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0375c6024062..acb4e7523a1d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -203,40 +203,40 @@ static int ghes_estatus_pool_init(void)
return 0;
}
-static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+static void ghes_estatus_pool_free_chunk(struct gen_pool *pool,
struct gen_pool_chunk *chunk,
void *data)
{
- free_page(chunk->start_addr);
+ vfree((void *)chunk->start_addr);
}
static void ghes_estatus_pool_exit(void)
{
gen_pool_for_each_chunk(ghes_estatus_pool,
- ghes_estatus_pool_free_chunk_page, NULL);
+ ghes_estatus_pool_free_chunk, NULL);
gen_pool_destroy(ghes_estatus_pool);
}
static int ghes_estatus_pool_expand(unsigned long len)
{
- unsigned long i, pages, size, addr;
- int ret;
+ unsigned long size, addr;
ghes_estatus_pool_size_request += PAGE_ALIGN(len);
size = gen_pool_size(ghes_estatus_pool);
if (size >= ghes_estatus_pool_size_request)
return 0;
- pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
- for (i = 0; i < pages; i++) {
- addr = __get_free_page(GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
- if (ret)
- return ret;
- }
- return 0;
+ addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
+ if (!addr)
+ return -ENOMEM;
+
+ /*
+ * New allocation must be visible in all pgd before it can be found by
+ * an NMI allocating from the pool.
+ */
+ vmalloc_sync_mappings();
+
+ return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
}
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6b81746cd13c..e5b1b3f1c231 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -324,8 +324,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
- if (idx > its->its_count) {
- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+ if (idx >= its->its_count) {
+ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0a3ca20f99af..6b2c9d68d810 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -158,7 +158,7 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
{
acpi_status status;
- if (!*data)
+ if (!data)
return -EINVAL;
status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e0ea8f56d2bf..9ec4618df533 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -360,8 +360,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
union acpi_object *psd = NULL;
struct acpi_psd_package *pdomain;
- status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
- ACPI_TYPE_PACKAGE);
+ status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
+ &buffer, ACPI_TYPE_PACKAGE);
+ if (status == AE_NOT_FOUND) /* _PSD is optional */
+ return 0;
if (ACPI_FAILURE(status))
return -ENODEV;
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
index c68e72414a67..435bd0ffc8c0 100644
--- a/drivers/acpi/custom_method.c
+++ b/drivers/acpi/custom_method.c
@@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if ((*ppos > max_size) ||
(*ppos + count > max_size) ||
(*ppos + count < count) ||
- (count > uncopied_bytes))
+ (count > uncopied_bytes)) {
+ kfree(buf);
return -EINVAL;
+ }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
@@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
+ kfree(buf);
return count;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 993fd31394c8..c76e4527620c 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1096,9 +1096,19 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
+ /*
+ * Skip devices whose ACPI companions match the device IDs below,
+ * because they require special power management handling incompatible
+ * with the generic ACPI PM domain.
+ */
+ static const struct acpi_device_id special_pm_ids[] = {
+ {"PNP0C0B", }, /* Generic ACPI fan */
+ {"INT3404", }, /* Fan */
+ {}
+ };
struct acpi_device *adev = ACPI_COMPANION(dev);
- if (!adev)
+ if (!adev || !acpi_match_device_ids(adev, special_pm_ids))
return -ENODEV;
if (dev->pm_domain)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 416953a42510..c6c7e4287c4d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -375,19 +375,21 @@ void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
-static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
+/* Must be called with mutex_lock(&acpi_ioremap_lock) */
+static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
- if (!--map->refcount)
+ unsigned long refcount = --map->refcount;
+
+ if (!refcount)
list_del_rcu(&map->list);
+ return refcount;
}
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
- if (!map->refcount) {
- synchronize_rcu_expedited();
- acpi_unmap(map->phys, map->virt);
- kfree(map);
- }
+ synchronize_rcu_expedited();
+ acpi_unmap(map->phys, map->virt);
+ kfree(map);
}
/**
@@ -407,6 +409,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
+ unsigned long refcount;
if (!acpi_gbl_permanent_mmap) {
__acpi_unmap_table(virt, size);
@@ -420,10 +423,11 @@ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
return;
}
- acpi_os_drop_map_ref(map);
+ refcount = acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
- acpi_os_map_cleanup(map);
+ if (!refcount)
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
@@ -464,6 +468,7 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
u64 addr;
struct acpi_ioremap *map;
+ unsigned long refcount;
if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
@@ -479,10 +484,11 @@ void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
mutex_unlock(&acpi_ioremap_lock);
return;
}
- acpi_os_drop_map_ref(map);
+ refcount = acpi_os_drop_map_ref(map);
mutex_unlock(&acpi_ioremap_lock);
- acpi_os_map_cleanup(map);
+ if (!refcount)
+ acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
@@ -1126,6 +1132,7 @@ void acpi_os_wait_events_complete(void)
flush_workqueue(kacpid_wq);
flush_workqueue(kacpi_notify_wq);
}
+EXPORT_SYMBOL(acpi_os_wait_events_complete);
struct acpi_hp_work {
struct work_struct work;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c576a6fe4ebb..94ded9513c73 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.
*/
- if (!acpi_pci_irq_valid(dev, pin))
+ if (!acpi_pci_irq_valid(dev, pin)) {
+ kfree(entry);
return 0;
+ }
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index b66815f35be6..317ecc2e5757 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -454,8 +454,9 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
- acpi_format_exception(status));
+ dev_info(&device->dev, "_OSC failed (%s)%s\n",
+ acpi_format_exception(status),
+ pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
*no_aspm = 1;
return;
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13ad4bbb..4a76000bcf7a 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -443,9 +443,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
/*
* The spec requires that bit 4 always be 1. If it's not set, assume
- * that the implementation doesn't support an SBS charger
+ * that the implementation doesn't support an SBS charger.
+ *
+ * And on some MacBooks a status of 0xffff is always returned, no
+ * matter whether the charger is plugged in or not, which is also
+ * wrong, so ignore the SBS charger for those too.
*/
- if (!((status >> 4) & 0x1))
+ if (!((status >> 4) & 0x1) || status == 0xffff)
return -ENODEV;
sbs->charger_present = (status >> 15) & 0x1;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 7a3431018e0a..5008ead4609a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
hc->callback = NULL;
hc->context = NULL;
mutex_unlock(&hc->lock);
+ acpi_os_wait_events_complete();
return 0;
}
@@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device)
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
+ acpi_os_wait_events_complete();
kfree(hc);
device->driver_data = NULL;
return 0;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 80499f421a29..e12288c245b5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -334,7 +334,8 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
+ BINDER_LOOPER_STATE_POLL = 0x40,
};
struct binder_thread {
@@ -488,7 +489,7 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
new_buffer_size = binder_buffer_size(proc, new_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
+ "%d: add free buffer, size %zd, at %pK\n",
proc->pid, new_buffer_size, new_buffer);
while (*p) {
@@ -566,7 +567,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
struct mm_struct *mm;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
+ "%d: %s pages %pK-%pK\n", proc->pid,
allocate ? "allocate" : "free", start, end);
if (end <= start)
@@ -581,6 +582,12 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
if (mm) {
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm)) {
+ if (allocate == 0)
+ goto free_range;
+ goto err_no_vma;
+ }
+
vma = proc->vma;
if (vma && mm != proc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
@@ -606,7 +613,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
BUG_ON(*page);
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
proc->pid, page_addr);
goto err_alloc_page_failed;
}
@@ -615,7 +622,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
proc->pid, page_addr);
goto err_map_kernel_failed;
}
@@ -719,7 +726,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
proc->pid, size, buffer, buffer_size);
has_page_addr =
@@ -749,7 +756,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
binder_insert_free_buffer(proc, new_buffer);
}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
+ "%d: binder_alloc_buf size %zd got %pK\n",
proc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
@@ -789,7 +796,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
if (buffer_end_page(prev) == buffer_end_page(buffer))
free_page_end = 0;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
+ "%d: merge free, buffer %pK share page with %pK\n",
proc->pid, buffer, prev);
}
@@ -802,14 +809,14 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
buffer_start_page(buffer))
free_page_start = 0;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
+ "%d: merge free, buffer %pK share page with %pK\n",
proc->pid, buffer, prev);
}
}
list_del(&buffer->entry);
if (free_page_start || free_page_end) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
+ "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
proc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(proc, 0, free_page_start ?
@@ -830,7 +837,7 @@ static void binder_free_buf(struct binder_proc *proc,
ALIGN(buffer->offsets_size, sizeof(void *));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
proc->pid, buffer, size, buffer_size);
BUG_ON(buffer->free);
@@ -1260,7 +1267,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %p\n",
+ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -2123,7 +2130,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -2622,6 +2629,27 @@ static int binder_free_thread(struct binder_proc *proc,
} else
BUG();
}
+
+ /*
+ * If this thread used poll, make sure we remove the waitqueue
+ * from any epoll data structures holding it with POLLFREE.
+ * waitqueue_active() is safe to use here because we're holding
+ * the global lock.
+ */
+ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+ waitqueue_active(&thread->wait)) {
+ wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+ }
+
+ /*
+ * This is needed to avoid races between wake_up_poll() above and
+ * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+ * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+ * lock, so we can be sure it's done after calling synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(&thread->todo);
@@ -2645,6 +2673,8 @@ static unsigned int binder_poll(struct file *filp,
return POLLERR;
}
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo) && thread->return_error == BR_OK;
@@ -2930,7 +2960,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
vma->vm_start += PAGE_SIZE;
}
}
@@ -3191,7 +3221,7 @@ static void binder_deferred_release(struct binder_proc *proc)
page_addr = proc->buffer + i * PAGE_SIZE;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
+ "%s: %d: page %d at %pK not freed\n",
__func__, proc->pid, i, page_addr);
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
__free_page(proc->pages[i]);
@@ -3272,7 +3302,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
struct binder_transaction *t)
{
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -3286,7 +3316,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
if (t->buffer->target_node)
seq_printf(m, " node %d",
t->buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %p\n",
+ seq_printf(m, " size %zd:%zd data %pK\n",
t->buffer->data_size, t->buffer->offsets_size,
t->buffer->data);
}
@@ -3294,7 +3324,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+ seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
buffer->transaction ? "active" : "delivered");
@@ -3397,7 +3427,7 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cbdf105dfe88..ca84e84bc369 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -100,7 +100,8 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
+ ARCH_BCM_63XX
help
This option enables support for the AHCI SATA3 controller found on
Broadcom SoC's.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5408a292078b..89e62043d02e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -86,6 +86,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
+static void ahci_shutdown_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -582,6 +583,7 @@ static struct pci_driver ahci_pci_driver = {
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
+ .shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},
@@ -1775,6 +1777,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
+static void ahci_shutdown_one(struct pci_dev *pdev)
+{
+ ata_pci_shutdown_one(pdev);
+}
+
static void ahci_remove_one(struct pci_dev *pdev)
{
pm_runtime_get_noresume(&pdev->dev);
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6f8a7341fa08..f50a76ad63e4 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include "ahci.h"
@@ -88,6 +89,7 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
+ struct reset_control *rcdev;
};
static const struct ata_port_info ahci_brcm_port_info = {
@@ -226,19 +228,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
brcm_sata_phy_disable(priv, i);
}
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
struct brcm_ahci_priv *priv)
{
- void __iomem *ahci;
- struct resource *res;
u32 impl;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
- ahci = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ahci))
- return 0;
-
- impl = readl(ahci + HOST_PORTS_IMPL);
+ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (fls(impl) > SATA_TOP_MAX_PHYS)
dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -246,9 +241,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
else if (!impl)
dev_info(priv->dev, "no ports found\n");
- devm_iounmap(&pdev->dev, ahci);
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
return impl;
}
@@ -275,11 +267,10 @@ static int brcm_ahci_suspend(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
- ret = ahci_platform_suspend(dev);
brcm_sata_phys_disable(priv);
- return ret;
+
+ return ahci_platform_suspend(dev);
}
static int brcm_ahci_resume(struct device *dev)
@@ -287,11 +278,44 @@ static int brcm_ahci_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
+
+ /* Make sure clocks are turned on before re-configuration */
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ return ret;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
- return ahci_platform_resume(dev);
+
+ /* Since we had to enable clocks earlier on, we cannot use
+ * ahci_platform_resume() as-is since a second call to
+ * ahci_platform_enable_resources() would bump up the resources
+ * (regulators, clocks, PHYs) count artificially so we copy the part
+ * after ahci_platform_enable_resources().
+ */
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
+ ret = ahci_platform_resume_host(dev);
+ if (ret)
+ goto out_disable_platform_phys;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+ ahci_platform_disable_clks(hpriv);
+ return ret;
}
#endif
@@ -332,43 +356,73 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
+ /* Reset is optional depending on platform */
+ priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_deassert(priv->rcdev);
+
if ((priv->version == BRCM_SATA_BCM7425) ||
(priv->version == BRCM_SATA_NSP)) {
priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
}
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv)) {
+ ret = PTR_ERR(hpriv);
+ goto out_reset;
+ }
+
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ goto out_reset;
+
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
brcm_sata_init(priv);
- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
- if (!priv->port_mask)
- return -ENODEV;
+ /* Initializes priv->port_mask which is used below */
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+ goto out_disable_clks;
+ }
+ /* Must be done before ahci_platform_enable_phys() */
brcm_sata_phys_enable(priv);
- hpriv = ahci_platform_get_resources(pdev);
- if (IS_ERR(hpriv))
- return PTR_ERR(hpriv);
hpriv->plat_data = priv;
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
brcm_sata_alpm_init(hpriv);
- ret = ahci_platform_enable_resources(hpriv);
- if (ret)
- return ret;
-
if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
if (ret)
- return ret;
+ goto out_disable_platform_phys;
dev_info(dev, "Broadcom AHCI SATA3 registered\n");
return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+out_reset:
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_assert(priv->rcdev);
+ return ret;
}
static int brcm_ahci_remove(struct platform_device *pdev)
@@ -378,12 +432,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
+ brcm_sata_phys_disable(priv);
+
ret = ata_platform_remove_one(pdev);
if (ret)
return ret;
- brcm_sata_phys_disable(priv);
-
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f233ce60a678..1610fff19bb3 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -190,7 +190,6 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
static bool ahci_em_messages __read_mostly = true;
-EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index cd2eab6aa92e..0b80502bc1c5 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
* RETURNS:
* 0 on success otherwise a negative error code
*/
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
@@ -71,6 +71,7 @@ disable_phys:
}
return rc;
}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
/**
* ahci_platform_disable_phys - Disable PHYs
@@ -78,7 +79,7 @@ disable_phys:
*
* This function disables all PHYs found in hpriv->phys.
*/
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
@@ -87,6 +88,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
phy_exit(hpriv->phys[i]);
}
}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
* ahci_platform_enable_clks - Enable platform clocks
@@ -300,6 +302,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
hpriv->phys[port] = NULL;
rc = 0;
break;
+ case -EPROBE_DEFER:
+ /* Do not complain yet */
+ break;
default:
dev_err(dev,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 35be49f5791d..ba0cffbd0bb6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4355,9 +4355,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
- /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
- { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
- { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards)
+ the ST disks also have LPM issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
+ ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
@@ -6547,6 +6550,9 @@ void ata_host_detach(struct ata_host *host)
{
int i;
+ /* Ensure ata_port probe has completed */
+ async_synchronize_full();
+
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
@@ -6574,6 +6580,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}
+void ata_pci_shutdown_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ ap->pflags |= ATA_PFLAG_FROZEN;
+
+ /* Disable port interrupts */
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ /* Stop the port DMA engines */
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+}
+
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
@@ -7194,6 +7220,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 90c38778bc1f..16f8fda89981 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1600,7 +1600,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
+ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
@@ -1849,7 +1849,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+ if (dev->class == ATA_DEV_ZAC &&
+ ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
char sense_key, asc, ascq;
sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
@@ -1903,10 +1904,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
}
switch (qc->dev->class) {
- case ATA_DEV_ATA:
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
ata_eh_request_sense(qc, qc->scsicmd);
+ /* fall through */
+ case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 85aa76116a30..7924d0635718 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -764,6 +764,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
+ rc = -ENODEV;
goto fail;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a3d60ccafd9a..c4f2b563c9f0 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1734,6 +1734,21 @@ nothing_to_do:
return 1;
}
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+ struct request *rq = scmd->request;
+ u32 req_blocks;
+
+ if (!blk_rq_is_passthrough(rq))
+ return true;
+
+ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+ if (n_blocks > req_blocks)
+ return false;
+
+ return true;
+}
+
/**
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
* @qc: Storage for translated ATA taskfile
@@ -1776,6 +1791,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_10_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_6:
case WRITE_6:
@@ -1790,6 +1807,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
*/
if (!n_block)
n_block = 256;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_16:
case WRITE_16:
@@ -1800,6 +1819,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_16_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
default:
DPRINTK("no-byte command\n");
@@ -4423,22 +4444,19 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host_with_dma(ap->scsi_host,
- &ap->tdev, ap->host->dev);
+ rc = scsi_add_host_with_dma(shost, &ap->tdev, ap->host->dev);
if (rc)
- goto err_add;
+ goto err_alloc;
}
return 0;
- err_add:
- scsi_host_put(host->ports[i]->scsi_host);
err_alloc:
while (--i >= 0) {
struct Scsi_Host *shost = host->ports[i]->scsi_host;
+ /* scsi_host_put() is in ata_devres_release() */
scsi_remove_host(shost);
- scsi_host_put(shost);
}
return rc;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8d22acdf90f0..0e2bc5b9a78c 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int offset;
unsigned char *buf;
+ if (!qc->cursg) {
+ qc->curbytes = qc->nbytes;
+ return;
+ }
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
@@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
+ if (!qc->cursg)
+ ap->hsm_task_state = HSM_ST_LAST;
qc->cursg_ofs = 0;
}
}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 0ad96c647541..083856272e92 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -51,38 +51,52 @@ static int eject_tray(struct ata_device *dev)
/* Per the spec, only slot type and drawer type ODD can be supported */
static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
{
- char buf[16];
+ char *buf;
unsigned int ret;
- struct rm_feature_desc *desc = (void *)(buf + 8);
+ struct rm_feature_desc *desc;
struct ata_taskfile tf;
- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
- 0, sizeof(buf),
+ 0, 16,
0, 0, 0,
};
+ buf = kzalloc(16, GFP_KERNEL);
+ if (!buf)
+ return ODD_MECH_TYPE_UNSUPPORTED;
+ desc = (void *)(buf + 8);
+
ata_tf_init(dev, &tf);
tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
tf.protocol = ATAPI_PROT_PIO;
- tf.lbam = sizeof(buf);
+ tf.lbam = 16;
ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
- buf, sizeof(buf), 0);
- if (ret)
+ buf, 16, 0);
+ if (ret) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (be16_to_cpu(desc->feature_code) != 3)
+ if (be16_to_cpu(desc->feature_code) != 3) {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
- if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+ if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_SLOT;
- else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+ } else if (desc->mech_type == 1 && desc->load == 0 &&
+ desc->eject == 1) {
+ kfree(buf);
return ODD_MECH_TYPE_DRAWER;
- else
+ } else {
+ kfree(buf);
return ODD_MECH_TYPE_UNSUPPORTED;
+ }
}
/* Test if ODD is zero power ready by sense code */
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index bd6b089c67a3..634c814cbeda 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* start of new transfer.
*/
drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
+ drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
drv_data->dma_rx_data.name = "ep93xx-pata-rx";
drv_data->dma_rx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
@@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
return;
drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
+ drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
drv_data->dma_tx_data.name = "ep93xx-pata-tx";
drv_data->dma_tx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
@@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure receive channel direction and source address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
@@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure transmit channel direction and destination address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 31c60101a69a..7fa840170151 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -199,7 +199,7 @@ config ATM_NICSTAR_USE_SUNI
make the card work).
config ATM_NICSTAR_USE_IDT77105
- bool "Use IDT77015 PHY driver (25Mbps)"
+ bool "Use IDT77105 PHY driver (25Mbps)"
depends on ATM_NICSTAR
help
Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 40c2d561417b..88819409e0be 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -372,7 +372,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
<< MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
if (!eff) size += skip;
@@ -445,7 +445,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
if (size != eff) {
dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
(vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
- j++;
+ dma[j++] = 0;
}
if (!j || j > 2*RX_DMA_BUF) {
printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 85aaf2222587..82296fe2ba3b 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -927,6 +927,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
}
if (!to) {
printk ("No more free channels for FS50..\n");
+ kfree(vcc);
return -EBUSY;
}
vcc->channo = dev->channo;
@@ -937,6 +938,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
printk ("Channel is in use for FS155.\n");
+ kfree(vcc);
return -EBUSY;
}
}
@@ -950,6 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
tc, sizeof (struct fs_transmit_config));
if (!tc) {
fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+ kfree(vcc);
return -ENOMEM;
}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index b2756765950e..fe47c924dc64 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -63,6 +63,7 @@
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
}
if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
board = ia_cmds.status;
- if ((board < 0) || (board > iadev_count))
- board = 0;
+
+ if ((board < 0) || (board > iadev_count))
+ board = 0;
+ board = array_index_nospec(board, iadev_count + 1);
+
iadev = ia_dev[board];
switch (ia_cmds.cmd) {
case MEMDUMP:
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a0b88f148990..e23e2672a1d6 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
#define zin_n(r) inl(zatm_dev->base+r*4)
#define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
#define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
-#define zwait while (zin(CMR) & uPD98401_BUSY)
+#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
/* RX0, RX1, TX0, TX1 */
static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
@@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
{
- zwait;
+ zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
{
- zwait;
+ zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
(uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait;
+ zwait();
return zin(CER);
}
@@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
}
if (first) {
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(virt_to_bus(first),CER);
zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
CMR);
@@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
}
if (zatm_vcc->pool < 0) return -EMSGSIZE;
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_OPEN_CHAN,CMR);
- zwait;
+ zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
pos = vcc->vci >> 1;
shift = (1-(vcc->vci & 1)) << 4;
zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
- zwait;
+ zwait();
zout(uPD98401_NOP,CMR);
- zwait;
+ zwait();
zout(uPD98401_NOP,CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
}
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
udelay(10); /* why oh why ... ? */
zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
"%d\n",vcc->dev->number,zatm_vcc->rx_chan);
@@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n");
skb_queue_tail(&zatm_vcc->tx_queue,skb);
DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
uPD98401_TXVC_QRP));
- zwait;
+ zwait();
zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
uPD98401_CHAN_ADDR_SHIFT),CMR);
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc)
}
spin_lock_irqsave(&zatm_dev->lock, flags);
#if 0
- zwait;
+ zwait();
zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
#endif
- zwait;
+ zwait();
zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
- zwait;
+ zwait();
if (!(zin(CMR) & uPD98401_CHAN_ADDR))
printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
"%d\n",vcc->dev->number,chan);
@@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc)
zatm_vcc->tx_chan = 0;
if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
spin_lock_irqsave(&zatm_dev->lock, flags);
- zwait;
+ zwait();
zout(uPD98401_OPEN_CHAN,CMR);
- zwait;
+ zwait();
DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
spin_unlock_irqrestore(&zatm_dev->lock, flags);
@@ -1559,7 +1559,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
- zwait;
+ zwait();
zout(value,CER);
zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
@@ -1571,10 +1571,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
struct zatm_dev *zatm_dev;
zatm_dev = ZATM_DEV(dev);
- zwait;
+ zwait();
zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
(uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
- zwait;
+ zwait();
return zin(CER) & 0xff;
}
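
The zatm rework turns zwait from an object-like macro, whose loop body was whatever semicolon the caller wrote after it, into a function-like macro with an explicit empty do/while body; each busy-wait then expands to one self-contained statement and no longer looks like an accidental empty loop to the compiler. A hedged sketch of the two shapes (read_status() and BUSY stand in for zin(CMR) and uPD98401_BUSY):

static unsigned int read_status(void);	/* placeholder for zin(CMR) */
#define BUSY	0x1			/* placeholder for uPD98401_BUSY */

/* Old shape: the loop body is the semicolon supplied at the call site. */
#define poll_busy_old	while (read_status() & BUSY)

/* Patched shape: "poll_busy_new();" is a single complete statement and
 * behaves predictably inside if/else and other control flow. */
#define poll_busy_new()	do { } while (read_status() & BUSY)
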
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89b032f2ffd2..08da6160e94d 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -461,9 +461,9 @@ int component_bind_all(struct device *master_dev, void *data)
}
if (ret != 0) {
- for (; i--; )
- if (!master->match->compare[i].duplicate) {
- c = master->match->compare[i].component;
+ for (; i > 0; i--)
+ if (!master->match->compare[i - 1].duplicate) {
+ c = master->match->compare[i - 1].component;
component_unbind(c, master, data);
}
}
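
The component_bind_all error path now counts i down to zero and indexes with i - 1, unbinding only the entries that were successfully bound before the failure while the loop variable itself never goes below zero. The same unwind pattern in a hedged, generic form (struct item and cleanup_item() are illustrative names):

struct item;
static void cleanup_item(struct item *it);	/* illustrative callback */

/* Undo elements 0..i-1 after element i failed; i stays non-negative,
 * so the pattern also works if i is later made unsigned. */
static void unwind(struct item *items, unsigned int i)
{
	for (; i > 0; i--)
		cleanup_item(&items[i - 1]);
}
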
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 465cbecb7d9f..99aa6f1a7e3c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@@ -1395,12 +1396,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
*/
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
+ unsigned int ref;
+
/* see if we live in a "glue" directory */
if (!live_in_glue_dir(glue_dir, dev))
return;
mutex_lock(&gdp_mutex);
- if (!kobject_has_children(glue_dir))
+ /**
+ * There is a race condition between removing glue directory
+ * and adding a new device under the glue directory.
+ *
+ * CPU1: CPU2:
+ *
+ * device_add()
+ * get_device_parent()
+ * class_dir_create_and_add()
+ * kobject_add_internal()
+ * create_dir() // create glue_dir
+ *
+ * device_add()
+ * get_device_parent()
+ * kobject_get() // get glue_dir
+ *
+ * device_del()
+ * cleanup_glue_dir()
+ * kobject_del(glue_dir)
+ *
+ * kobject_add()
+ * kobject_add_internal()
+ * create_dir() // in glue_dir
+ * sysfs_create_dir_ns()
+ * kernfs_create_dir_ns(sd)
+ *
+ * sysfs_remove_dir() // glue_dir->sd=NULL
+ * sysfs_put() // free glue_dir->sd
+ *
+ * // sd is freed
+ * kernfs_new_node(sd)
+ * kernfs_get(glue_dir)
+ * kernfs_add_one()
+ * kernfs_put()
+ *
+ * Before CPU1 removes the last child device under the glue dir, if CPU2
+ * adds a new device under the glue dir, the glue_dir kobject reference
+ * count is increased to 2 by kobject_get(k), and CPU2 has already called
+ * kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir() and
+ * sysfs_put(). This results in glue_dir->sd being freed.
+ *
+ * Then CPU2 will see a stale "empty" but still potentially used
+ * glue dir around in kernfs_new_node().
+ *
+ * In order to avoid this happening, we also should make sure that
+ * kernfs_node for glue_dir is released in CPU1 only when refcount
+ * for glue_dir kobj is 1.
+ */
+ ref = atomic_read(&glue_dir->kref.refcount);
+ if (!kobject_has_children(glue_dir) && !--ref)
kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
@@ -2617,6 +2669,8 @@ void device_shutdown(void)
wait_for_device_probe();
device_block_probing();
+ cpufreq_suspend();
+
spin_lock(&devices_kset->list_lock);
/*
* Walk the devices list backward, shutting down each in turn.
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f1f4ce7ddb47..677c5f36674b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -531,11 +531,33 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -543,6 +565,9 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
+ &dev_attr_tsx_async_abort.attr,
+ &dev_attr_itlb_multihit.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index dbf92e3272f9..ec8c515552d2 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -346,7 +346,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
- WARN_ON(!list_empty(&dev->devres_head));
+ if (!list_empty(&dev->devres_head)) {
+ dev_crit(dev, "Resources present before probing\n");
+ return -EBUSY;
+ }
re_probe:
dev->driver = drv;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c5cdd190b781..6a3694a4843f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -500,15 +500,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
nid = memory_add_physaddr_to_nid(phys_addr);
- ret = add_memory(nid, phys_addr,
- MIN_MEMORY_BLOCK_SIZE * sections_per_block);
+ ret = __add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block);
if (ret)
goto out;
ret = count;
out:
+ unlock_device_hotplug();
return ret;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 14ff40371f01..bef299ef6227 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -27,6 +27,8 @@
#include <linux/clk/clk-conf.h>
#include <linux/limits.h>
#include <linux/property.h>
+#include <linux/kmemleak.h>
+#include <linux/types.h>
#include "base.h"
#include "power/power.h"
@@ -67,7 +69,7 @@ void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -152,7 +154,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
- int i;
+ u32 i;
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
@@ -349,7 +351,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
*/
int platform_device_add(struct platform_device *pdev)
{
- int i, ret;
+ u32 i;
+ int ret;
if (!pdev)
return -EINVAL;
@@ -415,7 +418,7 @@ int platform_device_add(struct platform_device *pdev)
pdev->id = PLATFORM_DEVID_AUTO;
}
- while (--i >= 0) {
+ while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
@@ -436,7 +439,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
*/
void platform_device_del(struct platform_device *pdev)
{
- int i;
+ u32 i;
if (pdev) {
device_remove_properties(&pdev->dev);
@@ -516,6 +519,8 @@ struct platform_device *platform_device_register_full(
if (!pdev->dev.dma_mask)
goto err;
+ kmemleak_ignore(pdev->dev.dma_mask);
+
*pdev->dev.dma_mask = pdevinfo->dma_mask;
pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
}
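
Because the resource index in platform.c becomes u32, the error path can no longer be written as while (--i >= 0): that comparison is always true for an unsigned type. The while (i--) form tests the old value before decrementing, so it visits n-1 down to 0 and then stops. A small hedged sketch of the idiom (release_all() and its parameters are illustrative):

#include <linux/ioport.h>
#include <linux/types.h>

/* Release resources n-1 .. 0; the post-decrement test terminates
 * correctly even though the counter is unsigned. */
static void release_all(struct resource **res, u32 n)
{
	u32 i = n;

	while (i--)
		release_resource(res[i]);
}
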
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f1cbf63dace5..cd1188fb51f9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1385,6 +1385,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 69c84fddfe8a..1799a1dfa46e 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1506,6 +1506,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len);
+ else
+ ret = -ENOTSUPP;
/* If that didn't work fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index b63f23e6ad61..ddb32c890fa6 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -145,6 +145,7 @@ out2:
out1:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
@@ -153,6 +154,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
device_unregister(&soc_dev->dev);
}
+EXPORT_SYMBOL_GPL(soc_device_unregister);
static int __init soc_bus_register(void)
{
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index f499a469e66d..12b2cc9a3fbe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -78,7 +78,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
}
- v = BCMA_CORE_PCI_MDIODATA_START;
+ v |= BCMA_CORE_PCI_MDIODATA_START;
v |= BCMA_CORE_PCI_MDIODATA_READ;
v |= BCMA_CORE_PCI_MDIODATA_TA;
@@ -121,7 +121,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
}
- v = BCMA_CORE_PCI_MDIODATA_START;
+ v |= BCMA_CORE_PCI_MDIODATA_START;
v |= BCMA_CORE_PCI_MDIODATA_WRITE;
v |= BCMA_CORE_PCI_MDIODATA_TA;
v |= data;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 5fd50a284168..db4354fb2a0d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1699,11 +1699,41 @@ static const struct block_device_operations floppy_fops = {
.check_events = amiga_check_events,
};
+static struct gendisk *fd_alloc_disk(int drive)
+{
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ goto out;
+
+ disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
+ if (IS_ERR(disk->queue)) {
+ disk->queue = NULL;
+ goto out_put_disk;
+ }
+
+ unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
+ if (!unit[drive].trackbuf)
+ goto out_cleanup_queue;
+
+ return disk;
+
+out_cleanup_queue:
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+out_put_disk:
+ put_disk(disk);
+out:
+ unit[drive].type->code = FD_NODRIVE;
+ return NULL;
+}
+
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
- printk(KERN_INFO "FD: probing units\nfound ");
+ pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
@@ -1711,27 +1741,17 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
- disk = alloc_disk(1);
+
+ disk = fd_alloc_disk(drive);
if (!disk) {
- unit[drive].type->code = FD_NODRIVE;
+ pr_cont(" no mem for fd%d", drive);
+ nomem = 1;
continue;
}
unit[drive].gendisk = disk;
-
- disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
- if (!disk->queue) {
- unit[drive].type->code = FD_NODRIVE;
- continue;
- }
-
drives++;
- if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
- printk("no mem for ");
- unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
- drives--;
- nomem = 1;
- }
- printk("fd%d ",drive);
+
+ pr_cont(" fd%d",drive);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
@@ -1742,11 +1762,11 @@ static int __init fd_probe_drives(void)
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
- printk("no drives");
- printk("\n");
+ pr_cont(" no drives");
+ pr_cont("\n");
return drives;
}
- printk("\n");
+ pr_cont("\n");
return -ENOMEM;
}
@@ -1837,30 +1857,6 @@ out_blkdev:
return ret;
}
-#if 0 /* not safe to unload */
-static int __exit amiga_floppy_remove(struct platform_device *pdev)
-{
- int i;
-
- for( i = 0; i < FD_MAX_UNITS; i++) {
- if (unit[i].type->code != FD_NODRIVE) {
- struct request_queue *q = unit[i].gendisk->queue;
- del_gendisk(unit[i].gendisk);
- put_disk(unit[i].gendisk);
- kfree(unit[i].trackbuf);
- if (q)
- blk_cleanup_queue(q);
- }
- }
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
- free_irq(IRQ_AMIGA_CIAA_TB, NULL);
- free_irq(IRQ_AMIGA_DSKBLK, NULL);
- custom.dmacon = DMAF_DISK; /* disable DMA */
- amiga_chip_free(raw_buf);
- unregister_blkdev(FLOPPY_MAJOR, "fd");
-}
-#endif
-
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c76d4016eeb..7e35574a17df 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -581,6 +581,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
return kobj;
}
+static inline void brd_check_and_reset_par(void)
+{
+ if (unlikely(!max_part))
+ max_part = 1;
+
+ /*
+ * make sure 'max_part' can be divided exactly by (1U << MINORBITS),
+ * otherwise, it is possiable to get same dev_t when adding partitions.
+ */
+ if ((1U << MINORBITS) % max_part != 0)
+ max_part = 1UL << fls(max_part);
+
+ if (max_part > DISK_MAX_PARTS) {
+ pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
+ DISK_MAX_PARTS, DISK_MAX_PARTS);
+ max_part = DISK_MAX_PARTS;
+ }
+}
+
static int __init brd_init(void)
{
struct brd_device *brd, *next;
@@ -604,8 +623,7 @@ static int __init brd_init(void)
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
return -EIO;
- if (unlikely(!max_part))
- max_part = 1;
+ brd_check_and_reset_par();
for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);
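
brd_check_and_reset_par() only has to adjust max_part when it does not divide 1U << MINORBITS (MINORBITS is 20), i.e. when it is not a power of two; in that case 1UL << fls(max_part) rounds it up to the next power of two so every (device, partition) pair maps to a unique dev_t. A worked example, assuming the module were loaded with max_part=6:

	1U << MINORBITS  = 1 << 20 = 1048576
	1048576 % 6      = 4            -> not an exact divisor, so adjust
	fls(6)           = 3            (6 = 0b110, highest set bit, 1-based)
	max_part         = 1UL << 3 = 8 -> 1048576 % 8 == 0, minors are unique

Powers of two never reach this branch, because they already divide 1U << MINORBITS exactly.
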
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 83482721bc01..daa9cef96ec6 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -332,6 +332,8 @@ static int drbd_thread_setup(void *arg)
thi->name[0],
resource->name);
+ allow_kernel_signal(DRBD_SIGKILL);
+ allow_kernel_signal(SIGXCPU);
restart:
retval = thi->function(thi);
@@ -793,7 +795,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
if (nc->tentative && connection->agreed_pro_version < 92) {
rcu_read_unlock();
- mutex_unlock(&sock->mutex);
drbd_err(connection, "--dry-run is not supported by peer");
return -EOPNOTSUPP;
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index abee91940a36..b809f325c2be 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1508,6 +1508,30 @@ static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *dis
}
}
+static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
+{
+ int err = -EBUSY;
+
+ if (device->act_log &&
+ device->act_log->nr_elements == dc->al_extents)
+ return 0;
+
+ drbd_suspend_io(device);
+ /* If IO completion is currently blocked, we would likely wait
+ * "forever" for the activity log to become unused. So we don't. */
+ if (atomic_read(&device->ap_bio_cnt))
+ goto out;
+
+ wait_event(device->al_wait, lc_try_lock(device->act_log));
+ drbd_al_shrink(device);
+ err = drbd_check_al_size(device, dc);
+ lc_unlock(device->act_log);
+ wake_up(&device->al_wait);
+out:
+ drbd_resume_io(device);
+ return err;
+}
+
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
@@ -1570,15 +1594,12 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
}
}
- drbd_suspend_io(device);
- wait_event(device->al_wait, lc_try_lock(device->act_log));
- drbd_al_shrink(device);
- err = drbd_check_al_size(device, new_disk_conf);
- lc_unlock(device->act_log);
- wake_up(&device->al_wait);
- drbd_resume_io(device);
-
+ err = disk_opts_check_al_size(device, new_disk_conf);
if (err) {
+ /* Could be just "busy". Ignore?
+ * Introduce dedicated error code? */
+ drbd_msg_put_info(adm_ctx.reply_skb,
+ "Try again without changing current al-extents setting");
retcode = ERR_NOMEM;
goto fail_unlock;
}
@@ -1927,9 +1948,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
}
}
- if (device->state.conn < C_CONNECTED &&
- device->state.role == R_PRIMARY && device->ed_uuid &&
- (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
+ if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
+ (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
+ (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
(unsigned long long)device->ed_uuid);
retcode = ERR_DATA_NOT_CURRENT;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 83957a1e15ed..a10ff6e8c20b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4037,6 +4037,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, p_csize, my_usize;
+ sector_t new_size, cur_size;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
@@ -4044,6 +4045,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
+ cur_size = drbd_get_capacity(device->this_bdev);
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
@@ -4054,7 +4056,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
device->p_size = p_size;
if (get_ldev(device)) {
- sector_t new_size, cur_size;
rcu_read_lock();
my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
@@ -4072,7 +4073,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
- cur_size = drbd_get_capacity(device->this_bdev);
if (new_size < cur_size &&
device->state.disk >= D_OUTDATED &&
device->state.conn < C_CONNECTED) {
@@ -4137,9 +4137,36 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
*
* However, if he sends a zero current size,
* take his (user-capped or) backing disk size anyways.
+ *
+ * Unless of course he does not have a disk himself.
+ * In which case we ignore this completely.
*/
+ sector_t new_size = p_csize ?: p_usize ?: p_size;
drbd_reconsider_queue_parameters(device, NULL, o);
- drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
+ if (new_size == 0) {
+ /* Ignore, the peer does not know anything. */
+ } else if (new_size == cur_size) {
+ /* nothing to do */
+ } else if (cur_size != 0 && p_size == 0) {
+ drbd_warn(device, "Ignored diskless peer device size (peer:%llu != me:%llu sectors)!\n",
+ (unsigned long long)new_size, (unsigned long long)cur_size);
+ } else if (new_size < cur_size && device->state.role == R_PRIMARY) {
+ drbd_err(device, "The peer's device size is too small! (%llu < %llu sectors); demote me first!\n",
+ (unsigned long long)new_size, (unsigned long long)cur_size);
+ conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
+ return -EIO;
+ } else {
+ /* I believe the peer, if
+ * - I don't have a current size myself
+ * - we agree on the size anyways
+ * - I do have a current size, am Secondary,
+ * and he has the only disk
+ * - I do have a current size, am Primary,
+ * and he has the only disk,
+ * which is larger than my current size
+ */
+ drbd_set_my_capacity(device, new_size);
+ }
}
if (get_ldev(device)) {
@@ -4425,6 +4452,25 @@ static int receive_state(struct drbd_connection *connection, struct packet_info
if (peer_state.conn == C_AHEAD)
ns.conn = C_BEHIND;
+ /* TODO:
+ * if (primary and diskless and peer uuid != effective uuid)
+ * abort attach on peer;
+ *
+ * If this node does not have good data, was already connected, but
+ * the peer did a late attach only now, trying to "negotiate" with me,
+ * AND I am currently Primary, possibly frozen, with some specific
+ * "effective" uuid, this should never be reached, really, because
+ * we first send the uuids, then the current state.
+ *
+ * In this scenario, we already dropped the connection hard
+ * when we received the unsuitable uuids (receive_uuids()).
+ *
+ * Should we want to change this, that is: not drop the connection in
+ * receive_uuids() already, then we would need to add a branch here
+ * that aborts the attach of "unsuitable uuids" on the peer in case
+ * this node is currently Diskless Primary.
+ */
+
if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
get_ldev_if_state(device, D_NEGOTIATING)) {
int cr; /* consider resync */
@@ -5297,7 +5343,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+ struct shash_desc *desc;
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -5310,6 +5356,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
+ desc = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(connection->cram_hmac_tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ rv = -1;
+ goto fail;
+ }
desc->tfm = connection->cram_hmac_tfm;
desc->flags = 0;
@@ -5452,7 +5505,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
- shash_desc_zero(desc);
+ if (desc) {
+ shash_desc_zero(desc);
+ kfree(desc);
+ }
return rv;
}
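
The drbd_do_auth change drops SHASH_DESC_ON_STACK, whose size depends on the transform, in favour of a heap buffer sized from crypto_shash_descsize(), keeping the HMAC descriptor off the kernel stack. A hedged sketch of just the allocation pattern (alloc_shash_desc() is an illustrative helper, not part of the patch):

#include <crypto/hash.h>
#include <linux/slab.h>

/* Allocate a shash descriptor sized for this transform at run time. */
static struct shash_desc *alloc_shash_desc(struct crypto_shash *tfm)
{
	struct shash_desc *desc;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc)
		return NULL;

	desc->tfm = tfm;
	return desc;
}

As in the patch, the caller is expected to shash_desc_zero() and kfree() the descriptor once the digest work is done.
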
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 6c9d5d4a8a75..110f64d9e91c 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -126,7 +126,7 @@ extern enum drbd_state_rv _drbd_set_state(struct drbd_device *, union drbd_state
enum chg_state_flags,
struct completion *done);
extern void print_st_err(struct drbd_device *, union drbd_state,
- union drbd_state, int);
+ union drbd_state, enum drbd_state_rv);
enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6914c6e1e1a8..4496e7a49235 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -192,6 +192,7 @@ static int print_unex = 1;
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/async.h>
+#include <linux/compat.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -847,14 +848,17 @@ static void reset_fdc_info(int mode)
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
+ unsigned int new_fdc = fdc;
+
if (drive >= 0 && drive < N_DRIVE) {
- fdc = FDC(drive);
+ new_fdc = FDC(drive);
current_drive = drive;
}
- if (fdc != 1 && fdc != 0) {
+ if (new_fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
+ fdc = new_fdc;
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);
@@ -2113,6 +2117,9 @@ static void setup_format_params(int track)
raw_cmd->kernel_data = floppy_track_buffer;
raw_cmd->length = 4 * F_SECT_PER_TRACK;
+ if (!F_SECT_PER_TRACK)
+ return;
+
/* allow for about 30ms for data transport per track */
head_shift = (F_SECT_PER_TRACK + 5) / 6;
@@ -3233,8 +3240,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
int cnt;
/* sanity checking for parameters. */
- if (g->sect <= 0 ||
- g->head <= 0 ||
+ if ((int)g->sect <= 0 ||
+ (int)g->head <= 0 ||
+ /* check for overflow in max_sector */
+ (int)(g->sect * g->head) <= 0 ||
+ /* check for zero in F_SECT_PER_TRACK */
+ (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
/* check if reserved bits are set */
(g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
@@ -3378,6 +3389,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+static bool valid_floppy_drive_params(const short autodetect[8],
+ int native_format)
+{
+ size_t floppy_type_size = ARRAY_SIZE(floppy_type);
+ size_t i = 0;
+
+ for (i = 0; i < 8; ++i) {
+ if (autodetect[i] < 0 ||
+ autodetect[i] >= floppy_type_size)
+ return false;
+ }
+
+ if (native_format < 0 || native_format >= floppy_type_size)
+ return false;
+
+ return true;
+}
+
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
unsigned long param)
{
@@ -3504,6 +3533,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
SUPBOUND(size, strlen((const char *)outparam) + 1);
break;
case FDSETDRVPRM:
+ if (!valid_floppy_drive_params(inparam.dp.autodetect,
+ inparam.dp.native_format))
+ return -EINVAL;
*UDP = inparam.dp;
break;
case FDGETDRVPRM:
@@ -3569,6 +3601,332 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+#ifdef CONFIG_COMPAT
+
+struct compat_floppy_drive_params {
+ char cmos;
+ compat_ulong_t max_dtr;
+ compat_ulong_t hlt;
+ compat_ulong_t hut;
+ compat_ulong_t srt;
+ compat_ulong_t spinup;
+ compat_ulong_t spindown;
+ unsigned char spindown_offset;
+ unsigned char select_delay;
+ unsigned char rps;
+ unsigned char tracks;
+ compat_ulong_t timeout;
+ unsigned char interleave_sect;
+ struct floppy_max_errors max_errors;
+ char flags;
+ char read_track;
+ short autodetect[8];
+ compat_int_t checkfreq;
+ compat_int_t native_format;
+};
+
+struct compat_floppy_drive_struct {
+ signed char flags;
+ compat_ulong_t spinup_date;
+ compat_ulong_t select_date;
+ compat_ulong_t first_read_date;
+ short probed_format;
+ short track;
+ short maxblock;
+ short maxtrack;
+ compat_int_t generation;
+ compat_int_t keep_data;
+ compat_int_t fd_ref;
+ compat_int_t fd_device;
+ compat_int_t last_checked;
+ compat_caddr_t dmabuf;
+ compat_int_t bufblocks;
+};
+
+struct compat_floppy_fdc_state {
+ compat_int_t spec1;
+ compat_int_t spec2;
+ compat_int_t dtr;
+ unsigned char version;
+ unsigned char dor;
+ compat_ulong_t address;
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version;
+ unsigned char track[4];
+};
+
+struct compat_floppy_write_errors {
+ unsigned int write_errors;
+ compat_ulong_t first_error_sector;
+ compat_int_t first_error_generation;
+ compat_ulong_t last_error_sector;
+ compat_int_t last_error_generation;
+ compat_uint_t badness;
+};
+
+#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
+#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
+#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
+
+static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ struct compat_floppy_struct __user *arg)
+{
+ struct floppy_struct v;
+ int drive, type;
+ int err;
+
+ BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
+ offsetof(struct compat_floppy_struct, name));
+
+ if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
+ return -EPERM;
+
+ memset(&v, 0, sizeof(struct floppy_struct));
+ if (copy_from_user(&v, arg, offsetof(struct floppy_struct, name)))
+ return -EFAULT;
+
+ mutex_lock(&floppy_mutex);
+ drive = (long)bdev->bd_disk->private_data;
+ type = ITYPE(UDRS->fd_device);
+ err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
+ &v, drive, type, bdev);
+ mutex_unlock(&floppy_mutex);
+ return err;
+}
+
+static int compat_get_prm(int drive,
+ struct compat_floppy_struct __user *arg)
+{
+ struct compat_floppy_struct v;
+ struct floppy_struct *p;
+ int err;
+
+ memset(&v, 0, sizeof(v));
+ mutex_lock(&floppy_mutex);
+ err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
+ if (err) {
+ mutex_unlock(&floppy_mutex);
+ return err;
+ }
+ memcpy(&v, p, offsetof(struct floppy_struct, name));
+ mutex_unlock(&floppy_mutex);
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_struct)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_setdrvprm(int drive,
+ struct compat_floppy_drive_params __user *arg)
+{
+ struct compat_floppy_drive_params v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
+ if (!valid_floppy_drive_params(v.autodetect, v.native_format))
+ return -EINVAL;
+ mutex_lock(&floppy_mutex);
+ UDP->cmos = v.cmos;
+ UDP->max_dtr = v.max_dtr;
+ UDP->hlt = v.hlt;
+ UDP->hut = v.hut;
+ UDP->srt = v.srt;
+ UDP->spinup = v.spinup;
+ UDP->spindown = v.spindown;
+ UDP->spindown_offset = v.spindown_offset;
+ UDP->select_delay = v.select_delay;
+ UDP->rps = v.rps;
+ UDP->tracks = v.tracks;
+ UDP->timeout = v.timeout;
+ UDP->interleave_sect = v.interleave_sect;
+ UDP->max_errors = v.max_errors;
+ UDP->flags = v.flags;
+ UDP->read_track = v.read_track;
+ memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
+ UDP->checkfreq = v.checkfreq;
+ UDP->native_format = v.native_format;
+ mutex_unlock(&floppy_mutex);
+ return 0;
+}
+
+static int compat_getdrvprm(int drive,
+ struct compat_floppy_drive_params __user *arg)
+{
+ struct compat_floppy_drive_params v;
+
+ memset(&v, 0, sizeof(struct compat_floppy_drive_params));
+ mutex_lock(&floppy_mutex);
+ v.cmos = UDP->cmos;
+ v.max_dtr = UDP->max_dtr;
+ v.hlt = UDP->hlt;
+ v.hut = UDP->hut;
+ v.srt = UDP->srt;
+ v.spinup = UDP->spinup;
+ v.spindown = UDP->spindown;
+ v.spindown_offset = UDP->spindown_offset;
+ v.select_delay = UDP->select_delay;
+ v.rps = UDP->rps;
+ v.tracks = UDP->tracks;
+ v.timeout = UDP->timeout;
+ v.interleave_sect = UDP->interleave_sect;
+ v.max_errors = UDP->max_errors;
+ v.flags = UDP->flags;
+ v.read_track = UDP->read_track;
+ memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect));
+ v.checkfreq = UDP->checkfreq;
+ v.native_format = UDP->native_format;
+ mutex_unlock(&floppy_mutex);
+
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_getdrvstat(int drive, bool poll,
+ struct compat_floppy_drive_struct __user *arg)
+{
+ struct compat_floppy_drive_struct v;
+
+ memset(&v, 0, sizeof(struct compat_floppy_drive_struct));
+ mutex_lock(&floppy_mutex);
+
+ if (poll) {
+ if (lock_fdc(drive))
+ goto Eintr;
+ if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
+ goto Eintr;
+ process_fd_request();
+ }
+ v.spinup_date = UDRS->spinup_date;
+ v.select_date = UDRS->select_date;
+ v.first_read_date = UDRS->first_read_date;
+ v.probed_format = UDRS->probed_format;
+ v.track = UDRS->track;
+ v.maxblock = UDRS->maxblock;
+ v.maxtrack = UDRS->maxtrack;
+ v.generation = UDRS->generation;
+ v.keep_data = UDRS->keep_data;
+ v.fd_ref = UDRS->fd_ref;
+ v.fd_device = UDRS->fd_device;
+ v.last_checked = UDRS->last_checked;
+ v.dmabuf = (uintptr_t)UDRS->dmabuf;
+ v.bufblocks = UDRS->bufblocks;
+ mutex_unlock(&floppy_mutex);
+
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+ return -EFAULT;
+ return 0;
+Eintr:
+ mutex_unlock(&floppy_mutex);
+ return -EINTR;
+}
+
+static int compat_getfdcstat(int drive,
+ struct compat_floppy_fdc_state __user *arg)
+{
+ struct compat_floppy_fdc_state v32;
+ struct floppy_fdc_state v;
+
+ mutex_lock(&floppy_mutex);
+ v = *UFDCS;
+ mutex_unlock(&floppy_mutex);
+
+ memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
+ v32.spec1 = v.spec1;
+ v32.spec2 = v.spec2;
+ v32.dtr = v.dtr;
+ v32.version = v.version;
+ v32.dor = v.dor;
+ v32.address = v.address;
+ v32.rawcmd = v.rawcmd;
+ v32.reset = v.reset;
+ v32.need_configure = v.need_configure;
+ v32.perp_mode = v.perp_mode;
+ v32.has_fifo = v.has_fifo;
+ v32.driver_version = v.driver_version;
+ memcpy(v32.track, v.track, 4);
+ if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_fdc_state)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_werrorget(int drive,
+ struct compat_floppy_write_errors __user *arg)
+{
+ struct compat_floppy_write_errors v32;
+ struct floppy_write_errors v;
+
+ memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
+ mutex_lock(&floppy_mutex);
+ v = *UDRWE;
+ mutex_unlock(&floppy_mutex);
+ v32.write_errors = v.write_errors;
+ v32.first_error_sector = v.first_error_sector;
+ v32.first_error_generation = v.first_error_generation;
+ v32.last_error_sector = v.last_error_sector;
+ v32.last_error_generation = v.last_error_generation;
+ v32.badness = v.badness;
+ if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_write_errors)))
+ return -EFAULT;
+ return 0;
+}
+
+static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long param)
+{
+ int drive = (long)bdev->bd_disk->private_data;
+ switch (cmd) {
+ case FDMSGON:
+ case FDMSGOFF:
+ case FDSETEMSGTRESH:
+ case FDFLUSH:
+ case FDWERRORCLR:
+ case FDEJECT:
+ case FDCLRPRM:
+ case FDFMTBEG:
+ case FDRESET:
+ case FDTWADDLE:
+ return fd_ioctl(bdev, mode, cmd, param);
+ case FDSETMAXERRS:
+ case FDGETMAXERRS:
+ case FDGETDRVTYP:
+ case FDFMTEND:
+ case FDFMTTRK:
+ case FDRAWCMD:
+ return fd_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(param));
+ case FDSETPRM32:
+ case FDDEFPRM32:
+ return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
+ case FDGETPRM32:
+ return compat_get_prm(drive, compat_ptr(param));
+ case FDSETDRVPRM32:
+ return compat_setdrvprm(drive, compat_ptr(param));
+ case FDGETDRVPRM32:
+ return compat_getdrvprm(drive, compat_ptr(param));
+ case FDPOLLDRVSTAT32:
+ return compat_getdrvstat(drive, true, compat_ptr(param));
+ case FDGETDRVSTAT32:
+ return compat_getdrvstat(drive, false, compat_ptr(param));
+ case FDGETFDCSTAT32:
+ return compat_getfdcstat(drive, compat_ptr(param));
+ case FDWERRORGET32:
+ return compat_werrorget(drive, compat_ptr(param));
+ }
+ return -EINVAL;
+}
+#endif
+
static void __init config_types(void)
{
bool has_drive = false;
@@ -3891,6 +4249,9 @@ static const struct block_device_operations floppy_fops = {
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
.revalidate_disk = floppy_revalidate,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = fd_compat_ioctl,
+#endif
};
/*
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28ce17405aab..f236b7984b94 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -82,7 +82,6 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);
-static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@@ -1034,7 +1033,7 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@@ -1083,12 +1082,12 @@ static int loop_clr_fd(struct loop_device *lo)
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
/*
- * Need not hold loop_ctl_mutex to fput backing file.
- * Calling fput holding loop_ctl_mutex triggers a circular
+ * Need not hold lo_ctl_mutex to fput backing file.
+ * Calling fput holding lo_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before loop_ctl_mutex.
+ * bd_mutex which is usually taken before lo_ctl_mutex.
*/
fput(filp);
return 0;
@@ -1351,7 +1350,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock_nested(&loop_ctl_mutex, 1);
+ mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
@@ -1360,7 +1359,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
- /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
+ /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
@@ -1396,7 +1395,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
return err;
@@ -1529,16 +1528,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
err = loop_set_status_compat(
lo, (const struct compat_loop_info __user *) arg);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
break;
case LOOP_GET_STATUS:
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
err = loop_get_status_compat(
lo, (struct compat_loop_info __user *) arg);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@@ -1547,6 +1546,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
arg = (unsigned long) compat_ptr(arg);
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
+ case LOOP_SET_DIRECT_IO:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
@@ -1582,7 +1582,7 @@ static void __lo_release(struct loop_device *lo)
if (atomic_dec_return(&lo->lo_refcnt))
return;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
/*
* In autoclear mode, stop the loop thread
@@ -1599,7 +1599,7 @@ static void __lo_release(struct loop_device *lo)
loop_flush(lo);
}
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -1645,10 +1645,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@@ -1814,6 +1814,7 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
+ mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@@ -1926,19 +1927,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
- mutex_lock(&loop_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
- mutex_unlock(&loop_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index a923e74495ce..60f0fd2c0c65 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -55,6 +55,7 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
+ struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;
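
Together, the loop.c and loop.h hunks replace the single global loop_ctl_mutex with a lo_ctl_mutex embedded in struct loop_device, so ioctls on different loop devices stop serialising on one lock. A hedged, generic sketch of embedding and initialising such a per-device lock (struct example_dev is illustrative, not the loop driver's layout):

#include <linux/mutex.h>

struct example_dev {
	struct mutex ctl_mutex;	/* protects this device's control state */
	/* ... other per-device state ... */
};

/* Called once when the device object is created (cf. loop_add()). */
static void example_dev_init(struct example_dev *dev)
{
	mutex_init(&dev->ctl_mutex);
}
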
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 34997df132e2..6beafaa335c7 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -1025,8 +1025,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
cancel_work_sync(&card->event_work);
+ destroy_workqueue(card->event_wq);
rsxx_destroy_dev(card);
rsxx_dma_destroy(card);
+ destroy_workqueue(card->creg_ctrl.creg_wq);
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 10332c24f961..f287eec36b28 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -215,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
- blk_mq_stop_hw_queue(hctx);
+ /* Don't stop the queue if -ENOMEM: we may have failed to
+ * bounce the buffer due to global resource outage.
+ */
+ if (err == -ENOSPC)
+ blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
- /* Out of mem doesn't actually happen, since we fall back
- * to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
return BLK_MQ_RQ_QUEUE_BUSY;
return BLK_MQ_RQ_QUEUE_ERROR;
@@ -392,6 +394,8 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
+ num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index d6eaaa25d1cc..a700e525535c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -929,6 +929,8 @@ next:
out_of_memory:
pr_alert("%s: out of memory\n", __func__);
put_free_pages(ring, pages_to_gnt, segs_to_map);
+ for (i = last_map; i < num; i++)
+ pages[i]->handle = BLKBACK_INVALID_HANDLE;
return -ENOMEM;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 5dfe6e8af140..1d1f86657967 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -178,6 +178,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->domid = domid;
atomic_set(&blkif->refcnt, 1);
init_completion(&blkif->drain_complete);
+
+ /*
+ * Because freeing back to the cache may be deferred, it is not
+ * safe to unload the module (and hence destroy the cache) until
+ * this has completed. To prevent premature unloading, take an
+ * extra module reference here and release only when the object
+ * has been freed back to the cache.
+ */
+ __module_get(THIS_MODULE);
INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
return blkif;
@@ -322,6 +331,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
+ module_put(THIS_MODULE);
}
int __init xen_blkif_interface_init(void)
@@ -967,6 +977,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
blkif->nr_ring_pages = nr_grefs;
+ err = -ENOMEM;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -989,7 +1000,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
if (err) {
xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
- return err;
+ goto fail;
}
return 0;
@@ -1009,8 +1020,7 @@ fail:
}
kfree(req);
}
- return -ENOMEM;
-
+ return err;
}
static int connect_ring(struct backend_info *be)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c08ee8cf1e29..6ee3e928ebf1 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1104,8 +1104,8 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (!VDEV_IS_EXTENDED(info->vdevice)) {
err = xen_translate_vdev(info->vdevice, &minor, &offset);
if (err)
- return err;
- nr_parts = PARTS_PER_DISK;
+ return err;
+ nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c4328d9d9981..f838119d12b2 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1062,6 +1062,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
+ /* prevent double queue cleanup */
+ ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 28afd5d585f9..b7dfa4afd516 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -363,6 +363,9 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
return err;
}
+ /* Give the controller some time to get ready to receive the NVM */
+ msleep(10);
+
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8dce1a890078..4e3b24a0511f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -362,6 +362,9 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822BE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822CE Bluetooth devices */
+ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -1066,7 +1069,7 @@ static int btusb_open(struct hci_dev *hdev)
if (data->setup_on_usb) {
err = data->setup_on_usb(hdev);
if (err < 0)
- return err;
+ goto setup_fail;
}
data->intf->needs_remote_wakeup = 1;
@@ -1098,6 +1101,7 @@ done:
failed:
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+setup_fail:
usb_autopm_put_interface(data->intf);
return err;
}
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 0ccf6bf01ed4..c50b68bbecdc 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -101,6 +101,9 @@ static int ath_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index deed58013555..25042c794852 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -279,6 +279,9 @@ static int bcm_open(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
if (!bcm)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index a2c921faaa12..26f9982bab26 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -605,6 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
if (*ptr == 0xc0) {
BT_ERR("Short BCSP packet");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_START;
bcsp->rx_count = 0;
} else
@@ -620,6 +621,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
BT_ERR("Error in BCSP hdr checksum");
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@@ -644,6 +646,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
bscp_get_crc(bcsp));
kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
bcsp->rx_count = 0;
continue;
@@ -759,6 +762,11 @@ static int bcsp_close(struct hci_uart *hu)
skb_queue_purge(&bcsp->rel);
skb_queue_purge(&bcsp->unrel);
+ if (bcsp->rx_skb) {
+ kfree_skb(bcsp->rx_skb);
+ bcsp->rx_skb = NULL;
+ }
+
kfree(bcsp);
return 0;
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 73306384af6c..f822e862b689 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -407,6 +407,9 @@ static int intel_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
intel = kzalloc(sizeof(*intel), GFP_KERNEL);
if (!intel)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 2230f9368c21..0a21fb86fd67 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -263,6 +263,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+ return true;
+
+ return false;
+}
+
/* Flow control or un-flow control the device */
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
{
@@ -644,15 +653,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
return err;
hu->proto = p;
- set_bit(HCI_UART_PROTO_READY, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
p->close(hu);
return err;
}
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
return 0;
}
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index bbc4b39b1dbf..716d89a90907 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
if (!mrvl)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 070139513e65..aeef870e31b8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -109,6 +109,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_tty(struct hci_uart *hu);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 2051d926e303..4f3d988210b0 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
if (ret)
goto unlock;
- *buf = readl(rsb->regs + RSB_DATA);
+ *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
unlock:
mutex_unlock(&rsb->lock);
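
The sunxi-rsb fix masks the 32-bit data register down to the number of bytes the caller requested, so 1- and 2-byte reads no longer return stale high bits. A small hedged sketch of the masking helper (mask_low_bytes() is an illustrative name; len is assumed to be 1, 2 or 4 as in the driver):

#include <linux/bitops.h>
#include <linux/types.h>

/* Keep only the low "len" bytes of a raw 32-bit register value,
 * e.g. len = 1 -> GENMASK(7, 0) = 0xff, len = 2 -> 0xffff. */
static u32 mask_low_bytes(u32 raw, unsigned int len)
{
	return raw & GENMASK(len * 8 - 1, 0);
}
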
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ff4280800cd0..782dbab5ad56 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -265,6 +265,7 @@
/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
@@ -997,6 +998,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@@ -1163,7 +1170,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
- cdrom_mmc3_profile(cdi);
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
+ cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -2872,6 +2880,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
@@ -3683,9 +3694,9 @@ static struct ctl_table_header *cdrom_sysctl_header;
static void cdrom_sysctl_register(void)
{
- static int initialized;
+ static atomic_t initialized = ATOMIC_INIT(0);
- if (initialized == 1)
+ if (!atomic_add_unless(&initialized, 1, 1))
return;
cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
@@ -3696,8 +3707,6 @@ static void cdrom_sysctl_register(void)
cdrom_sysctl_settings.debug = debug;
cdrom_sysctl_settings.lock = lockdoor;
cdrom_sysctl_settings.check = check_media_type;
-
- initialized = 1;
}
static void cdrom_sysctl_unregister(void)
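
cdrom_sysctl_register() used a plain static int flag, so two concurrent openers could both observe it as zero and register the sysctl table twice; atomic_add_unless() folds the check and the set into one atomic step. A hedged sketch of the once-only idiom (register_once() and do_registration() are illustrative names):

#include <linux/atomic.h>

static atomic_t initialized = ATOMIC_INIT(0);
static void do_registration(void);	/* illustrative one-time setup */

/* atomic_add_unless() increments only while the value is still 0 and
 * returns 0 when it made no change, so exactly one caller proceeds. */
static void register_once(void)
{
	if (!atomic_add_unless(&initialized, 1, 1))
		return;

	do_registration();
}
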
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e4866ed3c3da..c82f53cfa7ea 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -392,7 +392,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
- depends on TTY
+ depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 50272fe81f26..bedfd2412ec1 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -376,7 +376,7 @@ static __init int hpet_mmap_enable(char *str)
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
-__setup("hpet_mmap", hpet_mmap_enable);
+__setup("hpet_mmap=", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
@@ -569,8 +569,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
unsigned long long m;
m = hpets->hp_tick_freq + (dis >> 1);
- do_div(m, dis);
- return (unsigned long)m;
+ return div64_ul(m, dis);
}
static int
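Two small fixes in the hpet hunks: the __setup() key gains its trailing '=' so a "hpet_mmap=1" boot parameter actually reaches the handler, and the open-coded do_div() becomes div64_ul(), which simply returns the quotient. Adding half the divisor before dividing, as hpet_time_div() does, yields round-to-nearest rather than truncation; a tiny worked example in plain C:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest unsigned division: (a + b/2) / b, the same shape as
 * hpet_time_div()'s `m = hp_tick_freq + (dis >> 1); div64_ul(m, dis)`. */
static uint64_t div_round_nearest(uint64_t a, uint64_t b)
{
	return (a + (b >> 1)) / b;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)div_round_nearest(10, 4)); /* 3, not 2 */
	printf("%llu\n", (unsigned long long)div_round_nearest(9, 4));  /* 2 */
	return 0;
}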
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89de5b4..5e79b4bfe27a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -88,7 +88,7 @@ static void add_early_randomness(struct hwrng *rng)
size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(rng_buffer, bytes_read);
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index 37a58d78aab3..3324a7f4bee3 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -114,7 +114,8 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
{
cancel_delayed_work_sync(&idle_work);
hwrng_unregister(&omap3_rom_rng_ops);
- clk_disable_unprepare(rng_clk);
+ if (!rng_idle)
+ clk_disable_unprepare(rng_clk);
return 0;
}
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 83c695938a2d..f53d47e3355d 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -166,6 +166,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
return devm_hwrng_register(dev, &priv->rng);
}
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int stm32_rng_runtime_suspend(struct device *dev)
{
@@ -202,6 +209,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 3fa2f8a009b3..1c5c4314c6b5 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
if (!vi->busy) {
vi->busy = true;
- init_completion(&vi->have_data);
+ reinit_completion(&vi->have_data);
register_buffer(vi, buf, size);
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5d509ccf1299..74044b52d2c6 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2646,7 +2646,9 @@ get_guid(ipmi_smi_t intf)
if (rv)
/* Send failed, no GUID available. */
intf->bmc->guid_set = 0;
- wait_event(intf->waitq, intf->bmc->guid_set != 2);
+ else
+ wait_event(intf->waitq, intf->bmc->guid_set != 2);
+
intf->null_user_handler = NULL;
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e0a53156b782..82af65818444 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -283,6 +283,9 @@ struct smi_info {
*/
bool irq_enable_broken;
+ /* Is the driver in maintenance mode? */
+ bool in_maintenance_mode;
+
/*
* Did we get an attention that we did not handle?
*/
@@ -1093,11 +1096,20 @@ static int ipmi_thread(void *data)
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
&busy_until);
- if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
; /* do nothing */
- else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
- schedule();
- else if (smi_result == SI_SM_IDLE) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+ /*
+ * In maintenance mode we run as fast as
+ * possible to allow firmware updates to
+ * complete as fast as possible, but normally
+ * don't bang on the scheduler.
+ */
+ if (smi_info->in_maintenance_mode)
+ schedule();
+ else
+ usleep_range(100, 200);
+ } else if (smi_result == SI_SM_IDLE) {
if (atomic_read(&smi_info->need_watch)) {
schedule_timeout_interruptible(100);
} else {
@@ -1105,8 +1117,9 @@ static int ipmi_thread(void *data)
__set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
- } else
+ } else {
schedule_timeout_interruptible(1);
+ }
}
return 0;
}
@@ -1285,6 +1298,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
if (!enable)
atomic_set(&smi_info->req_events, 0);
+ smi_info->in_maintenance_mode = enable;
}
static const struct ipmi_smi_handlers handlers = {
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index dac36ef450ba..a4ef9a6bd367 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -699,12 +699,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum != ssif_info->multi_pos) {
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
* but multi_pos starts at one, so the +1.
*/
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message out of sequence, expected %u, got %u\n",
+ ssif_info->multi_pos - 1, blocknum);
result = -EIO;
} else {
ssif_inc_stat(ssif_info, received_message_parts);
@@ -742,10 +746,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
msg = ssif_info->curr_msg;
if (msg) {
+ if (data) {
+ if (len > IPMI_MAX_MSG_LENGTH)
+ len = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, len);
+ } else {
+ len = 0;
+ }
msg->rsp_size = len;
- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
- memcpy(msg->rsp, data, msg->rsp_size);
ssif_info->curr_msg = NULL;
}
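The ipmi_ssif change above clamps the response length to the destination buffer before the memcpy() and treats a missing payload as a zero-length response, instead of copying first and shrinking the reported size afterwards. A minimal sketch of that bounds-checked copy (the buffer size and names below are stand-ins for IPMI_MAX_MSG_LENGTH and msg->rsp):

#include <stddef.h>
#include <string.h>

#define MAX_MSG_LEN 272	/* stand-in for IPMI_MAX_MSG_LENGTH */

/* Copy a received response into a fixed-size buffer: clamp the length
 * to the buffer before copying, and treat a missing payload as length 0. */
static size_t copy_response(unsigned char *rsp, const unsigned char *data,
			    size_t len)
{
	if (!data)
		return 0;
	if (len > MAX_MSG_LEN)
		len = MAX_MSG_LEN;
	memcpy(rsp, data, len);
	return len;		/* what the caller records as rsp_size */
}

int main(void)
{
	unsigned char rsp[MAX_MSG_LEN];
	unsigned char payload[4] = { 0x1c, 0x00, 0x01, 0x02 };

	return copy_response(rsp, payload, sizeof(payload)) == 4 ? 0 : 1;
}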
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 593a8818aca9..e87a40c198fa 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -96,6 +96,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
+static inline bool should_stop_iteration(void)
+{
+ if (need_resched())
+ cond_resched();
+ return fatal_signal_pending(current);
+}
+
/*
* This function reads the *physical* memory. The f_pos points directly to the
* memory location.
@@ -162,6 +169,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
p += sz;
count -= sz;
read += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += read;
@@ -233,6 +242,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -446,6 +457,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
read += sz;
low_count -= sz;
count -= sz;
+ if (should_stop_iteration()) {
+ count = 0;
+ break;
+ }
}
}
@@ -470,6 +485,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
buf += sz;
read += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
@@ -522,6 +539,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
p += sz;
count -= sz;
written += sz;
+ if (should_stop_iteration())
+ break;
}
*ppos += written;
@@ -573,6 +592,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
buf += sz;
virtr += sz;
p += sz;
+ if (should_stop_iteration())
+ break;
}
free_page((unsigned long)kbuf);
}
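should_stop_iteration(), added above, lets the long /dev/mem and /dev/kmem copy loops yield the CPU when a reschedule is due and abort early once the task has a fatal signal pending, so a huge read can neither hog a CPU nor outlive a kill. A rough userspace analogue, with sched_yield() and a signal flag standing in for cond_resched() and fatal_signal_pending():

#include <sched.h>
#include <signal.h>
#include <stdbool.h>

static volatile sig_atomic_t fatal_signal;

static void on_sigterm(int sig)
{
	(void)sig;
	fatal_signal = 1;
}

/* Userspace stand-in for should_stop_iteration(): give up the CPU and
 * report whether the loop should terminate early. */
static bool should_stop_iteration(void)
{
	sched_yield();			/* ~ cond_resched() */
	return fatal_signal != 0;	/* ~ fatal_signal_pending(current) */
}

int main(void)
{
	signal(SIGTERM, on_sigterm);

	for (unsigned long i = 0; i < 1UL << 30; i++) {
		/* ... copy one chunk ... */
		if ((i & 0xFFFFF) == 0 && should_stop_iteration())
			break;
	}
	return 0;
}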
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 336d02a488cc..3648727dfe83 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -624,20 +624,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
case PPSETTIME64:
if (copy_from_user(time64, argp, sizeof(time64)))
return -EFAULT;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
case PPGETTIME32:
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time32[0] = ts.tv_sec;
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time32[0] < 0) || (time32[1] < 0))
- return -EINVAL;
if (copy_to_user(argp, time32, sizeof(time32)))
return -EFAULT;
@@ -648,8 +655,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time64[0] = ts.tv_sec;
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time64[0] < 0) || (time64[1] < 0))
- return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 81b65d0e7563..4cbc73173701 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2118,8 +2118,8 @@ struct batched_entropy {
/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * number is as good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
unsigned long get_random_long(void)
@@ -2127,9 +2127,6 @@ unsigned long get_random_long(void)
unsigned long ret;
struct batched_entropy *batch;
- if (arch_get_random_long(&ret))
- return ret;
-
batch = &get_cpu_var(batched_entropy_long);
if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
extract_crng((u8 *)batch->entropy_long);
@@ -2153,9 +2150,6 @@ unsigned int get_random_int(void)
unsigned int ret;
struct batched_entropy *batch;
- if (arch_get_random_int(&ret))
- return ret;
-
batch = &get_cpu_var(batched_entropy_int);
if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
extract_crng((u8 *)batch->entropy_int);
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index fa0f66809503..d29f78441cdb 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -102,19 +102,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
unsigned int expected;
- /* sanity check */
- if (count < 6)
+ /* A sanity check that the upper layer wants to get at least the header
+ * as that is the minimum size for any TPM response.
+ */
+ if (count < TPM_HEADER_SIZE)
return -EIO;
+ /* If this bit is set, according to the spec, the TPM is in
+ * unrecoverable condition.
+ */
if (ioread32(&priv->cca->sts) & CRB_CTRL_STS_ERROR)
return -EIO;
- memcpy_fromio(buf, priv->rsp, 6);
- expected = be32_to_cpup((__be32 *) &buf[2]);
- if (expected > count || expected < 6)
+ /* Read the first 8 bytes in order to get the length of the response.
+ * We read exactly a quad word in order to make sure that the remaining
+ * reads will be aligned.
+ */
+ memcpy_fromio(buf, priv->rsp, 8);
+
+ expected = be32_to_cpup((__be32 *)&buf[2]);
+ if (expected > count || expected < TPM_HEADER_SIZE)
return -EIO;
- memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
+ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
return expected;
}
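crb_recv() now reads a full 8-byte quad word, takes the big-endian length from bytes 2..5 of the response header, and rejects anything shorter than the header or longer than the caller's buffer before copying the rest. A userspace sketch of that header validation (the 10-byte header size and the offset of the length field follow the TPM 2.0 response layout; treat them as assumptions here):

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define TPM_HEADER_SIZE 10	/* tag(2) + length(4) + return code(4) */

/* Decode the big-endian 32-bit length at offset 2 of a TPM response
 * header and check it fits both the header minimum and the buffer. */
static bool tpm_response_len_ok(const uint8_t *hdr, size_t bufsize,
				uint32_t *len_out)
{
	uint32_t len = ((uint32_t)hdr[2] << 24) | ((uint32_t)hdr[3] << 16) |
		       ((uint32_t)hdr[4] << 8)  |  (uint32_t)hdr[5];

	if (len < TPM_HEADER_SIZE || len > bufsize)
		return false;
	*len_out = len;
	return true;
}

int main(void)
{
	const uint8_t hdr[8] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0a,
				 0x00, 0x00 };
	uint32_t len;

	return tpm_response_len_ok(hdr, 64, &len) && len == 10 ? 0 : 1;
}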
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 95ce2e9ccdc6..cc4e642d3180 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -65,7 +65,15 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
- return status;
+
+ if (status < 0)
+ return status;
+
+ /* The upper layer does not support incomplete sends. */
+ if (status != len)
+ return -E2BIG;
+
+ return 0;
}
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 67549ce88cc9..774748497ace 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -18,10 +18,11 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
struct ttyprintk_port {
struct tty_port port;
- struct mutex port_write_mutex;
+ spinlock_t spinlock;
};
static struct ttyprintk_port tpk_port;
@@ -100,11 +101,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
/* flush tpk_printk buffer */
tpk_printk(NULL, 0);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
tty_port_close(&tpkp->port, tty, filp);
}
@@ -116,13 +118,14 @@ static int tpk_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
+ unsigned long flags;
int ret;
/* exclusive use of tpk_printk within this tty */
- mutex_lock(&tpkp->port_write_mutex);
+ spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
- mutex_unlock(&tpkp->port_write_mutex);
+ spin_unlock_irqrestore(&tpkp->spinlock, flags);
return ret;
}
@@ -172,7 +175,7 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;
- mutex_init(&tpk_port.port_write_mutex);
+ spin_lock_init(&tpk_port.spinlock);
ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8c0017d48571..34548d3b4d13 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -75,7 +75,7 @@ struct ports_driver_data {
/* All the console devices handled by this driver */
struct list_head consoles;
};
-static struct ports_driver_data pdrvdata;
+static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
static DEFINE_SPINLOCK(pdrvdata_lock);
static DECLARE_COMPLETION(early_console_added);
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
}
}
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
int pages)
{
struct port_buffer *buf;
@@ -445,7 +445,7 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
return buf;
}
- if (is_rproc_serial(vq->vdev)) {
+ if (is_rproc_serial(vdev)) {
/*
* Allocate DMA memory from ancestor. When a virtio
* device is created by remoteproc, the DMA memory is
@@ -455,9 +455,9 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
* DMA_MEMORY_INCLUDES_CHILDREN had been supported
* in dma-coherent.c
*/
- if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+ if (!vdev->dev.parent || !vdev->dev.parent->parent)
goto free_buf;
- buf->dev = vq->vdev->dev.parent->parent;
+ buf->dev = vdev->dev.parent->parent;
/* Increase device refcnt to avoid freeing it */
get_device(buf->dev);
@@ -841,7 +841,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
count = min((size_t)(32 * 1024), count);
- buf = alloc_buf(port->out_vq, count, 0);
+ buf = alloc_buf(port->portdev->vdev, count, 0);
if (!buf)
return -ENOMEM;
@@ -960,7 +960,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (ret < 0)
goto error_out;
- buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+ buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
if (!buf) {
ret = -ENOMEM;
goto error_out;
@@ -1369,24 +1369,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
port->cons.ws.ws_col = cols;
}
-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
struct port_buffer *buf;
- unsigned int nr_added_bufs;
+ int nr_added_bufs;
int ret;
nr_added_bufs = 0;
do {
- buf = alloc_buf(vq, PAGE_SIZE, 0);
+ buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
if (!buf)
- break;
+ return -ENOMEM;
spin_lock_irq(lock);
ret = add_inbuf(vq, buf);
if (ret < 0) {
spin_unlock_irq(lock);
free_buf(buf, true);
- break;
+ return ret;
}
nr_added_bufs++;
spin_unlock_irq(lock);
@@ -1406,7 +1406,6 @@ static int add_port(struct ports_device *portdev, u32 id)
char debugfs_name[16];
struct port *port;
dev_t devt;
- unsigned int nr_added_bufs;
int err;
port = kmalloc(sizeof(*port), GFP_KERNEL);
@@ -1425,6 +1424,7 @@ static int add_port(struct ports_device *portdev, u32 id)
port->async_queue = NULL;
port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+ port->cons.vtermno = 0;
port->host_connected = port->guest_connected = false;
port->stats = (struct port_stats) { 0 };
@@ -1464,11 +1464,13 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
- /* Fill the in_vq with buffers so the host can send us data. */
- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
- if (!nr_added_bufs) {
+ /* We can safely ignore ENOSPC because it means
+ * the queue already has buffers. Buffers are removed
+ * only by virtcons_remove(), not by unplug_port()
+ */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (err < 0 && err != -ENOSPC) {
dev_err(port->dev, "Error allocating inbufs\n");
- err = -ENOMEM;
goto free_device;
}
@@ -1991,19 +1993,40 @@ static void remove_vqs(struct ports_device *portdev)
kfree(portdev->out_vqs);
}
-static void remove_controlq_data(struct ports_device *portdev)
+static void virtcons_remove(struct virtio_device *vdev)
{
- struct port_buffer *buf;
- unsigned int len;
+ struct ports_device *portdev;
+ struct port *port, *port2;
- if (!use_multiport(portdev))
- return;
+ portdev = vdev->priv;
- while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf, true);
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&portdev->list);
+ spin_unlock_irq(&pdrvdata_lock);
- while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf, true);
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);
+ else
+ cancel_work_sync(&portdev->config_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+ /*
+ * When yanking out a device, we immediately lose the
+ * (device-side) queues. So there's no point in keeping the
+ * guest side around till we drop our final reference. This
+ * also means that any ports which are in an open state will
+ * have to just stop using the port, as the vqs are going
+ * away.
+ */
+ remove_vqs(portdev);
+ kfree(portdev);
}
/*
@@ -2072,6 +2095,7 @@ static int virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
+ INIT_LIST_HEAD(&portdev->list);
virtio_device_ready(portdev->vdev);
@@ -2079,18 +2103,22 @@ static int virtcons_probe(struct virtio_device *vdev)
INIT_WORK(&portdev->control_work, &control_work_handler);
if (multiport) {
- unsigned int nr_added_bufs;
-
spin_lock_init(&portdev->c_ivq_lock);
spin_lock_init(&portdev->c_ovq_lock);
- nr_added_bufs = fill_queue(portdev->c_ivq,
- &portdev->c_ivq_lock);
- if (!nr_added_bufs) {
+ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ if (err < 0) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
- err = -ENOMEM;
- goto free_vqs;
+ /*
+ * The host might want to notify mgmt sw about device
+ * add failure.
+ */
+ __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ /* Device was functional: we need full cleanup. */
+ virtcons_remove(vdev);
+ return err;
}
} else {
/*
@@ -2121,11 +2149,6 @@ static int virtcons_probe(struct virtio_device *vdev)
return 0;
-free_vqs:
- /* The host might want to notify mgmt sw about device add failure */
- __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
- VIRTIO_CONSOLE_DEVICE_READY, 0);
- remove_vqs(portdev);
free_chrdev:
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
@@ -2134,43 +2157,6 @@ fail:
return err;
}
-static void virtcons_remove(struct virtio_device *vdev)
-{
- struct ports_device *portdev;
- struct port *port, *port2;
-
- portdev = vdev->priv;
-
- spin_lock_irq(&pdrvdata_lock);
- list_del(&portdev->list);
- spin_unlock_irq(&pdrvdata_lock);
-
- /* Disable interrupts for vqs */
- vdev->config->reset(vdev);
- /* Finish up work that's lined up */
- if (use_multiport(portdev))
- cancel_work_sync(&portdev->control_work);
- else
- cancel_work_sync(&portdev->config_work);
-
- list_for_each_entry_safe(port, port2, &portdev->ports, list)
- unplug_port(port);
-
- unregister_chrdev(portdev->chr_major, "virtio-portsdev");
-
- /*
- * When yanking out a device, we immediately lose the
- * (device-side) queues. So there's no point in keeping the
- * guest side around till we drop our final reference. This
- * also means that any ports which are in an open state will
- * have to just stop using the port, as the vqs are going
- * away.
- */
- remove_controlq_data(portdev);
- remove_vqs(portdev);
- kfree(portdev);
-}
-
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -2201,15 +2187,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
vdev->config->reset(vdev);
- virtqueue_disable_cb(portdev->c_ivq);
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
cancel_work_sync(&portdev->control_work);
cancel_work_sync(&portdev->config_work);
/*
* Once more: if control_work_handler() was running, it would
* enable the cb as the last step.
*/
- virtqueue_disable_cb(portdev->c_ivq);
- remove_controlq_data(portdev);
+ if (use_multiport(portdev))
+ virtqueue_disable_cb(portdev->c_ivq);
list_for_each_entry(port, &portdev->ports, list) {
virtqueue_disable_cb(port->in_vq);
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c813c27f2e58..90988e7a5b47 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -27,6 +27,10 @@
#define MOR_KEY_MASK (0xff << 16)
+#define clk_main_parent_select(s) (((s) & \
+ (AT91_PMC_MOSCEN | \
+ AT91_PMC_OSCBYPASS)) ? 1 : 0)
+
struct clk_main_osc {
struct clk_hw hw;
struct regmap *regmap;
@@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
regmap_read(regmap, AT91_PMC_SR, &status);
- return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
+ return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
}
static const struct clk_ops main_osc_ops = {
@@ -158,7 +162,7 @@ at91_clk_register_main_osc(struct regmap *regmap,
if (bypass)
regmap_update_bits(regmap,
AT91_CKGR_MOR, MOR_KEY_MASK |
- AT91_PMC_MOSCEN,
+ AT91_PMC_OSCBYPASS,
AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
hw = &osc->hw;
@@ -350,7 +354,10 @@ static int clk_main_probe_frequency(struct regmap *regmap)
regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
if (mcfr & AT91_PMC_MAINRDY)
return 0;
- usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(MAINF_LOOP_MIN_WAIT);
+ else
+ usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
} while (time_before(prep_time, timeout));
return -ETIMEDOUT;
@@ -530,7 +537,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
- return status & AT91_PMC_MOSCEN ? 1 : 0;
+ return clk_main_parent_select(status);
}
static const struct clk_ops sam9x5_main_ops = {
@@ -572,7 +579,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
clkmain->hw.init = &init;
clkmain->regmap = regmap;
regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
- clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
+ clkmain->parent = clk_main_parent_select(status);
hw = &clkmain->hw;
ret = clk_hw_register(NULL, &clkmain->hw);
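clk_main_parent_select(), introduced above, reports the crystal path as the selected parent when the oscillator is either enabled or bypassed by an external clock, which is what the old MOSCEN-only test missed on bypass boards. A small illustration of the predicate (bit positions below are assumed for the sketch):

#include <stdio.h>

#define AT91_PMC_MOSCEN    (1u << 0)	/* assumed bit positions, sketch only */
#define AT91_PMC_OSCBYPASS (1u << 1)

/* Parent index 1 (the main crystal path) is selected when the oscillator
 * is either enabled or bypassed by an external clock. */
#define clk_main_parent_select(s) \
	(((s) & (AT91_PMC_MOSCEN | AT91_PMC_OSCBYPASS)) ? 1 : 0)

int main(void)
{
	printf("%d\n", clk_main_parent_select(AT91_PMC_OSCBYPASS));	/* 1 */
	printf("%d\n", clk_main_parent_select(AT91_PMC_MOSCEN));	/* 1 */
	printf("%d\n", clk_main_parent_select(0u));			/* 0 */
	return 0;
}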
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 791770a563fc..6fac6383d024 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -78,6 +78,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
tmp_parent_rate = req->rate * div;
tmp_parent_rate = clk_hw_round_rate(parent,
tmp_parent_rate);
+ if (!tmp_parent_rate)
+ continue;
+
tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
if (tmp_rate < req->rate)
tmp_diff = req->rate - tmp_rate;
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index ab6ecefc49ad..43ba2a8b03fa 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -74,7 +74,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw)
writel(tmp | AT91_SCKC_OSC32EN, sckcr);
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
return 0;
}
@@ -197,7 +200,10 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw)
writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr);
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
return 0;
}
@@ -310,7 +316,10 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
writel(tmp, sckcr);
- usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(SLOWCK_SW_TIME_USEC);
+ else
+ usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1);
return 0;
}
@@ -443,7 +452,10 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
return 0;
}
- usleep_range(osc->startup_usec, osc->startup_usec + 1);
+ if (system_state < SYSTEM_RUNNING)
+ udelay(osc->startup_usec);
+ else
+ usleep_range(osc->startup_usec, osc->startup_usec + 1);
osc->prepared = true;
return 0;
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 727ed8e1bb72..8e4581004695 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
hb_clk->reg = of_iomap(srnp, 0);
+ of_node_put(srnp);
BUG_ON(!hb_clk->reg);
hb_clk->reg += reg;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 80ae2a51452d..65876ff6df41 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -540,7 +540,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
.guts_compat = "fsl,qoriq-device-config-1.0",
.init_periph = p5020_init_periph,
.cmux_groups = {
- &p2041_cmux_grp1, &p2041_cmux_grp2
+ &p5020_cmux_grp1, &p5020_cmux_grp2
},
.cmux_to_group = {
0, 1, -1
@@ -1245,6 +1245,7 @@ static void __init clockgen_init(struct device_node *np)
pr_err("%s: Couldn't map %s regs\n", __func__,
guts->full_name);
}
+ of_node_put(guts);
}
}
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 14071a57c926..f5d74e8db432 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
* This requires of_device_id table. In the same time this will not change the
* actual *device* matching so do not add .of_match_table.
*/
-static const struct of_device_id s2mps11_dt_match[] = {
+static const struct of_device_id s2mps11_dt_match[] __used = {
{
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5da068e435d1..459a96f3043e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2449,11 +2449,17 @@ static int __clk_core_init(struct clk_core *core)
if (core->flags & CLK_IS_CRITICAL) {
unsigned long flags;
- clk_core_prepare(core);
+ ret = clk_core_prepare(core);
+ if (ret)
+ goto out;
flags = clk_enable_lock();
- clk_core_enable(core);
+ ret = clk_core_enable(core);
clk_enable_unlock(flags);
+ if (ret) {
+ clk_core_unprepare(core);
+ goto out;
+ }
}
/*
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 3d918b281927..57e14143f5b5 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -504,6 +504,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
/* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index a6be682ed011..7e8993f80340 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -203,6 +203,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 0b98e03b1741..488ef111f3c1 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -416,6 +416,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
WARN_ON(!base);
+ of_node_put(np);
clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 0476353ab423..a19ab032d073 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
anatop_base = of_iomap(np, 0);
BUG_ON(!anatop_base);
+ of_node_put(np);
np = ccm_node;
ccm_base = of_iomap(np, 0);
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 9adaf48aea23..20cfdf837bfa 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
+static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};
static DEFINE_SPINLOCK(reset_lock);
@@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
/* The gate clocks has mux parent. */
{MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
- {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+ {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
{MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
{MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 2c7c1085f883..8fdfa97900cd 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -177,8 +177,10 @@ static void __init a370_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &a370_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, a370_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index b3094315a3c0..2fa15a274719 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -202,7 +202,9 @@ static void __init axp_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &axp_coreclks);
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, axp_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 59fad9546c84..5f258c9bb68b 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -190,10 +190,14 @@ static void __init dove_clk_init(struct device_node *np)
mvebu_coreclk_setup(np, &dove_coreclks);
- if (ddnp)
+ if (ddnp) {
dove_divider_clk_init(ddnp);
+ of_node_put(ddnp);
+ }
- if (cgnp)
+ if (cgnp) {
mvebu_clk_gating_setup(cgnp, dove_gating_desc);
+ of_node_put(cgnp);
+ }
}
CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index a2a8d614039d..890ebf623261 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -333,6 +333,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
if (cgnp) {
mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
+
+ of_node_put(cgnp);
}
}
CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index c40b1804f58c..bb556f9bbeda 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -362,6 +362,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a071bba8018c..29abb600d7e1 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -107,7 +107,7 @@ static int update_config(struct clk_rcg2 *rcg)
}
WARN(1, "%s: rcg didn't update its configuration.", name);
- return 0;
+ return -EBUSY;
}
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
@@ -194,8 +194,13 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw,
clk_flags = clk_hw_get_flags(hw);
p = clk_hw_get_parent_by_index(hw, index);
+ if (!p)
+ return -EINVAL;
+
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
+ if (!rate)
+ rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index fffcbaf0fba7..f89a9f0aa606 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -37,6 +37,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;
+ if (!f->freq)
+ return f;
+
for (; f->freq; f++)
if (rate <= f->freq)
return f;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index fe03e6fbc7df..ea6c227331fc 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -140,22 +140,6 @@ static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
"gpll0_early_div"
};
-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL2, 2 },
- { P_GPLL3, 3 },
- { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = {
- "xo",
- "gpll0",
- "gpll2",
- "gpll3",
- "gpll0_early_div"
-};
-
static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -194,26 +178,6 @@ static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early
"gpll0_early_div"
};
-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL2, 2 },
- { P_GPLL3, 3 },
- { P_GPLL1, 4 },
- { P_GPLL4, 5 },
- { P_GPLL0_EARLY_DIV, 6 }
-};
-
-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = {
- "xo",
- "gpll0",
- "gpll2",
- "gpll3",
- "gpll1",
- "gpll4",
- "gpll0_early_div"
-};
-
static struct clk_fixed_factor xo = {
.mult = 1,
.div = 1,
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index fe7d9ed1d436..b0a18bc1a27f 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -59,10 +59,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
u32 delay_num = 0;
/* See the comment for rockchip_mmc_set_phase below */
- if (!rate) {
- pr_err("%s: invalid clk rate\n", __func__);
+ if (!rate)
return -EINVAL;
- }
raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 523378d1396e..d62031eedbe6 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -361,8 +361,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(2), 5, GFLAGS),
MUX(SCLK_MAC, "sclk_macref", mux_sclk_macref_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(21), 4, 1, MFLAGS),
- GATE(0, "sclk_mac_lbtest", "sclk_macref",
- RK2928_CLKGATE_CON(2), 12, 0, GFLAGS),
+ GATE(0, "sclk_mac_lbtest", "sclk_macref", 0,
+ RK2928_CLKGATE_CON(2), 12, GFLAGS),
COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
@@ -390,8 +390,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
* Clock-Architecture Diagram 4
*/
- GATE(SCLK_SMC, "sclk_smc", "hclk_peri",
- RK2928_CLKGATE_CON(2), 4, 0, GFLAGS),
+ GATE(SCLK_SMC, "sclk_smc", "hclk_peri", 0,
+ RK2928_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NOMUX(SCLK_SPI0, "sclk_spi0", "pclk_peri", 0,
RK2928_CLKSEL_CON(25), 0, 7, DFLAGS,
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 39af05a589b3..32b130c53ff9 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -826,6 +826,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
RK3288_CLKSEL_CON(10),
RK3288_CLKSEL_CON(33),
RK3288_CLKSEL_CON(37),
+
+ /* We turn aclk_dmac1 on for suspend; this will restore it */
+ RK3288_CLKGATE_CON(10),
};
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
@@ -842,6 +845,14 @@ static int rk3288_clk_suspend(void)
}
/*
+ * Going into deep sleep (specifically setting PMU_CLR_DMA in
+ * RK3288_PMU_PWRMODE_CON1) appears to fail unless
+ * "aclk_dmac1" is on.
+ */
+ writel_relaxed(1 << (12 + 16),
+ rk3288_cru_base + RK3288_CLKGATE_CON(10));
+
+ /*
* Switch PLLs other than DPLL (for SDRAM) to slow mode to
* avoid crashes on resume. The Mask ROM on the system will
* put APLL, CPLL, and GPLL into slow mode at resume time
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 8bf7e805fd34..1271315b0c25 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
- unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
- unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
else
cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
- cpuclk->alt_parent = __clk_lookup(alt_parent);
+ cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
if (!cpuclk->alt_parent) {
pr_err("%s: could not lookup alternate parent %s\n",
__func__, alt_parent);
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index d4b6b517fe1b..bd38c6aa3897 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data {
*/
struct exynos_cpuclk {
struct clk_hw hw;
- struct clk *alt_parent;
+ struct clk_hw *alt_parent;
void __iomem *ctrl_base;
spinlock_t *lock;
const struct exynos_cpuclk_cfg_data *cfg;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index faab9b31baf5..91f9b79e3941 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1225,6 +1225,7 @@ static unsigned long __init exynos4_get_xom(void)
xom = readl(chipid_base + 8);
iounmap(chipid_base);
+ of_node_put(np);
}
return xom;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 13c09a740840..7f8c7cf3c2ab 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -170,12 +170,20 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
GATE_BUS_CPU,
GATE_SCLK_CPU,
CLKOUT_CMU_CPU,
+ APLL_CON0,
+ KPLL_CON0,
+ CPLL_CON0,
+ DPLL_CON0,
EPLL_CON0,
EPLL_CON1,
EPLL_CON2,
RPLL_CON0,
RPLL_CON1,
RPLL_CON2,
+ IPLL_CON0,
+ SPLL_CON0,
+ VPLL_CON0,
+ MPLL_CON0,
SRC_TOP0,
SRC_TOP1,
SRC_TOP2,
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 77e1e2491689..edb7197cc4b4 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
{
struct clk_dmn *clk = to_dmnclk(hw);
u32 cfg = clkc_readl(clk->regofs);
+ const char *name = clk_hw_get_name(hw);
/* parent of io domain can only be pll3 */
- if (strcmp(hw->init->name, "io") == 0)
+ if (strcmp(name, "io") == 0)
return 4;
WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
{
struct clk_dmn *clk = to_dmnclk(hw);
u32 cfg = clkc_readl(clk->regofs);
+ const char *name = clk_hw_get_name(hw);
/* parent of io domain can only be pll3 */
- if (strcmp(hw->init->name, "io") == 0)
+ if (strcmp(name, "io") == 0)
return -EINVAL;
cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
{
unsigned long fin;
unsigned ratio, wait, hold;
- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+ const char *name = clk_hw_get_name(hw);
+ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
fin = *parent_rate;
ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_dmn *clk = to_dmnclk(hw);
unsigned long fin;
unsigned ratio, wait, hold, reg;
- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+ const char *name = clk_hw_get_name(hw);
+ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
fin = parent_rate;
ratio = fin / rate;
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 35fabe1a32c3..269467e8e07e 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_a10_base_addr);
pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index c7f463172e4b..b4b44e9b5901 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+ of_node_put(clkmgr_np);
BUG_ON(!clk_mgr_base_addr);
pll_clk->hw.reg = clk_mgr_base_addr + reg;
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 5c6d37bdf247..765c6977484e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -132,7 +132,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
8, 4, /* N */
4, 2, /* K */
0, 4, /* M */
- BIT(31), /* gate */
+ BIT(31) | BIT(23) | BIT(22), /* gate */
BIT(28), /* lock */
CLK_SET_RATE_UNGATE);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index a26c8a19fe93..9dd6daaa1336 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -458,7 +458,7 @@ static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
-static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph0" };
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index 63fdb790df29..bee305bdddbe 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -78,6 +78,10 @@ static void __init sun8i_h3_bus_gates_init(struct device_node *node)
clk_parent = APB1;
else if (index >= 96 && index <= 127)
clk_parent = APB2;
+ else {
+ WARN_ON(true);
+ continue;
+ }
clk_reg = reg + 4 * (index / 32);
clk_bit = index % 32;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 66d1fc7dff58..1ab36a355daf 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -638,8 +638,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
pll_override_writel(val, params->pmc_divp_reg, pll);
val = pll_override_readl(params->pmc_divnm_reg, pll);
- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
- ~(divn_mask(pll) << div_nmp->override_divn_shift);
+ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
+ (divn_mask(pll) << div_nmp->override_divn_shift));
val |= (cfg->m << div_nmp->override_divm_shift) |
(cfg->n << div_nmp->override_divn_shift);
pll_override_writel(val, params->pmc_divnm_reg, pll);
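The one-line clk-pll fix above is a classic precedence/De Morgan bug: ~a | ~b equals ~(a & b), which for two disjoint bit-field masks is all-ones, so the old &= cleared nothing; ~(a | b) clears both fields as intended. A short demonstration with made-up mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0xFFFFFFFFu;
	uint32_t m_field = 0x000000FFu;	/* hypothetical divm mask/shift */
	uint32_t n_field = 0x0003FF00u;	/* hypothetical divn mask/shift */

	/* Buggy form: ~a | ~b == ~(a & b); for disjoint masks that is
	 * all-ones, so the &= clears nothing. */
	uint32_t buggy = val & (~m_field | ~n_field);

	/* Fixed form: ~(a | b) clears both bit fields. */
	uint32_t fixed = val & ~(m_field | n_field);

	printf("buggy: 0x%08X\n", buggy);	/* 0xFFFFFFFF */
	printf("fixed: 0x%08X\n", fixed);	/* 0xFFFC0000 */
	return 0;
}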
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 4ce4e7fb1124..d9c1f229c644 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -797,7 +797,11 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
- GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
+ /*
+ * Critical for RAM re-repair operation, which must occur on resume
+ * from LP1 system suspend and as part of CCPLEX cluster switching.
+ */
+ GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
index 91377abfefa1..17a04300f93b 100644
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -60,16 +60,16 @@ struct pmc_clk_init_data {
static DEFINE_SPINLOCK(clk_out_lock);
-static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern1",
+static const char *clk_out1_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern1",
};
-static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern2",
+static const char *clk_out2_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern2",
};
-static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2",
- "clk_m_div4", "extern3",
+static const char *clk_out3_parents[] = { "osc", "osc_div2",
+ "osc_div4", "extern3",
};
static struct pmc_clk_init_data pmc_clks[] = {
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 1ba871b7fe11..e5717807c00a 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -198,6 +198,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index fb0cf8b74516..ae3cbaeffd9c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -211,7 +211,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
static struct clocksource mct_frc = {
.name = "mct-frc",
- .rating = 400,
+ .rating = 450, /* use value higher than ARM arch timer */
.read = exynos4_frc_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -466,7 +466,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
- evt->rating = 450;
+ evt->rating = 500; /* use value higher than ARM arch timer */
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -563,7 +563,19 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
return 0;
out_irq:
- free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ if (mct_int_type == MCT_INT_PPI) {
+ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *pcpu_mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (pcpu_mevt->evt.irq != -1) {
+ free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
+ pcpu_mevt->evt.irq = -1;
+ }
+ }
+ }
return err;
}
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 4f87f3e76d83..c3e96de525a2 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -201,6 +201,11 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
cs->timer.base = base;
cs->timer.clk = clk;
@@ -274,6 +279,11 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
ce->timer.base = base;
ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a38a23f0b3f4..86d48f8c6a2e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -875,6 +875,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
+ if (!fattr->show)
+ return -EIO;
+
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
@@ -889,6 +892,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+ if (!fattr->store)
+ return -EIO;
+
get_online_cpus();
if (cpu_online(policy->cpu)) {
@@ -1065,6 +1071,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
@@ -1645,6 +1652,9 @@ void cpufreq_resume(void)
if (!cpufreq_driver)
return;
+ if (unlikely(!cpufreq_suspended))
+ return;
+
cpufreq_suspended = false;
if (!has_target() && !cpufreq_driver->resume)
@@ -2439,6 +2449,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (cpufreq_disabled())
return -ENODEV;
+ /*
+ * The cpufreq core depends heavily on the availability of device
+ * structure, make sure they are available before proceeding further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
@@ -2542,14 +2559,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
- .shutdown = cpufreq_suspend,
-};
-
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@@ -2561,8 +2570,6 @@ static int __init cpufreq_core_init(void)
cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
- register_syscore_ops(&cpufreq_syscore_ops);
-
return 0;
}
core_initcall(cpufreq_core_init);
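Among the cpufreq core changes above, the sysfs show()/store() wrappers now return -EIO when an attribute provides no handler rather than calling through a NULL pointer. A minimal userspace sketch of that guard (the struct layout and attribute names here are hypothetical, not the cpufreq API):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

struct freq_attr {
	const char *name;
	ssize_t (*show)(char *buf);	/* NULL for write-only attributes */
};

/* Bail out with -EIO when the attribute has no handler instead of
 * dereferencing a NULL function pointer. */
static ssize_t show_attr(const struct freq_attr *attr, char *buf)
{
	if (!attr->show)
		return -EIO;
	return attr->show(buf);
}

static ssize_t show_freq(char *buf)
{
	return sprintf(buf, "1200000\n");
}

int main(void)
{
	char buf[32];
	struct freq_attr readable = { "scaling_cur_freq", show_freq };
	struct freq_attr write_only = { "boost_trigger", NULL };

	printf("%zd\n", show_attr(&readable, buf));	/* bytes written */
	printf("%zd\n", show_attr(&write_only, buf));	/* -EIO */
	return 0;
}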
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 38d1a8216084..32c9524a6ec5 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -449,6 +449,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
/* Failure, so roll back. */
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+ kobject_put(&dbs_data->attr_set.kobj);
+
policy->governor_data = NULL;
if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f690085b1ad9..4fe999687415 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1413,7 +1413,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_params),
ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 35dd4d7ffee0..991b6a3062c4 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -145,10 +145,19 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
-
if (!cpu)
goto out;
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+ if (!max_freqp) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn)
dn = of_find_compatible_node(NULL, NULL,
@@ -184,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
- if (!max_freqp) {
- err = -EINVAL;
- goto out_unmap_sdcpwr;
- }
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
@@ -211,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
-out_unmap_sdcpwr:
- iounmap(sdcpwr_mapbase);
-
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ff44016ea031..641f8021855a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -551,6 +551,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
if (!voltage_gpio){
pr_err("missing cpu-vcore-select gpio\n");
return 1;
@@ -587,6 +588,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index a1d7fa48229d..b4fc65512aad 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -955,6 +955,12 @@ static int init_chip_info(void)
static inline void clean_chip_info(void)
{
+ int i;
+
+ /* flush any pending work items */
+ if (chips)
+ for (i = 0; i < nr_chips; i++)
+ cancel_work_sync(&chips[i].throttle);
kfree(chips);
}
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 5a4c5a639f61..2eaeebcc93af 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
return -EINVAL;
}
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index ab264d393233..3780e1aa6807 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -61,24 +61,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask already has a
+ * driver different from @drv.
*/
static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_driver *old_drv;
- if (__cpuidle_get_cpu_driver(cpu)) {
- __cpuidle_unset_driver(drv);
+ old_drv = __cpuidle_get_cpu_driver(cpu);
+ if (old_drv && old_drv != drv)
return -EBUSY;
- }
+ }
+ for_each_cpu(cpu, drv->cpumask)
per_cpu(cpuidle_drivers, cpu) = drv;
- }
return 0;
}
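
The reworked __cpuidle_set_driver() above first scans the whole cpumask for a conflicting owner and only then assigns, so a failure needs no rollback. A small stand-alone C sketch of that check-then-commit pattern; NCPU, set_driver() and the driver tokens are illustrative:

#include <stdio.h>

#define NCPU 4

static const void *cpu_driver[NCPU];

static int set_driver(const void *drv)
{
	int cpu;

	for (cpu = 0; cpu < NCPU; cpu++)
		if (cpu_driver[cpu] && cpu_driver[cpu] != drv)
			return -1;	/* a different driver already owns a CPU */

	for (cpu = 0; cpu < NCPU; cpu++)
		cpu_driver[cpu] = drv;	/* commit: no partial state to roll back */

	return 0;
}

int main(void)
{
	static const int drv_a = 1, drv_b = 2;

	printf("first register: %d\n", set_driver(&drv_a));	/* 0 */
	printf("same driver:    %d\n", set_driver(&drv_a));	/* 0 */
	printf("conflicting:    %d\n", set_driver(&drv_b));	/* -1 */
	return 0;
}
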
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4afca3968773..e3b8bebfdd30 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -138,7 +138,8 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
ctx->hash_final = 0;
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
+ SA_SAVE_IV : SA_NOT_SAVE_IV),
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c7524bbbaf98..e5e83c6de536 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -400,12 +400,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
dma_alloc_coherent(dev->core_dev->device,
dev->scatter_buffer_size * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
- if (!dev->scatter_buffer_va) {
- dma_free_coherent(dev->core_dev->device,
- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
- dev->sdr, dev->sdr_pa);
+ if (!dev->scatter_buffer_va)
return -ENOMEM;
- }
sd_array = dev->sdr;
@@ -646,6 +642,15 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
addr = dma_map_page(dev->core_dev->device, sg_page(dst),
dst->offset, dst->length, DMA_FROM_DEVICE);
}
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+
+ crypto4xx_memcpy_from_le32((u32 *)req->iv,
+ pd_uinfo->sr_va->save_iv,
+ crypto_skcipher_ivsize(skcipher));
+ }
+
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (ablk_req->base.complete != NULL)
ablk_req->base.complete(&ablk_req->base, 0);
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..a194ee0ddbb6 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
/* Find the TRNG device node and map it */
trng = of_find_matching_node(NULL, ppc4xx_trng_match);
- if (!trng || !of_device_is_available(trng))
+ if (!trng || !of_device_is_available(trng)) {
+ of_node_put(trng);
return;
+ }
dev->trng_base = of_iomap(trng, 0);
of_node_put(trng);
@@ -109,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
return;
err_out:
- of_node_put(trng);
iounmap(dev->trng_base);
kfree(rng);
dev->trng_base = NULL;
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
index 931d22531f51..7bbda51b7337 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.h
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
#else
static inline void ppc4xx_trng_probe(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
static inline void ppc4xx_trng_remove(
- struct crypto4xx_device *dev __maybe_unused) { }
+ struct crypto4xx_core_device *dev __maybe_unused) { }
#endif
#endif
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8dfffb..915253b9c912 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -87,7 +87,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
- bool has_ctr32;
bool has_gcm;
u32 max_burst_size;
};
@@ -923,8 +922,9 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
- u32 ctr, blocks;
size_t datalen;
+ u32 ctr;
+ u16 blocks, start, end;
bool use_dma, fragmented = false;
/* Check for transfer completion. */
@@ -936,27 +936,17 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
datalen = req->nbytes - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
- if (dd->caps.has_ctr32) {
- /* Check 32bit counter overflow. */
- u32 start = ctr;
- u32 end = start + blocks - 1;
-
- if (end < start) {
- ctr |= 0xffffffff;
- datalen = AES_BLOCK_SIZE * -start;
- fragmented = true;
- }
- } else {
- /* Check 16bit counter overflow. */
- u16 start = ctr & 0xffff;
- u16 end = start + (u16)blocks - 1;
-
- if (blocks >> 16 || end < start) {
- ctr |= 0xffff;
- datalen = AES_BLOCK_SIZE * (0x10000-start);
- fragmented = true;
- }
+
+ /* Check 16bit counter overflow. */
+ start = ctr & 0xffff;
+ end = start + blocks - 1;
+
+ if (blocks >> 16 || end < start) {
+ ctr |= 0xffff;
+ datalen = AES_BLOCK_SIZE * (0x10000 - start);
+ fragmented = true;
}
+
use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
/* Jump to offset. */
@@ -1926,7 +1916,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
- dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.max_burst_size = 1;
@@ -1935,14 +1924,12 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
- dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
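
The atmel-aes change above drops the has_ctr32 capability and always treats the CTR counter as wrapping at 16 bits, splitting ("fragmenting") a request at the wrap point. A user-space sketch of that length calculation, assuming the same AES_BLOCK_SIZE and field names; everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

static size_t ctr16_chunk(uint32_t ctr, size_t datalen, int *fragmented)
{
	uint32_t blocks = (datalen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;
	uint16_t start = ctr & 0xffff;
	uint16_t end = start + blocks - 1;	/* truncates like the driver's u16 */

	*fragmented = 0;
	if (blocks >> 16 || end < start) {
		*fragmented = 1;
		/* only the blocks that fit before the 16-bit wrap */
		return AES_BLOCK_SIZE * (size_t)(0x10000 - start);
	}
	return datalen;
}

int main(void)
{
	int frag;
	/* counter low word at 0xfffe: only two blocks fit before the wrap */
	size_t len = ctr16_chunk(0xabcdfffe, 16 * 100, &frag);

	printf("process %zu bytes first (fragmented=%d)\n", len, frag);
	return 0;
}
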
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f4a88d600fa..dbcfed5171b2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -76,7 +76,7 @@
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 10 * CAAM_CMD_SZ)
/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
@@ -478,6 +478,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
u32 geniv, moveiv;
u32 ctx1_iv_off = 0;
u32 *desc;
+ u32 *wait_cmd;
const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -740,6 +741,14 @@ copy_iv:
/* Will read cryptlen */
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /*
+ * Wait for IV transfer (ofifo -> class2) to finish before starting
+ * ciphertext transfer (ofifo -> external memory).
+ */
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_cmd);
+
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
@@ -2074,10 +2083,11 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
/*
* The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block. This is used e.g. by the CTS mode.
+ * ciphertext block when running in CBC mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
- ivsize, 0);
+ if ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ ivsize, ivsize, 0);
kfree(edesc);
@@ -2096,6 +2106,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
#ifdef DEBUG
@@ -2120,10 +2131,11 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
/*
* The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
+ * ciphertext block when running in CBC mode.
*/
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
- ivsize, 0);
+ if ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ ivsize, ivsize, 0);
kfree(edesc);
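
The caamalg callbacks above now copy the last ciphertext block into req->info only for CBC, since counter-style modes chain from their updated counter rather than from ciphertext. A plain C sketch of that copy, with user-space buffers standing in for the scatterlists:

#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* CBC chaining value: last ciphertext block of the processed request */
static void cbc_save_iv(unsigned char *iv,
			const unsigned char *ciphertext, size_t len)
{
	memcpy(iv, ciphertext + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}

int main(void)
{
	unsigned char ct[64], iv[AES_BLOCK_SIZE];
	size_t i;

	for (i = 0; i < sizeof(ct); i++)
		ct[i] = (unsigned char)i;

	cbc_save_iv(iv, ct, sizeof(ct));
	printf("next IV starts at byte %d\n", iv[0]);	/* 48 */
	return 0;
}
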
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d99cf124252c..02e7bd75515c 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -416,7 +416,10 @@ static int __init caam_rng_init(void)
#endif
dev_info(dev, "registering rng-caam\n");
- return hwrng_register(&caam_rng);
+
+ err = hwrng_register(&caam_rng);
+ if (!err)
+ return err;
free_rng_ctx:
kfree(rng_ctx);
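
caam_rng_init() above returns straight away when hwrng_register() succeeds and only falls through to the cleanup labels on failure; the inverted-looking "if (!err) return err;" is the success path. A stripped-down sketch of that flow, where register_rng() and the allocation are stand-ins:

#include <stdio.h>
#include <stdlib.h>

static int register_rng(void)
{
	return -1;	/* pretend registration failed */
}

static int rng_init(void)
{
	int err;
	void *ctx = malloc(64);

	if (!ctx)
		return -1;

	err = register_rng();
	if (!err)
		return err;	/* success: err == 0, nothing to undo */

	/* failure: unwind the earlier allocation */
	free(ctx);
	return err;
}

int main(void)
{
	printf("rng_init() = %d\n", rng_init());
	return 0;
}
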
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 89291c15015c..3f768699332b 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -79,8 +80,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
return -EINVAL;
if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
- (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
+ (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
(req->nbytes & (AES_BLOCK_SIZE - 1)))
return -EINVAL;
@@ -291,7 +291,7 @@ static struct ccp_aes_def aes_algs[] = {
.version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
- .blocksize = AES_BLOCK_SIZE,
+ .blocksize = 1,
.ivsize = AES_BLOCK_SIZE,
.alg_defaults = &ccp_aes_defaults,
},
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f796e36d7ec3..46d18f39fa7b 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -40,57 +40,63 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-/* Human-readable error strings */
-char *ccp_error_codes[] = {
- "",
- "ERR 01: ILLEGAL_ENGINE",
- "ERR 02: ILLEGAL_KEY_ID",
- "ERR 03: ILLEGAL_FUNCTION_TYPE",
- "ERR 04: ILLEGAL_FUNCTION_MODE",
- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
- "ERR 06: ILLEGAL_FUNCTION_SIZE",
- "ERR 07: Zlib_MISSING_INIT_EOM",
- "ERR 08: ILLEGAL_FUNCTION_RSVD",
- "ERR 09: ILLEGAL_BUFFER_LENGTH",
- "ERR 10: VLSB_FAULT",
- "ERR 11: ILLEGAL_MEM_ADDR",
- "ERR 12: ILLEGAL_MEM_SEL",
- "ERR 13: ILLEGAL_CONTEXT_ID",
- "ERR 14: ILLEGAL_KEY_ADDR",
- "ERR 15: 0xF Reserved",
- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
- "ERR 18: CMD_TIMEOUT",
- "ERR 19: IDMA0_AXI_SLVERR",
- "ERR 20: IDMA0_AXI_DECERR",
- "ERR 21: 0x15 Reserved",
- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
- "ERR 23: IDMA1_AIXI_DECERR",
- "ERR 24: 0x18 Reserved",
- "ERR 25: ZLIBVHB_AXI_SLVERR",
- "ERR 26: ZLIBVHB_AXI_DECERR",
- "ERR 27: 0x1B Reserved",
- "ERR 27: ZLIB_UNEXPECTED_EOM",
- "ERR 27: ZLIB_EXTRA_DATA",
- "ERR 30: ZLIB_BTYPE",
- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
- "ERR 36: ZLIB_LIMIT_REACHED",
- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
- "ERR 38: ODMA0_AXI_SLVERR",
- "ERR 39: ODMA0_AXI_DECERR",
- "ERR 40: 0x28 Reserved",
- "ERR 41: ODMA1_AXI_SLVERR",
- "ERR 42: ODMA1_AXI_DECERR",
- "ERR 43: LSB_PARITY_ERR",
+ /* Human-readable error strings */
+#define CCP_MAX_ERROR_CODE 64
+ static char *ccp_error_codes[] = {
+ "",
+ "ILLEGAL_ENGINE",
+ "ILLEGAL_KEY_ID",
+ "ILLEGAL_FUNCTION_TYPE",
+ "ILLEGAL_FUNCTION_MODE",
+ "ILLEGAL_FUNCTION_ENCRYPT",
+ "ILLEGAL_FUNCTION_SIZE",
+ "Zlib_MISSING_INIT_EOM",
+ "ILLEGAL_FUNCTION_RSVD",
+ "ILLEGAL_BUFFER_LENGTH",
+ "VLSB_FAULT",
+ "ILLEGAL_MEM_ADDR",
+ "ILLEGAL_MEM_SEL",
+ "ILLEGAL_CONTEXT_ID",
+ "ILLEGAL_KEY_ADDR",
+ "0xF Reserved",
+ "Zlib_ILLEGAL_MULTI_QUEUE",
+ "Zlib_ILLEGAL_JOBID_CHANGE",
+ "CMD_TIMEOUT",
+ "IDMA0_AXI_SLVERR",
+ "IDMA0_AXI_DECERR",
+ "0x15 Reserved",
+ "IDMA1_AXI_SLAVE_FAULT",
+ "IDMA1_AIXI_DECERR",
+ "0x18 Reserved",
+ "ZLIBVHB_AXI_SLVERR",
+ "ZLIBVHB_AXI_DECERR",
+ "0x1B Reserved",
+ "ZLIB_UNEXPECTED_EOM",
+ "ZLIB_EXTRA_DATA",
+ "ZLIB_BTYPE",
+ "ZLIB_UNDEFINED_SYMBOL",
+ "ZLIB_UNDEFINED_DISTANCE_S",
+ "ZLIB_CODE_LENGTH_SYMBOL",
+ "ZLIB _VHB_ILLEGAL_FETCH",
+ "ZLIB_UNCOMPRESSED_LEN",
+ "ZLIB_LIMIT_REACHED",
+ "ZLIB_CHECKSUM_MISMATCH0",
+ "ODMA0_AXI_SLVERR",
+ "ODMA0_AXI_DECERR",
+ "0x28 Reserved",
+ "ODMA1_AXI_SLVERR",
+ "ODMA1_AXI_DECERR",
};
-void ccp_log_error(struct ccp_device *d, int e)
+void ccp_log_error(struct ccp_device *d, unsigned int e)
{
- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
+ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
+ return;
+
+ if (e < ARRAY_SIZE(ccp_error_codes))
+ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
+ else
+ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}
/* List of CCPs, CCP count, read-write access lock, and access functions
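
ccp_log_error() above now validates the error code against a hard maximum and maps codes beyond the string table to "Unknown Error" instead of indexing past the array. A user-space sketch of the same bounds-checked lookup, with a shortened table:

#include <stdio.h>

#define MAX_ERROR_CODE 64

static const char *error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	"ILLEGAL_FUNCTION_TYPE",
};

static void log_error(unsigned int e)
{
	if (e >= MAX_ERROR_CODE) {
		fprintf(stderr, "error code %u out of range\n", e);
		return;
	}
	if (e < sizeof(error_codes) / sizeof(error_codes[0]))
		printf("error %u: %s\n", e, error_codes[e]);
	else
		printf("error %u: Unknown Error\n", e);
}

int main(void)
{
	log_error(2);	/* named code */
	log_error(50);	/* valid but unnamed */
	log_error(200);	/* rejected outright */
	return 0;
}
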
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 347b77108baa..cfe21d033745 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -607,7 +607,7 @@ void ccp_platform_exit(void);
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
-extern void ccp_log_error(struct ccp_device *, int);
+extern void ccp_log_error(struct ccp_device *, unsigned int);
struct ccp_device *ccp_alloc_struct(struct device *dev);
bool ccp_queues_suspended(struct ccp_device *ccp);
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 8d0eeb46d4a2..c4581510c3a1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -309,6 +309,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->entry);
INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS;
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ee4be1b0d30b..0a57b3db2d67 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
else
from = scc->black_memory;
- dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
+ dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
ctx->dst_nents * 8);
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
from, ctx->size, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
return -EINVAL;
}
@@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
to, len, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
return -EINVAL;
}
@@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
return;
}
- dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
- (void *)readl(scc->base + SCC_SCM_RED_START),
- (void *)readl(scc->base + SCC_SCM_BLACK_START));
+ dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
+ readl(scc->base + SCC_SCM_RED_START),
+ readl(scc->base + SCC_SCM_BLACK_START));
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d4dd3c48e25c..3e1df1594b4e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -26,6 +26,7 @@
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -33,7 +34,6 @@
#define DCP_ALIGNMENT 64
-
/*
* Null hashes to align with hw behavior on imx6sl and ull
* these are flipped for consistency with hw output
@@ -381,9 +381,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
if (limit_hit)
break;
}
- if (last_out_len >= AES_BLOCK_SIZE) {
- memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
- AES_BLOCK_SIZE);
+
+ /* Copy out the IV for CBC chaining */
+ if (!rctx->ecb) {
+ if (rctx->enc)
+ memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
+ else
+ memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
}
return ret;
@@ -627,49 +633,46 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
- const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
- uint8_t *src_buf;
-
struct scatterlist *src;
- unsigned int i, len, clen;
+ unsigned int i, len, clen, oft = 0;
int ret;
int fin = rctx->fini;
if (fin)
rctx->fini = 0;
- for_each_sg(req->src, src, nents, i) {
- src_buf = sg_virt(src);
- len = sg_dma_len(src);
-
- do {
- if (actx->fill + len > DCP_BUF_SZ)
- clen = DCP_BUF_SZ - actx->fill;
- else
- clen = len;
-
- memcpy(in_buf + actx->fill, src_buf, clen);
- len -= clen;
- src_buf += clen;
- actx->fill += clen;
-
- /*
- * If we filled the buffer and still have some
- * more data, submit the buffer.
- */
- if (len && actx->fill == DCP_BUF_SZ) {
- ret = mxs_dcp_run_sha(req);
- if (ret)
- return ret;
- actx->fill = 0;
- rctx->init = 0;
- }
- } while (len);
+ src = req->src;
+ len = req->nbytes;
+
+ while (len) {
+ if (actx->fill + len > DCP_BUF_SZ)
+ clen = DCP_BUF_SZ - actx->fill;
+ else
+ clen = len;
+
+ scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
+ 0);
+
+ len -= clen;
+ oft += clen;
+ actx->fill += clen;
+
+ /*
+ * If we filled the buffer and still have some
+ * more data, submit the buffer.
+ */
+ if (len && actx->fill == DCP_BUF_SZ) {
+ ret = mxs_dcp_run_sha(req);
+ if (ret)
+ return ret;
+ actx->fill = 0;
+ rctx->init = 0;
+ }
}
if (fin) {
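
The rewritten mxs-dcp hashing loop above walks the request by offset with scatterwalk_map_and_copy(), filling a fixed-size bounce buffer and flushing it whenever it fills while more data is pending. A stand-alone sketch of that fill-and-flush loop, where submit_buf() stands in for mxs_dcp_run_sha() and BUF_SZ for DCP_BUF_SZ:

#include <stdio.h>
#include <string.h>

#define BUF_SZ 64	/* stands in for DCP_BUF_SZ */

static unsigned char bounce[BUF_SZ];
static size_t fill;

static void submit_buf(void)
{
	printf("submit %zu bytes\n", fill);
	fill = 0;
}

static void hash_update(const unsigned char *src, size_t len)
{
	size_t oft = 0, clen;

	while (len) {
		clen = (fill + len > BUF_SZ) ? BUF_SZ - fill : len;

		memcpy(bounce + fill, src + oft, clen);
		len -= clen;
		oft += clen;
		fill += clen;

		/* buffer full and more data pending: flush it now */
		if (len && fill == BUF_SZ)
			submit_buf();
	}
}

int main(void)
{
	unsigned char data[200] = { 0 };

	hash_update(data, sizeof(data));	/* submits 64 three times; keeps 8 */
	printf("left in buffer: %zu bytes\n", fill);
	return 0;
}
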
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 47576098831f..b3ea6d60c458 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1632,6 +1632,11 @@ static bool spacc_is_compatible(struct platform_device *pdev,
return false;
}
+static void spacc_tasklet_kill(void *data)
+{
+ tasklet_kill(data);
+}
+
static int spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
@@ -1674,6 +1679,14 @@ static int spacc_probe(struct platform_device *pdev)
return -ENXIO;
}
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ ret = devm_add_action(&pdev->dev, spacc_tasklet_kill,
+ &engine->complete);
+ if (ret)
+ return ret;
+
if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
engine->name, engine)) {
dev_err(engine->dev, "failed to request IRQ\n");
@@ -1736,8 +1749,6 @@ static int spacc_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&engine->completed);
INIT_LIST_HEAD(&engine->in_progress);
engine->in_flight = 0;
- tasklet_init(&engine->complete, spacc_spacc_complete,
- (unsigned long)engine);
platform_set_drvdata(pdev, engine);
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 980e07475012..0d596a99f564 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -95,7 +95,7 @@ struct service_hndl {
static inline int get_current_node(void)
{
- return topology_physical_package_id(smp_processor_id());
+ return topology_physical_package_id(raw_smp_processor_id());
}
int adf_service_register(struct service_hndl *service);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 500e4090e2fd..5a37c075ee55 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -298,7 +298,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
}
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
+ struct scatterlist **dst)
{
void *pages;
int len;
@@ -510,7 +510,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
struct scatterlist *sg;
int err;
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index 0de2f62d51ff..b2e683713539 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -250,7 +250,10 @@ static int sun4i_hash(struct ahash_request *areq)
}
} else {
/* Since we have the flag final, we can go up to modulo 4 */
- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ if (areq->nbytes < 4)
+ end = 0;
+ else
+ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
}
/* TODO if SGlen % 4 and op->len == 0 then DMA */
@@ -283,8 +286,8 @@ static int sun4i_hash(struct ahash_request *areq)
*/
while (op->len < 64 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, end - i,
- 64 - op->len);
+ in_r = min(end - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
@@ -304,8 +307,8 @@ static int sun4i_hash(struct ahash_request *areq)
}
if (mi.length - in_i > 3 && i < end) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- ((mi.length - in_i) / 4) * 4);
+ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
+ in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
/* how many bytes we can write in the device*/
todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
@@ -331,8 +334,8 @@ static int sun4i_hash(struct ahash_request *areq)
if ((areq->nbytes - i) < 64) {
while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
/* how many bytes we can read from current SG */
- in_r = min3(mi.length - in_i, areq->nbytes - i,
- 64 - op->len);
+ in_r = min(areq->nbytes - i, 64 - op->len);
+ in_r = min_t(size_t, mi.length - in_i, in_r);
memcpy(op->buf + op->len, mi.addr + in_i, in_r);
op->len += in_r;
i += in_r;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 463033b4db1d..8b383d3d21c2 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -943,11 +943,13 @@ static void talitos_sg_unmap(struct device *dev,
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
- struct aead_request *areq)
+ struct aead_request *areq, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
@@ -956,7 +958,7 @@ static void ipsec_esp_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
areq->assoclen);
if (edesc->dma_len)
@@ -967,7 +969,7 @@ static void ipsec_esp_unmap(struct device *dev,
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
- areq->assoclen + areq->cryptlen - ivsize);
+ areq->assoclen + cryptlen - ivsize);
}
}
@@ -984,12 +986,11 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, true);
/* copy the generated ICV to dst */
if (edesc->icv_ool) {
@@ -998,9 +999,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
else
icvdata = &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
- sg = sg_last(areq->dst, edesc->dst_nents);
- memcpy((char *)sg_virt(sg) + sg->length - authsize,
- icvdata, authsize);
+ sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
+ authsize, areq->assoclen + areq->cryptlen);
}
kfree(edesc);
@@ -1016,19 +1016,27 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
char *oicv, *icv;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
if (!err) {
+ char icvdata[SHA512_DIGEST_SIZE];
+ int nents = edesc->dst_nents ? : 1;
+ unsigned int len = req->assoclen + req->cryptlen;
+
/* auth check */
- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
- icv = (char *)sg_virt(sg) + sg->length - authsize;
+ if (nents > 1) {
+ sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
+ len - authsize);
+ icv = icvdata;
+ } else {
+ icv = (char *)sg_virt(req->dst) + len - authsize;
+ }
if (edesc->dma_len) {
if (is_sec1)
@@ -1060,7 +1068,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
edesc = container_of(desc, struct talitos_edesc, desc);
- ipsec_esp_unmap(dev, edesc, req);
+ ipsec_esp_unmap(dev, edesc, req, false);
/* check ICV auth status */
if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
@@ -1167,6 +1175,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
* fill in and submit ipsec_esp descriptor
*/
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ bool encrypt,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1176,7 +1185,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
- unsigned int cryptlen = areq->cryptlen;
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
unsigned int ivsize = crypto_aead_ivsize(aead);
int tbl_off = 0;
int sg_count, ret;
@@ -1318,7 +1327,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
- ipsec_esp_unmap(dev, edesc, areq);
+ ipsec_esp_unmap(dev, edesc, areq, encrypt);
kfree(edesc);
}
return ret;
@@ -1427,9 +1436,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
unsigned int authsize = crypto_aead_authsize(authenc);
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
unsigned int ivsize = crypto_aead_ivsize(authenc);
+ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
- iv, areq->assoclen, areq->cryptlen,
+ iv, areq->assoclen, cryptlen,
authsize, ivsize, icv_stashing,
areq->base.flags, encrypt);
}
@@ -1448,7 +1458,7 @@ static int aead_encrypt(struct aead_request *req)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
+ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
@@ -1458,17 +1468,15 @@ static int aead_decrypt(struct aead_request *req)
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
struct talitos_private *priv = dev_get_drvdata(ctx->dev);
struct talitos_edesc *edesc;
- struct scatterlist *sg;
void *icvdata;
- req->cryptlen -= authsize;
-
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, req->iv, 1, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
+ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
((!edesc->src_nents && !edesc->dst_nents) ||
priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
@@ -1480,7 +1488,8 @@ static int aead_decrypt(struct aead_request *req)
/* reset integrity check result bits */
edesc->desc.hdr_lo = 0;
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
+ return ipsec_esp(edesc, req, false,
+ ipsec_esp_decrypt_hwauth_done);
}
/* Have to check the ICV with software */
@@ -1493,11 +1502,10 @@ static int aead_decrypt(struct aead_request *req)
else
icvdata = &edesc->link_tbl[0];
- sg = sg_last(req->src, edesc->src_nents ? : 1);
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
-
- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1524,6 +1532,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
+static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
+ keylen == AES_KEYSIZE_256)
+ return ablkcipher_setkey(cipher, key, keylen);
+
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+}
+
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
@@ -1544,11 +1564,15 @@ static void ablkcipher_done(struct device *dev,
int err)
{
struct ablkcipher_request *areq = context;
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
struct talitos_edesc *edesc;
edesc = container_of(desc, struct talitos_edesc, desc);
common_nonsnoop_unmap(dev, edesc, areq);
+ memcpy(areq->info, ctx->iv, ivsize);
kfree(edesc);
@@ -1648,6 +1672,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, true);
@@ -1665,6 +1697,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct talitos_edesc *edesc;
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
+
+ if (!areq->nbytes)
+ return 0;
+
+ if (areq->nbytes % blocksize)
+ return -EINVAL;
/* allocate extended descriptor */
edesc = ablkcipher_edesc_alloc(areq, false);
@@ -2185,7 +2225,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2229,7 +2269,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha1-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2271,7 +2311,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2315,7 +2355,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha224-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2357,7 +2397,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2401,7 +2441,7 @@ static struct talitos_alg_template driver_algs[] = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-sha256-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2527,7 +2567,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-aes-talitos",
+ "cbc-aes-talitos-hsna",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2569,7 +2609,7 @@ static struct talitos_alg_template driver_algs[] = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
- "cbc-3des-talitos",
+ "cbc-3des-talitos-hsna",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_ASYNC,
},
@@ -2613,6 +2653,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2623,13 +2664,13 @@ static struct talitos_alg_template driver_algs[] = {
.alg.crypto = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-talitos",
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
CRYPTO_ALG_ASYNC,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .setkey = ablkcipher_aes_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
@@ -3002,6 +3043,7 @@ static int talitos_remove(struct platform_device *ofdev)
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&t_alg->algt.alg.hash);
break;
@@ -3111,7 +3153,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- alg->cra_alignmask = 0;
+ if (has_ftr_sec1(priv))
+ alg->cra_alignmask = 3;
+ else
+ alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
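
The new ablkcipher_aes_setkey() above rejects anything other than the three AES key sizes before handing off to the generic setkey. A small sketch of that gate; the constants mirror the kernel's AES_KEYSIZE_* values, the rest is illustrative:

#include <stdio.h>

#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32

static int aes_setkey(const unsigned char *key, unsigned int keylen)
{
	(void)key;

	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return 0;	/* would fall through to the real setkey */

	return -1;		/* -EINVAL: reject odd key sizes early */
}

int main(void)
{
	static const unsigned char key[32];

	printf("16-byte key: %d\n", aes_setkey(key, 16));
	printf("20-byte key: %d\n", aes_setkey(key, 20));
	return 0;
}
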
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241b0866..957377c309a9 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -2,13 +2,13 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
-TARGET := linux-ppc64le
+override flavour := linux-ppc64le
else
-TARGET := linux-ppc64
+override flavour := linux-ppc64
endif
quiet_cmd_perl = PERL $@
- cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
+ cmd_perl = $(PERL) $(<) $(flavour) > $(@)
$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
$(call cmd,perl)
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 0b4a293b8a1e..930a3f1e3ec0 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1318,7 +1318,7 @@ Loop_ctr32_enc:
addi $idx,$idx,16
bdnz Loop_ctr32_enc
- vadduwm $ivec,$ivec,$one
+ vadduqm $ivec,$ivec,$one
vmr $dat,$inptail
lvx $inptail,0,$inp
addi $inp,$inp,16
@@ -1815,7 +1815,7 @@ Lctr32_enc8x_three:
stvx_u $out1,$x10,$out
stvx_u $out2,$x20,$out
addi $out,$out,0x30
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_two:
@@ -1827,7 +1827,7 @@ Lctr32_enc8x_two:
stvx_u $out0,$x00,$out
stvx_u $out1,$x10,$out
addi $out,$out,0x20
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_one:
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..1bfe867c0b7b 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* GHASH routines supporting VMX instructions on the Power 8
*
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright (C) 2015, 2019 International Business Machines Inc.
*
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ *
+ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
+ * mechanism. The new approach is based on arm64 code, which is:
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <linux/types.h>
@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
const u8 *in, size_t len);
struct p8_ghash_ctx {
+ /* key used by vector asm */
u128 htable[16];
- struct crypto_shash *fallback;
+ /* key used by software fallback */
+ be128 key;
};
struct p8_ghash_desc_ctx {
u64 shash[2];
u8 buffer[GHASH_DIGEST_SIZE];
int bytes;
- struct shash_desc fallback_desc;
};
-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
-{
- const char *alg = "ghash-generic";
- struct crypto_shash *fallback;
- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_shash_set_flags(fallback,
- crypto_shash_get_flags((struct crypto_shash
- *) tfm));
-
- /* Check if the descsize defined in the algorithm is still enough. */
- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
- + crypto_shash_descsize(fallback)) {
- printk(KERN_ERR
- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
- alg,
- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
- crypto_shash_descsize(fallback));
- return -EINVAL;
- }
- ctx->fallback = fallback;
-
- return 0;
-}
-
-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback) {
- crypto_free_shash(ctx->fallback);
- ctx->fallback = NULL;
- }
-}
-
static int p8_ghash_init(struct shash_desc *desc)
{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
- dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
- return crypto_shash_init(&dctx->fallback_desc);
+ return 0;
}
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- return crypto_shash_setkey(ctx->fallback, key, keylen);
+
+ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static inline void __ghash_block(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ }
+}
+
+static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ src, srclen);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ srclen -= GHASH_BLOCK_SIZE;
+ src += GHASH_BLOCK_SIZE;
+ }
+ }
}
static int p8_ghash_update(struct shash_desc *desc,
@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_update(&dctx->fallback_desc, src,
- srclen);
- } else {
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
+ srclen);
+ dctx->bytes += srclen;
+ return 0;
}
- return 0;
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+
+ __ghash_block(ctx, dctx);
+
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
+ __ghash_blocks(ctx, dctx, src, len);
+ src += len;
+ srclen -= len;
}
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
}
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_final(&dctx->fallback_desc, out);
- } else {
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- dctx->bytes = 0;
- }
- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
- return 0;
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
+ __ghash_block(ctx, dctx);
+ dctx->bytes = 0;
}
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
}
struct shash_alg p8_ghash_alg = {
@@ -215,11 +178,9 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
- .cra_init = p8_ghash_init_tfm,
- .cra_exit = p8_ghash_exit_tfm,
},
};
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 41254e702f1e..2ce7cc94d78b 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -102,7 +102,8 @@ config ARM_TEGRA_DEVFREQ
config ARM_RK3399_DMC_DEVFREQ
tristate "ARM RK3399 DMC DEVFREQ Driver"
- depends on ARCH_ROCKCHIP
+ depends on (ARCH_ROCKCHIP && HAVE_ARM_SMCCC) || \
+ (COMPILE_TEST && HAVE_ARM_SMCCC)
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index db70cee71caa..1ba9d02381e8 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -135,6 +135,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
int lev, prev_lev, ret = 0;
unsigned long cur_time;
+ lockdep_assert_held(&devfreq->lock);
cur_time = jiffies;
/* Immediately exit if previous_freq is not initialized yet. */
@@ -481,11 +482,6 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
static void _remove_devfreq(struct devfreq *devfreq)
{
mutex_lock(&devfreq_list_lock);
- if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
- mutex_unlock(&devfreq_list_lock);
- dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
- return;
- }
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
@@ -557,6 +553,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
+ INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
@@ -985,7 +982,7 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (df->governor && df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
@@ -1170,12 +1167,17 @@ static ssize_t trans_stat_show(struct device *dev,
int i, j;
unsigned int max_state = devfreq->profile->max_state;
- if (!devfreq->stop_polling &&
- devfreq_update_status(devfreq, devfreq->previous_freq))
- return 0;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling &&
+ devfreq_update_status(devfreq, devfreq->previous_freq)) {
+ mutex_unlock(&devfreq->lock);
+ return 0;
+ }
+ mutex_unlock(&devfreq->lock);
+
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
for (i = 0; i < max_state; i++)
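
devfreq_update_status() above now asserts that devfreq->lock is held, so trans_stat_show() takes the lock around the update instead of calling it unlocked. A minimal pthread sketch of that pattern (build with -pthread; the stats layout is illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long time_in_state[3];
static int cur_level;

/* caller must hold 'lock' (the kernel version asserts this via lockdep) */
static void update_status(unsigned long elapsed)
{
	time_in_state[cur_level] += elapsed;
}

static void show_trans_stat(void)
{
	pthread_mutex_lock(&lock);
	update_status(10);		/* bring stats up to date first */
	printf("level %d: %lu ms\n", cur_level, time_in_state[cur_level]);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	show_trans_stat();
	show_trans_stat();
	return 0;
}
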
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index cd949800eed9..8851bc4e8e3e 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -33,7 +33,7 @@ config DEVFREQ_EVENT_EXYNOS_PPMU
config DEVFREQ_EVENT_ROCKCHIP_DFI
tristate "ROCKCHIP DFI DEVFREQ event Driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
This add the devfreq-event driver for Rockchip SoC. It provides DFI
(DDR Monitor Module) driver to count ddr load.
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 1b21bb60e797..2c8f41fbe94f 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -198,11 +198,10 @@ static void exynos_bus_exit(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to disable the devfreq-event devices\n");
- if (bus->regulator)
- regulator_disable(bus->regulator);
-
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
+ if (bus->regulator)
+ regulator_disable(bus->regulator);
}
/*
@@ -391,6 +390,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
struct exynos_bus *bus;
int ret, max_state;
unsigned long min_freq, max_freq;
+ bool passive = false;
if (!np) {
dev_err(dev, "failed to find devicetree node\n");
@@ -404,27 +404,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
bus->dev = &pdev->dev;
platform_set_drvdata(pdev, bus);
- /* Parse the device-tree to get the resource information */
- ret = exynos_bus_parse_of(np, bus);
- if (ret < 0)
- return ret;
-
profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
- if (!profile) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!profile)
+ return -ENOMEM;
node = of_parse_phandle(dev->of_node, "devfreq", 0);
if (node) {
of_node_put(node);
- goto passive;
+ passive = true;
} else {
ret = exynos_bus_parent_parse_of(np, bus);
+ if (ret < 0)
+ return ret;
}
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
- goto err;
+ goto err_reg;
+
+ if (passive)
+ goto passive;
/* Initialize the struct profile and governor data for parent device */
profile->polling_ms = 50;
@@ -514,6 +514,9 @@ out:
err:
dev_pm_opp_of_remove_table(dev);
clk_disable_unprepare(bus->clk);
+err_reg:
+ if (!passive)
+ regulator_disable(bus->regulator);
return ret;
}
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 5be96b2249e7..62c262fc2178 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
static int devfreq_passive_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
- struct device *dev = devfreq->dev.parent;
struct devfreq_passive_data *p_data
= (struct devfreq_passive_data *)devfreq->data;
struct devfreq *parent = (struct devfreq *)p_data->parent;
@@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
p_data->this = devfreq;
nb->notifier_call = devfreq_passive_notifier_call;
- ret = devm_devfreq_register_notifier(dev, parent, nb,
+ ret = devfreq_register_notifier(parent, nb,
DEVFREQ_TRANSITION_NOTIFIER);
break;
case DEVFREQ_GOV_STOP:
- devm_devfreq_unregister_notifier(dev, parent, nb,
- DEVFREQ_TRANSITION_NOTIFIER);
+ WARN_ON(devfreq_unregister_notifier(parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER));
break;
default:
break;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f0c374d6ab40..05d66d4fc31d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -204,7 +204,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
a_fences = get_fences(a, &a_num_fences);
b_fences = get_fences(b, &b_num_fences);
if (a_num_fences > INT_MAX - b_num_fences)
- return NULL;
+ goto err;
num_fences = a_num_fences + b_num_fences;
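
sync_file_merge() above keeps its pre-addition overflow check but now jumps to the error path instead of returning NULL directly. The check itself is the reusable pattern: compare one operand against INT_MAX minus the other, so the sum can never overflow signed int. A user-space sketch:

#include <limits.h>
#include <stdio.h>

static int checked_add(int a, int b, int *sum)
{
	if (a < 0 || b < 0 || a > INT_MAX - b)
		return -1;	/* would overflow: refuse */
	*sum = a + b;
	return 0;
}

int main(void)
{
	int sum;

	if (checked_add(3, 4, &sum) == 0)
		printf("3 + 4 = %d\n", sum);
	if (checked_add(INT_MAX, 1, &sum) != 0)
		printf("INT_MAX + 1 rejected\n");
	return 0;
}
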
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2e476accd3ba..adc750c4ef77 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -120,7 +120,7 @@ config DMA_JZ4740
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b222dd7afe8e..12d904829324 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1608,7 +1608,11 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc,
xfer_node);
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- BUG_ON(!desc->active_xfer);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+ spin_unlock_bh(&atchan->lock);
+ return;
+ }
txd = &desc->tx_dma_desc;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6ba53bbd0e16..b984d00bc055 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -891,8 +891,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (rc)
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
+ }
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 74794c9859f6..f0932f25a9b1 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1797,13 +1797,10 @@ static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
static int coh901318_config(struct coh901318_chan *cohc,
struct coh901318_params *param)
{
- unsigned long flags;
const struct coh901318_params *p;
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
- spin_lock_irqsave(&cohc->lock, flags);
-
if (param)
p = param;
else
@@ -1823,8 +1820,6 @@ static int coh901318_config(struct coh901318_chan *cohc,
coh901318_set_conf(cohc, p->config);
coh901318_set_ctrl(cohc, p->ctrl_lli_last);
- spin_unlock_irqrestore(&cohc->lock, flags);
-
return 0;
}
@@ -1949,8 +1944,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
return;
}
- spin_lock(&cohc->lock);
-
/*
* When we reach this point, at least one queue item
* should have been moved over from cohc->queue to
@@ -1971,8 +1964,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- spin_unlock(&cohc->lock);
-
/*
* This tasklet will remove items from cohc->active
* and thus terminates them.
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index 2ceb5a26f860..709e1308777a 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct cppi41_dd *cdd = c->cdd;
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(cdd->ddev.dev);
+
+ return NULL;
+ }
+
+ if (cdd->is_suspended)
+ goto err_out_not_ready;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
d++;
}
- return &c->txd;
+ txd = &c->txd;
+
+err_out_not_ready:
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+ return txd;
}
static void cppi41_compute_td_desc(struct cppi41_desc *d)
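
The cppi41 hunk above brackets descriptor preparation with a runtime-PM reference so the channel is only programmed while the controller is guaranteed to be powered, and the reference is always dropped again via autosuspend. The following is a minimal sketch of that get/check/put pattern; the demo_chan type, demo_build_desc() helper and is_suspended flag are illustrative assumptions, not the driver's real symbols.

/* Hedged sketch: runtime-PM bracket around a prepare callback. */
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct demo_chan {
	struct device *dev;	/* device whose runtime-PM state gates HW access */
	bool is_suspended;	/* set by the driver's runtime_suspend callback */
};

static void *demo_build_desc(struct demo_chan *c)
{
	return c;		/* placeholder for real descriptor setup */
}

static void *demo_prep(struct demo_chan *c)
{
	void *desc = NULL;
	int error;

	error = pm_runtime_get(c->dev);		/* async resume, takes a reference */
	if (error < 0) {
		pm_runtime_put_noidle(c->dev);	/* rebalance the failed get */
		return NULL;
	}

	if (!c->is_suspended)
		desc = demo_build_desc(c);	/* touch hardware state only when resumed */

	/* Drop the reference either way; autosuspend defers the power-down. */
	pm_runtime_mark_last_busy(c->dev);
	pm_runtime_put_autosuspend(c->dev);
	return desc;
}
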
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..9887f2a14aa9 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -451,7 +451,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
if (chan->hw_2d) {
if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
- !axi_dmac_check_len(chan, xt->numf))
+ xt->numf == 0)
return NULL;
if (xt->sgl[0].size + dst_icg > chan->max_length ||
xt->sgl[0].size + src_icg > chan->max_length)
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 803cfb4523b0..aca2d6fd92d5 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -580,7 +580,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
to_jz4780_dma_desc(vdesc), 0);
} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
- (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
+ jzchan->curr_hwdesc + 1);
} else
txstate->residue = 0;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb9f393..7536fe80bc33 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -87,13 +87,20 @@ static void dw_dma_acpi_controller_register(struct dw_dma *dw)
dma_cap_set(DMA_SLAVE, info->dma_cap);
info->filter_fn = dw_dma_acpi_filter;
- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
- info);
+ ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
if (ret)
dev_err(dev, "could not register acpi_dma_controller\n");
}
+
+static void dw_dma_acpi_controller_free(struct dw_dma *dw)
+{
+ struct device *dev = dw->dma.dev;
+
+ acpi_dma_controller_free(dev);
+}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
+static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_OF
@@ -226,6 +233,9 @@ static int dw_remove(struct platform_device *pdev)
{
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ if (ACPI_HANDLE(&pdev->dev))
+ dw_dma_acpi_controller_free(chip->dw);
+
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 57962bff7532..56ec72468745 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2268,9 +2268,6 @@ static int edma_probe(struct platform_device *pdev)
ecc->default_queue = info->default_queue;
- for (i = 0; i < ecc->num_slots; i++)
- edma_write_slot(ecc, i, &dummy_paramset);
-
if (info->rsv) {
/* Set the reserved slots in inuse list */
rsv_slots = info->rsv->rsv_slots;
@@ -2283,6 +2280,12 @@ static int edma_probe(struct platform_device *pdev)
}
}
+ for (i = 0; i < ecc->num_slots; i++) {
+ /* Reset only unused - not reserved - paRAM slots */
+ if (!test_bit(i, ecc->slot_inuse))
+ edma_write_slot(ecc, i, &dummy_paramset);
+ }
+
/* Clear the xbar mapped channels in unused list */
xbar_chans = info->xbar_chans;
if (xbar_chans) {
@@ -2337,8 +2340,10 @@ static int edma_probe(struct platform_device *pdev)
ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
sizeof(*ecc->tc_list), GFP_KERNEL);
- if (!ecc->tc_list)
- return -ENOMEM;
+ if (!ecc->tc_list) {
+ ret = -ENOMEM;
+ goto err_reg1;
+ }
for (i = 0;; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 29d04ca71d52..15525a2b8ebd 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
}
hsu_chan_disable(hsuc);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1953e57505f4..f17a4c7a1781 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -589,7 +589,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- idma64->dma.dev = chip->dev;
+ idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -629,6 +629,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
{
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
+ struct device *sysdev = dev->parent;
struct resource *mem;
int ret;
@@ -645,11 +646,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
chip->dev = dev;
+ chip->sysdev = sysdev;
ret = idma64_probe(chip);
if (ret)
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 6b816878e5e7..baa32e1425de 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
/**
* struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
+ * @sysdev: struct device of the physical device that does DMA
* @irq: irq line
* @regs: memory mapped I/O space
 * @idma64: struct idma64 that is filled by idma64_probe()
*/
struct idma64_chip {
struct device *dev;
+ struct device *sysdev;
int irq;
void __iomem *regs;
struct idma64 *idma64;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 1cfa1d9bc971..f8786f60cdc1 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -290,7 +290,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
struct scatterlist *sg = d->sg;
unsigned long now;
- now = min(d->len, sg_dma_len(sg));
+ now = min_t(size_t, d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 77354c999c76..cf90b7f36209 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1970,6 +1970,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
if (!sdma->script_number)
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+ / sizeof(s32)) {
+ dev_err(sdma->dev,
+			"SDMA script number %d does not match the firmware.\n",
+ sdma->script_number);
+ return;
+ }
+
for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
@@ -2363,28 +2371,6 @@ static int sdma_probe(struct platform_device *pdev)
if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- if (pdata) {
- ret = sdma_get_firmware(sdma, pdata->fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
- } else {
- /*
- * Because that device tree does not encode ROM script address,
- * the RAM script in firmware is mandatory for device tree
- * probe, otherwise it fails.
- */
- ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
- &fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware name\n");
- else {
- ret = sdma_get_firmware(sdma, fw_name);
- if (ret)
- dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
- }
- }
- sdma->fw_name = fw_name;
-
sdma->dma_device.dev = &pdev->dev;
sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
@@ -2434,6 +2420,34 @@ static int sdma_probe(struct platform_device *pdev)
	/* There may be multiple sdma devices such as i.mx8mscale */
sdma->idx = sdma_dev_idx++;
+ /*
+ * Kick off firmware loading as the very last step:
+ * attempt to load firmware only if we're not on the error path, because
+ * the firmware callback requires a fully functional and allocated sdma
+ * instance.
+ */
+ if (pdata) {
+ ret = sdma_get_firmware(sdma, pdata->fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
+ } else {
+ /*
+ * Because that device tree does not encode ROM script address,
+ * the RAM script in firmware is mandatory for device tree
+ * probe, otherwise it fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get firmware name\n");
+ } else {
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
+ }
+ }
+ sdma->fw_name = fw_name;
+
return 0;
err_register:
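
The comment added in the hunk above moves firmware loading to the very end of probe because the asynchronous completion callback must only ever run against a fully initialised device. A small, hedged sketch of that ordering with request_firmware_nowait() follows; the myd_* names and the "myd-scripts.bin" file name are made up for illustration.

/* Hedged sketch: request firmware only after probe can no longer fail. */
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

struct myd_device {
	struct device *dev;
	/* everything the callback needs is initialised before the request */
};

static void myd_fw_ready(const struct firmware *fw, void *context)
{
	struct myd_device *myd = context;

	if (!fw) {
		dev_warn(myd->dev, "firmware unavailable, using ROM scripts\n");
		return;
	}
	/* parse fw->data / fw->size and program the engine here */
	release_firmware(fw);
}

static int myd_probe_tail(struct myd_device *myd)
{
	/* Last step of probe: the async callback may fire at any point after this. */
	return request_firmware_nowait(THIS_MODULE, true, "myd-scripts.bin",
				       myd->dev, GFP_KERNEL, myd, myd_fw_ready);
}
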
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce04bf5..1389f0582e29 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -394,10 +394,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
SZ_2M, &descs->hw, flags);
- if (!descs->virt && (i > 0)) {
+ if (!descs->virt) {
int idx;
for (idx = 0; idx < i; idx++) {
+ descs = &ioat_chan->descs[idx];
dma_free_coherent(to_dev(ioat_chan), SZ_2M,
descs->virt, descs->hw);
descs->virt = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index d139706f01fe..9d936584d331 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@@ -573,7 +573,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@@ -592,7 +592,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
- return 0;
+ return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@@ -609,7 +609,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
- return i;
}
/**
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a410657f7bcd..012584cf3c17 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
- "this_desc: %#x next_desc: %#x ack: %d\n",
+ "this_desc: %#x next_desc: %#llx ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
- iter->async_tx.phys, iop_desc_get_next_desc(iter),
+ iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
prefetch(_iter);
prefetch(&_iter->async_tx);
@@ -315,9 +315,9 @@ retry:
int i;
dev_dbg(iop_chan->device->common.dev,
"allocated slot: %d "
- "(desc %p phys: %#x) slots_per_op %d\n",
+ "(desc %p phys: %#llx) slots_per_op %d\n",
iter->idx, iter->hw_desc,
- iter->async_tx.phys, slots_per_op);
+ (u64)iter->async_tx.phys, slots_per_op);
/* pre-ack all but the last descriptor */
if (num_slots != slots_per_op)
@@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
return NULL;
BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
__func__, len);
spin_lock_bh(&iop_chan->lock);
@@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
@@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
if (unlikely(!len))
return NULL;
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
@@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u flags: %lx\n",
+ "%s src_cnt: %d len: %zu flags: %lx\n",
__func__, src_cnt, len, flags);
if (dmaf_p_disabled_continue(flags))
@@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
return NULL;
BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
+ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
__func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f75285a4d9..5d524f29c5f1 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1044,6 +1044,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
+ dma_dev->dev = &pdev->dev;
mv_chan->xordev = xordev;
/*
@@ -1076,7 +1077,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
- dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 6b16ce390dce..9f901f16bddc 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1429,8 +1429,10 @@ static int omap_dma_probe(struct platform_device *pdev)
rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
IRQF_SHARED, "omap-dma-engine", od);
- if (rc)
+ if (rc) {
+ omap_dma_free(od);
return rc;
+ }
}
if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6d7e3cd4aba4..57b375d0de29 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1020,6 +1020,7 @@ static void _stop(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->base;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ u32 inten = readl(regs + INTEN);
if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -1032,10 +1033,13 @@ static void _stop(struct pl330_thread *thrd)
_emit_KILL(0, insn);
- /* Stop generating interrupts for SEV */
- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+ /* clear the event */
+ if (inten & (1 << thrd->ev))
+ writel(1 << thrd->ev, regs + INTCLR);
+ /* Stop generating interrupts for SEV */
+ writel(inten & ~(1 << thrd->ev), regs + INTEN);
}
/* Start doing req 'idx' of thread 'thrd' */
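
The _stop() change above snapshots INTEN once, kills the thread, then clears any event already latched for it via INTCLR before finally masking the event in INTEN, so no stale interrupt can fire for a dead thread. Below is a minimal, hedged sketch of that ack-then-mask ordering against a hypothetical interrupt block; the register offsets are assumptions.

/* Hedged sketch: acknowledge a latched event before masking its source. */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_INTEN	0x20	/* assumed offset: per-event enable mask */
#define DEMO_INTCLR	0x2c	/* assumed offset: write-1-to-clear status */

static void demo_stop_event(void __iomem *regs, unsigned int ev)
{
	u32 inten = readl(regs + DEMO_INTEN);

	if (inten & BIT(ev))
		writel(BIT(ev), regs + DEMO_INTCLR);	/* ack anything already pending */

	writel(inten & ~BIT(ev), regs + DEMO_INTEN);	/* then stop generating the event */
}
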
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 6497f5283e3b..81acbde13394 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -686,7 +686,21 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
+ /*
+ * If we have transactions queued, then some might be committed to the
+ * hardware in the desc fifo. The only way to reset the desc fifo is
+ * to do a hardware reset (either by pipe or the entire block).
+ * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+ * pipe. If the pipe is left disabled (default state after pipe reset)
+ * and is accessed by a connected hardware engine, a fatal error in
+ * the BAM will occur. There is a small window where this could happen
+ * with bam_chan_init_hw(), but it is assumed that the caller has
+ * stopped activity on any attached hardware engine. Make sure to do
+ * this first so that the BAM hardware doesn't cause memory corruption
+ * by accessing freed resources.
+ */
if (bchan->curr_txd) {
+ bam_chan_init_hw(bchan, bchan->curr_txd->dir);
list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
bchan->curr_txd = NULL;
}
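
The long comment above is the key point of this hunk: descriptors already committed to the hardware FIFO can only be discarded by resetting the pipe, and that reset has to happen before the descriptors (and their buffers) are handed back, otherwise the engine may still write into freed memory. A hedged sketch of that terminate ordering is below; demo_chan, demo_hw_pipe_reset() and the list layout are illustrative only.

/* Hedged sketch: reset the hardware before reclaiming in-flight descriptors. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_chan {
	spinlock_t lock;
	struct list_head issued;	/* descriptors queued but not yet completed */
	void *curr_txd;			/* descriptor committed to the HW FIFO */
};

static void demo_hw_pipe_reset(struct demo_chan *c)
{
	/* placeholder: pipe reset + re-init, after which the FIFO is empty */
}

static void demo_terminate_all(struct demo_chan *c, struct list_head *to_free)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->curr_txd) {
		demo_hw_pipe_reset(c);		/* 1: hardware forgets the descriptor */
		c->curr_txd = NULL;		/* 2: only then reclaim it in software */
	}
	list_splice_tail_init(&c->issued, to_free);
	spin_unlock_irqrestore(&c->lock, flags);
	/* caller frees everything on to_free outside the lock */
}
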
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10a94b5..5444f39bf939 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -131,24 +131,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
desc = &mdesc->desc;
last_cookie = desc->cookie;
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+
spin_lock_irqsave(&mchan->lock, irqflags);
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else {
+ result.result = DMA_TRANS_ABORTED;
+ }
+
dma_cookie_complete(desc);
spin_unlock_irqrestore(&mchan->lock, irqflags);
- llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
dmaengine_desc_get_callback(desc, &cb);
dma_run_dependencies(desc);
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
-
- if (llstat == DMA_COMPLETE) {
- mchan->last_success = last_cookie;
- result.result = DMA_TRANS_NOERROR;
- } else
- result.result = DMA_TRANS_ABORTED;
-
spin_unlock_irqrestore(&mchan->lock, irqflags);
dmaengine_desc_callback_invoke(&cb, &result);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index d032032337e7..e4fe24be3d7a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1111,7 +1111,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
/* Someone calling slave DMA on a generic channel? */
- if (rchan->mid_rid < 0 || !sg_len) {
+ if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
dev_warn(chan->device->dev,
"%s: bad parameter: len=%d, id=%d\n",
__func__, sg_len, rchan->mid_rid);
@@ -1311,6 +1311,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
unsigned int residue;
+ bool cyclic;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
@@ -1318,10 +1319,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&rchan->lock, flags);
residue = rcar_dmac_chan_get_residue(rchan, cookie);
+ cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
spin_unlock_irqrestore(&rchan->lock, flags);
/* if there's no residue, the cookie is complete */
- if (!residue)
+ if (!residue && !cyclic)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8684d11b29bb..68b41daab3a8 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
*/
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
D40_DREG_LCPA,
D40_DREG_LCLA,
D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG,
D40_CHAN_REG_SSELT,
D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3722b9d8d9fe..4eaf92b2b886 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -288,7 +288,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Do not allocate if desc are waiting for ack */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
- if (async_tx_test_ack(&dma_desc->txd)) {
+ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
dma_desc->txd.flags = 0;
@@ -635,7 +635,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
dma_desc = sgreq->dma_desc;
- dma_desc->bytes_transferred += sgreq->req_len;
+ /* if we dma for long enough the transfer count will wrap */
+ dma_desc->bytes_transferred =
+ (dma_desc->bytes_transferred + sgreq->req_len) %
+ dma_desc->bytes_requested;
	/* Callback needs to be called */
if (!dma_desc->cb_count)
@@ -752,10 +755,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
- if (list_empty(&tdc->pending_sg_req)) {
- spin_unlock_irqrestore(&tdc->lock, flags);
- return 0;
- }
if (!tdc->busy)
goto skip_dma_stop;
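
The "transfer count will wrap" comment above is handled by keeping bytes_transferred modulo bytes_requested, so the counter can run indefinitely for cyclic transfers without overflowing past the buffer size. A tiny, self-contained arithmetic sketch (plain C, no driver symbols):

/* Hedged sketch: wrap a running byte counter at the requested length. */
#include <stdio.h>

static unsigned int advance(unsigned int transferred, unsigned int step,
			    unsigned int requested)
{
	return (transferred + step) % requested;	/* wraps instead of growing forever */
}

int main(void)
{
	const unsigned int requested = 4096;	/* one full cyclic buffer */
	unsigned int transferred = 0;
	int i;

	for (i = 0; i < 6; i++) {
		transferred = advance(transferred, 1024, requested);
		printf("after period %d: transferred=%u\n", i, transferred);
	}
	return 0;	/* prints 1024, 2048, 3072, 0, 1024, 2048 */
}
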
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..2d4aeba579f7 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -99,6 +98,7 @@ struct tegra_adma_chan_regs {
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
+ unsigned int cmd;
unsigned int tc;
};
@@ -128,6 +128,7 @@ struct tegra_adma_chan {
enum dma_transfer_direction sreq_dir;
unsigned int sreq_index;
bool sreq_reserved;
+ struct tegra_adma_chan_regs ch_regs;
/* Transfer count and position info */
unsigned int tx_buf_count;
@@ -141,6 +142,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -635,23 +637,67 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
static int tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int i;
tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (!tdma->global_cmd)
+ goto clk_disable;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
+ /* skip if channel is not active */
+ if (!ch_reg->cmd)
+ continue;
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
+ }
+
+clk_disable:
+ clk_disable_unprepare(tdma->ahub_clk);
- return pm_clk_suspend(dev);
+ return 0;
}
static int tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
- int ret;
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int ret, i;
- ret = pm_clk_resume(dev);
- if (ret)
+ ret = clk_prepare_enable(tdma->ahub_clk);
+ if (ret) {
+ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
-
+ }
tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (!tdma->global_cmd)
+ return 0;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ /* skip if channel was not active earlier */
+ if (!ch_reg->cmd)
+ continue;
+ tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+ tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
+ }
+
return 0;
}
@@ -692,23 +738,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
- if (ret)
- goto clk_destroy;
-
- pm_runtime_enable(&pdev->dev);
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- goto rpm_disable;
-
- ret = tegra_adma_init(tdma);
- if (ret)
- goto rpm_put;
+ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
+ if (IS_ERR(tdma->ahub_clk)) {
+ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
+ return PTR_ERR(tdma->ahub_clk);
+ }
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
@@ -727,6 +761,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->tdma = tdma;
}
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = tegra_adma_init(tdma);
+ if (ret)
+ goto rpm_put;
+
dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -768,15 +812,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
dma_remove:
dma_async_device_unregister(&tdma->dma_dev);
-irq_dispose:
- while (--i >= 0)
- irq_dispose_mapping(tdma->channels[i].irq);
rpm_put:
pm_runtime_put_sync(&pdev->dev);
rpm_disable:
pm_runtime_disable(&pdev->dev);
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
+irq_dispose:
+ while (--i >= 0)
+ irq_dispose_mapping(tdma->channels[i].irq);
return ret;
}
@@ -786,6 +828,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
struct tegra_adma *tdma = platform_get_drvdata(pdev);
int i;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
for (i = 0; i < tdma->nr_channels; ++i)
@@ -793,7 +836,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
return 0;
}
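
The tegra210-adma rework above replaces pm_clk with an explicit ahub clock and saves/restores per-channel registers across runtime suspend, skipping channels whose command register shows them idle. A hedged sketch of that shape follows; the register offsets, channel stride and demo_* types are assumptions, not the real ADMA layout.

/* Hedged sketch: save active-channel context on suspend, replay it on resume. */
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_CH_CMD	0x00	/* assumed: non-zero while the channel is running */
#define DEMO_CH_CTRL	0x24	/* assumed: one of the registers worth preserving */
#define DEMO_CH_STRIDE	0x80	/* assumed distance between channel register blocks */

struct demo_ch_ctx { u32 cmd, ctrl; };

struct demo_adma {
	void __iomem *base;
	struct clk *ahub_clk;
	unsigned int nr_channels;
	struct demo_ch_ctx ctx[8];
};

static void __iomem *demo_ch_base(struct demo_adma *d, unsigned int i)
{
	return d->base + i * DEMO_CH_STRIDE;
}

static int demo_runtime_suspend(struct demo_adma *d)
{
	unsigned int i;

	for (i = 0; i < d->nr_channels; i++) {
		d->ctx[i].cmd = readl(demo_ch_base(d, i) + DEMO_CH_CMD);
		if (!d->ctx[i].cmd)
			continue;	/* idle channel: nothing to save */
		d->ctx[i].ctrl = readl(demo_ch_base(d, i) + DEMO_CH_CTRL);
	}
	clk_disable_unprepare(d->ahub_clk);	/* gate the clock last */
	return 0;
}

static int demo_runtime_resume(struct demo_adma *d)
{
	unsigned int i;
	int ret;

	ret = clk_prepare_enable(d->ahub_clk);	/* ungate before touching registers */
	if (ret)
		return ret;

	for (i = 0; i < d->nr_channels; i++) {
		if (!d->ctx[i].cmd)
			continue;	/* was idle at suspend time */
		writel(d->ctx[i].ctrl, demo_ch_base(d, i) + DEMO_CH_CTRL);
		writel(d->ctx[i].cmd, demo_ch_base(d, i) + DEMO_CH_CMD);	/* restart last */
	}
	return 0;
}
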
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 8c3c588834d2..a7e1f6e17e3d 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -395,8 +395,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
nelm * 2);
- if (ret)
+ if (ret) {
+ kfree(rsv_events);
return ret;
+ }
for (i = 0; i < nelm; i++) {
ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..cf6588cc3efd 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_MEM_TO_DEV);
+ td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8288fe4d17c3..cd271f782605 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -72,6 +72,9 @@
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
@@ -2054,8 +2057,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.gen_lock = cfg->gen_lock;
chan->config.master = cfg->master;
+ dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
if (cfg->gen_lock && chan->genlock) {
dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
}
@@ -2069,11 +2074,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.delay = cfg->delay;
if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
chan->config.coalesc = cfg->coalesc;
}
if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
chan->config.delay = cfg->delay;
}
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index b0bd0f64d8f2..6037efa94c9b 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1651,6 +1651,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
+ unsigned long bits;
dberr = (irq == edac->db_irq) ? 1 : 0;
sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
@@ -1660,7 +1661,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+ bits = irq_status;
+ for_each_set_bit(bit, &bits, 32) {
irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
if (irq)
generic_handle_irq(irq);
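
The altera_edac fix above copies the 32-bit irq_status into a local unsigned long before iterating, because for_each_set_bit() walks an array of unsigned long and casting the address of a u32 makes it read past the value on 64-bit kernels. A minimal, hedged illustration (identifiers are made up):

/* Hedged sketch: widen a 32-bit status word before for_each_set_bit(). */
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

static void demo_dispatch(u32 irq_status)
{
	unsigned long bits = irq_status;	/* safe storage for the bit iterator */
	int bit;

	for_each_set_bit(bit, &bits, 32)
		pr_info("servicing interrupt source %d\n", bit);
}
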
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 40d792e96b75..d59641194860 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -26,7 +26,7 @@
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
static int edac_mc_panic_on_ue;
-static int edac_mc_poll_msec = 1000;
+static unsigned int edac_mc_poll_msec = 1000;
/* Getter functions for above */
int edac_mc_get_log_ue(void)
@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
}
/* this is temporary */
-int edac_mc_get_poll_msec(void)
+unsigned int edac_mc_get_poll_msec(void)
{
return edac_mc_poll_msec;
}
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- unsigned long l;
+ unsigned int i;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtoul(val, 0, &l);
+ ret = kstrtouint(val, 0, &i);
if (ret)
return ret;
- if (l < 1000)
+ if (i < 1000)
return -EINVAL;
- *((unsigned long *)kp->arg) = l;
+ *((unsigned int *)kp->arg) = i;
/* notify edac_mc engine to reset the poll period */
- edac_mc_reset_delay_period(l);
+ edac_mc_reset_delay_period(i);
return 0;
}
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
module_param(edac_mc_log_ce, int, 0644);
MODULE_PARM_DESC(edac_mc_log_ce,
"Log correctable error to console: 0=off 1=on");
-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
+module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
@@ -426,6 +426,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
+ int err;
+
csrow->dev.type = &csrow_attr_type;
csrow->dev.bus = mci->bus;
csrow->dev.groups = csrow_dev_groups;
@@ -438,7 +440,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
edac_dbg(0, "creating (virtual) csrow node %s\n",
dev_name(&csrow->dev));
- return device_add(&csrow->dev);
+ err = device_add(&csrow->dev);
+ if (err)
+ put_device(&csrow->dev);
+
+ return err;
}
/* Create a CSROW object under specified edac_mc_device */
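
The edac_mc_sysfs change above makes the custom setter parse with kstrtouint() into an unsigned int and switches the getter to param_get_uint, so both sides of module_param_call() agree with the variable's actual type. A hedged sketch of such a matched pair, using a made-up demo_poll_msec parameter:

/* Hedged sketch: custom module parameter whose set/get agree on the type. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int demo_poll_msec = 1000;

static int demo_set_poll_msec(const char *val, const struct kernel_param *kp)
{
	unsigned int msec;
	int ret;

	if (!val)
		return -EINVAL;
	ret = kstrtouint(val, 0, &msec);
	if (ret)
		return ret;
	if (msec < 1000)		/* same lower bound as the driver enforces */
		return -EINVAL;
	*(unsigned int *)kp->arg = msec;
	return 0;
}

module_param_call(demo_poll_msec, demo_set_poll_msec, param_get_uint,
		  &demo_poll_msec, 0644);
MODULE_PARM_DESC(demo_poll_msec, "Polling period in milliseconds");
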
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cfaacb99c973..c36f9f721fb2 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -33,7 +33,7 @@ extern int edac_mc_get_log_ue(void);
extern int edac_mc_get_log_ce(void);
extern int edac_mc_get_panic_on_ue(void);
extern int edac_get_poll_msec(void);
-extern int edac_mc_get_poll_msec(void);
+extern unsigned int edac_mc_get_poll_msec(void);
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len);
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index e3fa4390f846..4ddbf6604e2a 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -189,6 +189,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
+ e->grain = 1;
strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
@@ -284,7 +285,7 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
/* Error grain */
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
@@ -391,8 +392,13 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
if (p > pvt->other_detail)
*(p - 1) = '\0';
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+
+ grain_bits = fls_long(e->grain - 1);
+
/* Generate the trace event */
- grain_bits = fls_long(e->grain);
snprintf(pvt->detail_location, sizeof(pvt->detail_location),
"APEI location: %s %s", e->location, e->other_detail);
trace_mc_event(type, e->msg, e->label, e->error_count,
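
The grain handling above derives the error granularity from the CPER physical address mask as ~mask + 1 (the size of the region the masked-out bits span) and then reports fls_long(grain - 1) so that an exact power of two yields its own bit width. A tiny self-contained check of that arithmetic (plain C; fls_long_demo stands in for the kernel's fls_long()):

/* Hedged sketch: grain = ~physical_addr_mask + 1, grain_bits = fls(grain - 1). */
#include <stdio.h>

static unsigned long fls_long_demo(unsigned long x)	/* stand-in for fls_long() */
{
	unsigned long bits = 0;

	while (x) {
		bits++;
		x >>= 1;
	}
	return bits;
}

int main(void)
{
	unsigned long mask = ~0xffUL;		/* low 8 address bits not valid */
	unsigned long grain = ~mask + 1;	/* 0x100: a 256-byte grain */

	printf("grain=%lu grain_bits=%lu\n", grain, fls_long_demo(grain - 1));
	return 0;				/* prints grain=256 grain_bits=8 */
}
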
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c7c60d..4c0b6df8b5dd 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1684,6 +1684,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
int jack_irq_rise, jack_irq_fall;
+ bool change;
+
+ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0,
+ &change);
+
+ if (change) {
+ regulator_disable(info->micvdd);
+ pm_runtime_put(info->dev);
+ }
gpiod_put(info->micd_pol_gpio);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 4a0612fb9c07..b9b48d45a6dc 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -321,12 +321,10 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
{
int ret = 0;
- if (usb_type == MAX8997_USB_HOST) {
- ret = max8997_muic_set_path(info, info->path_usb, attached);
- if (ret < 0) {
- dev_err(info->dev, "failed to update muic register\n");
- return ret;
- }
+ ret = max8997_muic_set_path(info, info->path_usb, attached);
+ if (ret < 0) {
+ dev_err(info->dev, "failed to update muic register\n");
+ return ret;
}
switch (usb_type) {
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index b22325688503..9d2d8a6673c8 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -69,6 +69,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 974b53222f56..12f8b01e5753 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -241,6 +241,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 15475892af0c..bc19ac0e662e 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -249,7 +249,11 @@ static int fwnet_header_cache(const struct neighbour *neigh,
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
- hh->hh_len = FWNET_HLEN;
+
+ /* Pairs with the READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
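
The firewire/net comment above documents a publication pattern: the header contents are written first and the non-zero length is published last with smp_store_release(), pairing with the READ_ONCE() on the consumer side. The hedged sketch below shows the same shape with an invented demo_hdr structure; the reader uses smp_load_acquire(), which is at least as strong as the READ_ONCE() pairing the comment describes.

/* Hedged sketch: write the payload, then publish its length with release semantics. */
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/string.h>

struct demo_hdr {
	unsigned int len;		/* 0 means "not published yet" */
	unsigned char data[16];
};

static void demo_publish(struct demo_hdr *h, const void *src, unsigned int len)
{
	memcpy(h->data, src, len);		/* 1: fill in the contents */
	smp_store_release(&h->len, len);	/* 2: then make them visible */
}

static int demo_consume(struct demo_hdr *h, void *dst)
{
	unsigned int len = smp_load_acquire(&h->len);	/* orders the reads below */

	if (!len)
		return -EAGAIN;			/* producer has not published yet */
	memcpy(dst, h->data, len);
	return len;
}
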
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d42c74..854df538ae01 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -144,7 +144,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86 && ACPI
+ depends on X86 && ISCSI_IBFT
default n
help
This option enables the kernel to find the region of memory
@@ -155,7 +155,8 @@ config ISCSI_IBFT_FIND
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
default n
help
This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d42537425438..c0e54396f250 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -375,7 +375,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
- printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
@@ -384,6 +384,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk(
"%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
pfx, pcie->bridge.secondary_status, pcie->bridge.control);
+
+ /* Fatal errors call __ghes_panic() before AER handler prints this */
+ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
+ (gdata->error_severity & CPER_SEV_FATAL)) {
+ struct aer_capability_regs *aer;
+
+ aer = (struct aer_capability_regs *)pcie->aer_info;
+ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
+ pfx, aer->uncor_status, aer->uncor_mask);
+ printk("%saer_uncor_severity: 0x%08x\n",
+ pfx, aer->uncor_severity);
+ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
+ aer->header_log.dw0, aer->header_log.dw1,
+ aer->header_log.dw2, aer->header_log.dw3);
+ }
}
static void cper_estatus_print_section(
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2f47c5b5f4cb..d89457d62a24 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -243,6 +243,9 @@ static __init int efivar_ssdt_load(void)
void *data;
int ret;
+ if (!efivar_ssdt[0])
+ return 0;
+
ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
list_for_each_entry_safe(entry, aux, &entries, list) {
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 3e626fd9bd4e..1c65f5ac4368 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -139,13 +139,16 @@ static ssize_t
efivar_attr_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
char *str = buf;
+ int ret;
if (!entry || !buf)
return -EINVAL;
- var->DataSize = 1024;
- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
return -EIO;
if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -172,13 +175,16 @@ static ssize_t
efivar_size_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
char *str = buf;
+ int ret;
if (!entry || !buf)
return -EINVAL;
- var->DataSize = 1024;
- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
return -EIO;
str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -189,12 +195,15 @@ static ssize_t
efivar_data_read(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
+ unsigned long size = sizeof(var->Data);
+ int ret;
if (!entry || !buf)
return -EINVAL;
- var->DataSize = 1024;
- if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+ ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+ var->DataSize = size;
+ if (ret)
return -EIO;
memcpy(buf, var->Data, var->DataSize);
@@ -263,6 +272,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
u8 *data;
int err;
+ if (!entry || !buf)
+ return -EINVAL;
+
if (is_compat()) {
struct compat_efi_variable *compat;
@@ -314,14 +326,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
{
struct efi_variable *var = &entry->var;
struct compat_efi_variable *compat;
+ unsigned long datasize = sizeof(var->Data);
size_t size;
+ int ret;
if (!entry || !buf)
return 0;
- var->DataSize = 1024;
- if (efivar_entry_get(entry, &entry->var.Attributes,
- &entry->var.DataSize, entry->var.Data))
+ ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+ var->DataSize = datasize;
+ if (ret)
return -EIO;
if (is_compat()) {
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 993aa56755f6..726d1467b778 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,7 +18,6 @@
#include "efistub.h"
-bool __nokaslr;
static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
{
@@ -268,18 +267,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail;
}
- /* check whether 'nokaslr' was passed on the command line */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- static const u8 default_cmdline[] = CONFIG_CMDLINE;
- const u8 *str, *cmdline = cmdline_ptr;
-
- if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
- cmdline = default_cmdline;
- str = strstr(cmdline, "nokaslr");
- if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
- __nokaslr = true;
- }
-
si = setup_graphics(sys_table);
status = handle_kernel_image(sys_table, image_addr, &image_size,
@@ -291,9 +278,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
- status = efi_parse_options(cmdline_ptr);
- if (status != EFI_SUCCESS)
- pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+ IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+ cmdline_size == 0)
+ efi_parse_options(CONFIG_CMDLINE);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
+ efi_parse_options(cmdline_ptr);
secure_boot = efi_get_secureboot(sys_table);
if (secure_boot > 0)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 959d9b8d4845..f7a6970e9abc 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -24,8 +24,6 @@
#include "efistub.h"
-extern bool __nokaslr;
-
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
{
u64 tg;
@@ -60,7 +58,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
u64 phys_seed = 0;
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- if (!__nokaslr) {
+ if (!nokaslr()) {
status = efi_get_random_bytes(sys_table_arg,
sizeof(phys_seed),
(u8 *)&phys_seed);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 09d10dcf1fc6..1c963c4d1bde 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,6 +32,13 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
+static int __section(.data) __nokaslr;
+
+int __pure nokaslr(void)
+{
+ return __nokaslr;
+}
+
/*
* Allow the platform to override the allocation granularity: this allows
* systems that have the capability to run with a larger page size to deal
@@ -351,17 +358,13 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
* environments, first in the early boot environment of the EFI boot
* stub, and subsequently during the kernel boot.
*/
-efi_status_t efi_parse_options(char *cmdline)
+efi_status_t efi_parse_options(char const *cmdline)
{
char *str;
- /*
- * Currently, the only efi= option we look for is 'nochunk', which
- * is intended to work around known issues on certain x86 UEFI
- * versions. So ignore for now on other architectures.
- */
- if (!IS_ENABLED(CONFIG_X86))
- return EFI_SUCCESS;
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
+ __nokaslr = 1;
/*
* If no EFI parameters were specified on the cmdline we've got
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index fac67992bede..d0e5acab547f 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -15,6 +15,8 @@
*/
#undef __init
+extern int __pure nokaslr(void);
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 24c461dea7af..fd8053f9556e 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -86,30 +86,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
}
static efi_status_t
-__gop_query32(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_32 *gop32,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_32 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
- efi_status_t status;
- unsigned long m;
-
- m = gop32->mode;
- mode = (struct efi_graphics_output_protocol_mode_32 *)m;
- query_mode = (void *)(unsigned long)gop32->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
-
- *fb_base = mode->frame_buffer_base;
- return status;
-}
-
-static efi_status_t
setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
efi_guid_t *proto, unsigned long size, void **gop_handle)
{
@@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
+ efi_status_t status;
u32 *handles = (u32 *)(unsigned long)gop_handle;
int i;
@@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u32);
for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_protocol_mode_32 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
- status = __gop_query32(sys_table_arg, gop32, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ mode = (void *)(unsigned long)gop32->mode;
+ info = (void *)(unsigned long)mode->info;
+ current_fb_base = mode->frame_buffer_base;
+
+ if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
- goto out;
+ return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
-}
-
-static efi_status_t
-__gop_query64(efi_system_table_t *sys_table_arg,
- struct efi_graphics_output_protocol_64 *gop64,
- struct efi_graphics_output_mode_info **info,
- unsigned long *size, u64 *fb_base)
-{
- struct efi_graphics_output_protocol_mode_64 *mode;
- efi_graphics_output_protocol_query_mode query_mode;
- efi_status_t status;
- unsigned long m;
-
- m = gop64->mode;
- mode = (struct efi_graphics_output_protocol_mode_64 *)m;
- query_mode = (void *)(unsigned long)gop64->query_mode;
-
- status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
- info);
- if (status != EFI_SUCCESS)
- return status;
- *fb_base = mode->frame_buffer_base;
- return status;
+ return EFI_SUCCESS;
}
static efi_status_t
@@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
u64 fb_base;
struct efi_pixel_bitmask pixel_info;
int pixel_format;
- efi_status_t status = EFI_NOT_FOUND;
+ efi_status_t status;
u64 *handles = (u64 *)(unsigned long)gop_handle;
int i;
@@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
nr_gops = size / sizeof(u64);
for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_protocol_mode_64 *mode;
struct efi_graphics_output_mode_info *info = NULL;
efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
bool conout_found = false;
@@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
if (status == EFI_SUCCESS)
conout_found = true;
- status = __gop_query64(sys_table_arg, gop64, &info, &size,
- &current_fb_base);
- if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+ mode = (void *)(unsigned long)gop64->mode;
+ info = (void *)(unsigned long)mode->info;
+ current_fb_base = mode->frame_buffer_base;
+
+ if ((!first_gop || conout_found) &&
info->pixel_format != PIXEL_BLT_ONLY) {
/*
* Systems that use the UEFI Console Splitter may
@@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
/* Did we find any GOPs? */
if (!first_gop)
- goto out;
+ return EFI_NOT_FOUND;
/* EFI framebuffer */
si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
si->lfb_size = si->lfb_linelength * si->lfb_height;
si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
- return status;
+
+ return EFI_SUCCESS;
}
/*
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 236004b9a50d..9faa09e7c31f 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -93,7 +93,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- if (md->virt_addr == 0) {
+ if (md->virt_addr == 0 && md->phys_addr != 0) {
/* no virtual mapping has been installed by the stub */
break;
}
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c46387160976..98cdfc2ee0df 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
if (count < sizeof(u32))
return -EINVAL;
param.type = *(u32 *)buf;
- count -= sizeof(u32);
buf += sizeof(u32);
/* The remaining buffer is the data payload */
- if (count > gsmi_dev.data_buf->length)
+ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
return -EINVAL;
param.data_len = count - sizeof(u32);
@@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
- return rc;
+ return (rc == 0) ? count : rc;
}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 132b9bae4b6a..220bbc91cebd 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBFT_ISCSI_VERSION);
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
struct ibft_hdr {
u8 id;
u8 version;
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 1e2e5198db53..7c31d27649fe 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -158,7 +158,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
kfree(args_virt);
}
- if (res->a0 < 0)
+ if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 62b268f9b485..602eff92a477 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -683,6 +683,7 @@ config GPIO_ADP5588
config GPIO_ADP5588_IRQ
bool "Interrupt controller support for ADP5588"
depends on GPIO_ADP5588=y
+ select GPIOLIB_IRQCHIP
help
Say yes here to enable the adp5588 to be used as an interrupt
controller. It requires the driver to be built in the kernel.
@@ -932,6 +933,7 @@ config GPIO_LP873X
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
+ select GPIOLIB_IRQCHIP
help
GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 8ff7b0d3eac6..3b68c03a281d 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (err < 0)
goto out;
- if (err & BIT(pos))
- err = -EACCES;
+ if (value & BIT(pos)) {
+ err = -EPERM;
+ goto out;
+ }
err = 0;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 7847dd34f86f..036a78b70427 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -259,17 +259,16 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
-
- spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
return ret;
}
+ spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
uirq->refcnt++;
@@ -315,8 +314,11 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
- if (uirq->refcnt == 0)
+ if (uirq->refcnt == 0) {
+ spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
free_irq(uirq->uirq, priv);
+ return;
+ }
}
spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index b46b436cb97f..52cac7c600ee 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -167,13 +167,13 @@ static int max77620_gpio_set_debounce(struct gpio_chip *gc,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
- case 1 ... 8:
+ case 1 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
- case 9 ... 16:
+ case 8001 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
- case 17 ... 32:
+ case 16001 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:
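
The max77620 fix above matters because gpiolib passes debounce times to set_debounce in microseconds, so the case ranges must span whole millisecond windows (1..8000 us, 8001..16000 us, ...) before choosing one of the chip's coarse 8/16/32 ms settings. A hedged sketch of that mapping with placeholder register values:

/* Hedged sketch: map a microsecond debounce request onto coarse hardware steps. */
#include <linux/errno.h>

enum demo_dbnc { DEMO_DBNC_NONE, DEMO_DBNC_8MS, DEMO_DBNC_16MS, DEMO_DBNC_32MS };

static int demo_debounce_val(unsigned int debounce_us)
{
	switch (debounce_us) {
	case 0:
		return DEMO_DBNC_NONE;
	case 1 ... 8000:		/* anything up to 8 ms rounds up to 8 ms */
		return DEMO_DBNC_8MS;
	case 8001 ... 16000:
		return DEMO_DBNC_16MS;
	case 16001 ... 32000:
		return DEMO_DBNC_32MS;
	default:
		return -EINVAL;		/* longer than the hardware can do */
	}
}
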
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 793518a30afe..b67f61d31e25 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -306,6 +306,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return -ENOMEM;
gc = &mpc8xxx_gc->gc;
+ gc->parent = &pdev->dev;
if (of_property_read_bool(np, "little-endian")) {
ret = bgpio_init(gc, &pdev->dev, 4,
@@ -337,7 +338,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+ if (devtype->irq_set_type)
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 6f9c9ac6ee70..fc841ce24db7 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -296,6 +296,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
}
}
+/*
+ * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
+ * See the GPIO "Wake-Up Generation" section of the TRM for the list of GPIOs
+ * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
+ * none are capable of waking up the system from off mode.
+ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
+{
+ u32 no_wake = bank->non_wakeup_gpios;
+
+ if (no_wake)
+ return !!(~no_wake & gpio_mask);
+
+ return false;
+}
+
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
@@ -327,13 +343,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
- if (!bank->regs->irqctrl) {
- /* On omap24xx proceed only when valid GPIO bit is set */
- if (bank->non_wakeup_gpios) {
- if (!(bank->non_wakeup_gpios & gpio_bit))
- goto exit;
- }
-
+ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -346,7 +356,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
-exit:
bank->level_mask =
readl_relaxed(bank->base + bank->regs->leveldetect0) |
readl_relaxed(bank->base + bank->regs->leveldetect1);
@@ -777,9 +786,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
raw_spin_lock_irqsave(&bank->lock, flags);
bank->irq_usage &= ~(BIT(offset));
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_clear_gpio_irqstatus(bank, offset);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_clear_gpio_irqstatus(bank, offset);
+ omap_set_gpio_irqenable(bank, offset, 0);
if (!LINE_USED(bank->mod_usage, offset))
omap_clear_gpio_debounce(bank, offset);
omap_disable_gpio_module(bank, offset);
@@ -821,8 +830,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
unsigned long flags;
raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, offset, 0);
omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_set_gpio_irqenable(bank, offset, 0);
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -834,17 +843,20 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
unsigned long flags;
raw_spin_lock_irqsave(&bank->lock, flags);
- if (trigger)
- omap_set_gpio_triggering(bank, offset, trigger);
+ omap_set_gpio_irqenable(bank, offset, 1);
- /* For level-triggered GPIOs, the clearing must be done after
- * the HW source is cleared, thus after the handler has run */
- if (bank->level_mask & BIT(offset)) {
- omap_set_gpio_irqenable(bank, offset, 0);
+ /*
+ * For level-triggered GPIOs, clearing must be done after the source
+ * is cleared, thus after the handler has run. OMAP4 needs this done
+ * after enabling the interrupt to clear the wakeup status.
+ */
+ if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
+ trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
omap_clear_gpio_irqstatus(bank, offset);
- }
- omap_set_gpio_irqenable(bank, offset, 1);
+ if (trigger)
+ omap_set_gpio_triggering(bank, offset, trigger);
+
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1574,6 +1586,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
.irqstatus = OMAP4_GPIO_IRQSTATUS0,
.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
+ .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
+ .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
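The new omap_gpio_is_off_wakeup_capable() helper treats non_wakeup_gpios as a mask of lines that cannot wake the system: a line is off-mode wake-up capable when its bit is clear in that mask, and nothing is wake-up capable when the mask is unset. A small standalone illustration with made-up mask values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_off_wakeup_capable(uint32_t non_wakeup_gpios, uint32_t gpio_mask)
{
	if (non_wakeup_gpios)
		return !!(~non_wakeup_gpios & gpio_mask);
	return false;   /* nothing configured: assume no off-mode wake-up */
}

int main(void)
{
	uint32_t no_wake = 0x0000ff00;  /* GPIOs 8..15 cannot wake the system */

	printf("gpio 3:  %d\n", is_off_wakeup_capable(no_wake, 1u << 3));  /* 1 */
	printf("gpio 10: %d\n", is_off_wakeup_capable(no_wake, 1u << 10)); /* 0 */
	printf("unset:   %d\n", is_off_wakeup_capable(0, 1u << 3));        /* 0 */
	return 0;
}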
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 7a6305884f97..32d22bdf7164 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -774,6 +774,9 @@ static int pxa_gpio_suspend(void)
struct pxa_gpio_bank *c;
int gpio;
+ if (!pchip)
+ return 0;
+
for_each_gpio_bank(gpio, c, pchip) {
c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
@@ -792,6 +795,9 @@ static void pxa_gpio_resume(void)
struct pxa_gpio_bank *c;
int gpio;
+ if (!pchip)
+ return;
+
for_each_gpio_bank(gpio, c, pchip) {
/* restore level with set/clear */
writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 537cec7583fc..cf88a0bfe99e 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- priv->data->set(chip, offset, val);
+ chip->set(chip, offset, val);
return 0;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index aac84329c759..b863386be911 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -531,7 +531,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
- return of_gpiochip_scan_gpios(chip);
+ status = of_gpiochip_scan_gpios(chip);
+ if (status) {
+ of_node_put(chip->of_node);
+ gpiochip_remove_pin_ranges(chip);
+ }
+
+ return status;
}
void of_gpiochip_remove(struct gpio_chip *chip)
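The of_gpiochip_add() change adds rollback on failure: the of_node reference taken earlier and the registered pin ranges are released when the final scan step fails. A compact userspace sketch of that unwind pattern, with stand-in names rather than the kernel APIs:

#include <stdio.h>

static int refcount;

static void node_get(void)      { refcount++; }
static void node_put(void)      { refcount--; }
static void remove_ranges(void) { /* undo pin-range registration */ }

static int scan_gpios(int fail) { return fail ? -5 /* like -EIO */ : 0; }

static int chip_add(int fail)
{
	int status;

	node_get();                     /* reference taken up front */

	status = scan_gpios(fail);
	if (status) {                   /* roll back on failure */
		node_put();
		remove_ranges();
	}
	return status;
}

int main(void)
{
	int ret = chip_add(0);

	printf("ok:   %d, refcount=%d\n", ret, refcount);
	refcount = 0;
	ret = chip_add(1);
	printf("fail: %d, refcount=%d\n", ret, refcount);
	return 0;
}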
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9e2fe12c2858..73d02f6089d5 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -188,6 +188,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
+ /*
+ * Open drain emulation using input mode may incorrectly report
+ * input here, fix that up.
+ */
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+ test_bit(FLAG_IS_OUT, &desc->flags))
+ return 0;
+
if (!chip->get_direction)
return status;
@@ -426,12 +434,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct linehandle_state *lh;
struct file *file;
int fd, i, count = 0, ret;
+ u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
return -EINVAL;
+ lflags = handlereq.flags;
+
+ /*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
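The new check rejects a line-handle request that asks for both INPUT and OUTPUT before any further work is done. A standalone sketch of that validation (the flag and errno values below are local stand-ins, not copied from the UAPI headers):

#include <stdint.h>
#include <stdio.h>

#define REQ_INPUT	(1u << 0)
#define REQ_OUTPUT	(1u << 1)
#define ERR_INVAL	22

static int validate_handle_flags(uint32_t lflags)
{
	if ((lflags & REQ_INPUT) && (lflags & REQ_OUTPUT))
		return -ERR_INVAL;      /* contradictory request */
	return 0;
}

int main(void)
{
	printf("input only:     %d\n", validate_handle_flags(REQ_INPUT));
	printf("input + output: %d\n", validate_handle_flags(REQ_INPUT | REQ_OUTPUT));
	return 0;
}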
@@ -452,7 +471,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- u32 lflags = handlereq.flags;
struct gpio_desc *desc;
if (offset >= gdev->ngpio) {
@@ -787,7 +805,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
/* This is just wrong: we don't look for events on output lines */
- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
ret = -EINVAL;
goto out_free_label;
}
@@ -801,10 +821,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
@@ -817,9 +833,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= IRQF_TRIGGER_RISING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
irqflags |= IRQF_SHARED;
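For line events, the requested edge now refers to the logical value, so on an active-low line a rising-edge request maps to a falling physical edge and vice versa. A standalone sketch of that selection logic (the flag constants are local stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EV_RISING	(1u << 0)
#define EV_FALLING	(1u << 1)
#define TRIG_RISING	(1u << 0)
#define TRIG_FALLING	(1u << 1)

static uint32_t event_irqflags(uint32_t eflags, bool active_low)
{
	uint32_t irqflags = 0;

	if (eflags & EV_RISING)
		irqflags |= active_low ? TRIG_FALLING : TRIG_RISING;
	if (eflags & EV_FALLING)
		irqflags |= active_low ? TRIG_RISING : TRIG_FALLING;
	return irqflags;
}

int main(void)
{
	printf("rising, active-high -> %#x\n", event_irqflags(EV_RISING, false));
	printf("rising, active-low  -> %#x\n", event_irqflags(EV_RISING, true));
	return 0;
}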
@@ -951,9 +969,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
@@ -1212,31 +1232,14 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
- /*
- * REVISIT: most hardware initializes GPIOs as inputs
- * (often with pullups enabled) so power usage is
- * minimized. Linux code should set the gpio direction
- * first thing; but until it does, and in case
- * chip->get_direction is not set, we may expose the
- * wrong direction in sysfs.
- */
-
- if (chip->get_direction) {
- /*
- * If we have .get_direction, set up the initial
- * direction flag from the hardware.
- */
- int dir = chip->get_direction(chip, i);
- if (!dir)
- set_bit(FLAG_IS_OUT, &desc->flags);
- } else if (!chip->direction_input) {
- /*
- * If the chip lacks the .direction_input callback
- * we logically assume all lines are outputs.
- */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
#ifdef CONFIG_PINCTRL
@@ -2411,7 +2414,7 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
int gpiod_get_raw_value(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
return _gpiod_get_raw_value(desc);
}
@@ -2432,7 +2435,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
int value;
VALIDATE_DESC(desc);
- /* Should be using gpio_get_value_cansleep() */
+ /* Should be using gpiod_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
value = _gpiod_get_raw_value(desc);
@@ -2608,7 +2611,7 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
VALIDATE_DESC_VOID(desc);
- /* Should be using gpiod_set_value_cansleep() */
+ /* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
_gpiod_set_raw_value(desc, value);
}
@@ -3029,8 +3032,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (chip->ngpio <= p->chip_hwnum) {
dev_err(dev,
- "requested GPIO %d is out of range [0..%d] for chip %s\n",
- idx, chip->ngpio, chip->label);
+ "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+ idx, p->chip_hwnum, chip->ngpio - 1,
+ chip->label);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 26afdffab5a0..0c2ed1254585 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -336,17 +336,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
-
- con_obj_id =
+ uint8_t con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
>> OBJECT_ID_SHIFT;
- con_obj_num =
- (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
- >> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* Skip TV/CV support */
if ((le16_to_cpu(path->usDeviceTag) ==
@@ -371,15 +363,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 3938fca1ea8e..24941a7b659f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -430,6 +430,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
+ if (info->read_mmr_reg.count > 128)
+ return -EINVAL;
+
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
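The amdgpu_info_ioctl() hunk bounds a userspace-supplied register count before it is used to size an allocation, so a hostile count cannot trigger an oversized or overflowing kmalloc_array(). A standalone sketch of the same guard; the limit of 128 mirrors the patch, the other names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_MMR_REGS 128

static uint32_t *alloc_reg_buffer(uint32_t count)
{
	if (count > MAX_MMR_REGS)
		return NULL;                            /* mirrors the -EINVAL rejection */
	return calloc(count, sizeof(uint32_t));         /* overflow-checked multiply */
}

int main(void)
{
	uint32_t *ok = alloc_reg_buffer(16);
	uint32_t *bad = alloc_reg_buffer(1u << 20);

	printf("count 16   -> %s\n", ok ? "allocated" : "rejected");
	printf("count 2^20 -> %s\n", bad ? "allocated" : "rejected");
	free(ok);
	return 0;
}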
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 327bdf13e8bc..b0beb5e537bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1606,7 +1606,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
- if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
@@ -1655,14 +1655,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 3907439417e7..c0db3b57dfe5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3739,6 +3739,11 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
data->frame_time_x2 = frame_time_in_us * 2 / 100;
+ if (data->frame_time_x2 < 280) {
+ pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
+ data->frame_time_x2 = 280;
+ }
+
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 9da37d3a15c6..01e945de8165 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -170,7 +170,8 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
long rate, clk_rate = mode->clock * 1000;
rate = clk_round_rate(hdlcd->clk, clk_rate);
- if (rate != clk_rate) {
+ /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
+ if (abs(rate - clk_rate) * 1000 > clk_rate) {
/* clock required by mode not supported by hardware */
return -EINVAL;
}
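Instead of demanding an exact clock match, the hdlcd check now accepts a rounded rate within 0.1% of the request, using integer arithmetic only. A standalone sketch of that comparison:

#include <stdio.h>
#include <stdlib.h>

static int rate_acceptable(long rate, long clk_rate)
{
	/* reject when |rate - clk_rate| / clk_rate > 1/1000 */
	return !(labs(rate - clk_rate) * 1000 > clk_rate);
}

int main(void)
{
	long want = 148500000;          /* 148.5 MHz, e.g. 1080p60 */

	printf("exact:  %d\n", rate_acceptable(want, want));
	printf("+0.05%%: %d\n", rate_acceptable(want + want / 2000, want));
	printf("+1%%:    %d\n", rate_acceptable(want + want / 100, want));
	return 0;
}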
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9b17a66cf0e1..aa54a6a2ad1d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -81,7 +81,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct videomode vm;
unsigned long prate;
unsigned int cfg;
- int div;
+ int div, ret;
+
+ ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
+ if (ret)
+ return;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
@@ -140,6 +144,8 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
cfg);
+
+ clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
}
static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *c,
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index a39b0343c197..401c218567af 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -97,10 +97,8 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
size = min(size, mem);
}
- if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
- DRM_ERROR("Cannot request framebuffer\n");
- return -EBUSY;
- }
+ if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+ DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f2348b79b56f..ff3f73fd4867 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -747,11 +747,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (mode->vrefresh <= 24)
+ if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25)
+ else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30)
+ else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index a2a82366a771..eb97e88a103c 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -725,7 +725,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -1344,7 +1346,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
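The anx78xx hunks quiet probe deferral: -EPROBE_DEFER is an expected, transient condition and is no longer logged as an error, while real failures still are. A userspace sketch of the pattern, with RETRY_LATER standing in for -EPROBE_DEFER:

#include <stdio.h>

#define RETRY_LATER	(-517)          /* models -EPROBE_DEFER */

static int get_resource(int simulated_err) { return simulated_err; }

static int init_pdata(int simulated_err)
{
	int err = get_resource(simulated_err);

	if (err) {
		if (err != RETRY_LATER)
			fprintf(stderr, "regulator not found: %d\n", err);
		return err;             /* caller retries later on RETRY_LATER */
	}
	return 0;
}

int main(void)
{
	printf("deferred -> %d (no error message)\n", init_pdata(RETRY_LATER));
	printf("missing  -> %d (error logged)\n", init_pdata(-19 /* like -ENODEV */));
	return 0;
}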
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 9126d0306ab5..51e2d03995a1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -250,10 +250,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
+ u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
- buf[0] = adj->clock;
- buf[1] = adj->clock >> 8;
+ buf[0] = pixel_clock_10kHz & 0xff;
+ buf[1] = pixel_clock_10kHz >> 8;
buf[2] = adj->vrefresh;
buf[3] = 0x00;
buf[4] = adj->hdisplay;
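The sii902x fix encodes the pixel clock in the 10 kHz units the register pair expects, rather than storing the raw kHz value from the mode. A standalone sketch of the conversion and byte split:

#include <stdint.h>
#include <stdio.h>

static void encode_pixel_clock(int clock_khz, uint8_t buf[2])
{
	uint16_t pixel_clock_10khz = clock_khz / 10;

	buf[0] = pixel_clock_10khz & 0xff;      /* low byte  */
	buf[1] = pixel_clock_10khz >> 8;        /* high byte */
}

int main(void)
{
	uint8_t buf[2];

	encode_pixel_clock(148500, buf);        /* 148.5 MHz -> 14850 (0x3a02) */
	printf("buf[0]=0x%02x buf[1]=0x%02x\n", buf[0], buf[1]);
	return 0;
}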
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index fa3f2f039a74..8b6f8fac92e8 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -300,7 +300,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct tc_data *tc = aux_to_tc(aux);
- size_t size = min_t(size_t, 8, msg->size);
+ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
u8 request = msg->request & ~DP_AUX_I2C_MOT;
u8 *buf = msg->buffer;
u32 tmp = 0;
@@ -1153,6 +1153,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
struct tc_data *tc = connector_to_tc(connector);
struct edid *edid;
unsigned int count;
+ int ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "failed to read display props: %d\n", ret);
+ return 0;
+ }
if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
count = tc->panel->funcs->get_modes(tc->panel);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b59441d109a5..41e67e983a7f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -272,7 +272,7 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
idx += req->u.i2c_read.transactions[i].num_bytes;
- buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
idx++;
}
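The encoding fix above moves No_Stop_Bit to bit 4 of the sideband I2C transaction byte, with the transaction delay in the low nibble. A standalone sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static uint8_t pack_i2c_txn(unsigned int no_stop_bit, unsigned int delay)
{
	uint8_t b = (no_stop_bit & 0x1) << 4;   /* bit 4: No_Stop_Bit */

	b |= delay & 0xf;                       /* bits 3:0: transaction delay */
	return b;
}

int main(void)
{
	printf("stop, delay 0    -> 0x%02x\n", pack_i2c_txn(0, 0));
	printf("no stop, delay 5 -> 0x%02x\n", pack_i2c_txn(1, 5));
	return 0;
}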
@@ -431,6 +431,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
if (idx > raw->curlen)
goto fail_len;
@@ -1040,10 +1041,12 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
lct = drm_dp_calculate_rad(port, rad);
port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
+ if (port->mstb) {
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
- send_link = true;
+ send_link = true;
+ }
break;
}
return send_link;
@@ -1536,7 +1539,11 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
if (ret != 1)
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ if (txmsg->seqno != -1) {
+ WARN_ON((unsigned int)txmsg->seqno >
+ ARRAY_SIZE(txmsg->dst->tx_slots));
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -2029,6 +2036,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int ret = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -2091,7 +2099,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
mgr->vcpi_mask = 0;
@@ -2099,6 +2110,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_put_mst_branch_device(mstb);
return ret;
@@ -3069,6 +3081,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+ msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
}
msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b981595e35fc..26834eb69b05 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -163,6 +163,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ /* Lenovo G50 */
+ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c37b7b5f1dd3..921f7f690ae9 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -515,6 +515,7 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
+ wake_up_interruptible(&file_priv->event_wait);
break;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 48e99ab525c3..ae5c0952a7a3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -996,6 +996,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
+ if (node->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
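The drm_gem_mmap() hunk refuses writable mappings of read-only objects and clears VM_MAYWRITE so an allowed mapping cannot later be upgraded to writable. A loose userspace analogue of that flag handling (the flag and errno values below are local stand-ins for the kernel's vm_flags bits):

#include <stdint.h>
#include <stdio.h>

#define VM_WRITE	(1u << 1)
#define VM_MAYWRITE	(1u << 5)
#define ERR_INVAL	22

static int mmap_readonly_object(uint32_t *vm_flags)
{
	if (*vm_flags & VM_WRITE)
		return -ERR_INVAL;      /* writable mapping not allowed */

	*vm_flags &= ~VM_MAYWRITE;      /* and it may never become writable */
	return 0;
}

int main(void)
{
	uint32_t ro_map = VM_MAYWRITE;  /* read-only mapping request */
	uint32_t rw_map = VM_WRITE | VM_MAYWRITE;
	int ret = mmap_readonly_object(&ro_map);

	printf("read-only map: %d, flags=%#x\n", ret, ro_map);
	printf("writable map:  %d\n", mmap_readonly_object(&rw_map));
	return 0;
}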
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 3ceea9cb9d3e..d5de4dd19701 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -42,8 +42,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -57,22 +55,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- memset(dmah->vaddr, 0, size);
-
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
@@ -85,19 +74,9 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
+ if (dmah->vaddr)
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
- }
}
/**
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index c1267f070745..acfee8bf6f4a 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -391,6 +391,9 @@ static void output_poll_execute(struct work_struct *work)
enum drm_connector_status old_status;
bool repoll = false, changed;
+ if (!dev->mode_config.poll_enabled)
+ return;
+
/* Pick up any changes detected by the probe functions. */
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
@@ -554,7 +557,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
- drm_kms_helper_poll_disable(dev);
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ dev->mode_config.poll_enabled = false;
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 96f9060a3831..2ba79460a276 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -554,7 +554,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
struct drm_property_blob *blob;
int ret;
- if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+ if (!length || length > INT_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
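drm_property_create_blob() now bounds the user-supplied length by INT_MAX minus the header size, presumably so the header-plus-payload total stays within range of the int-typed sizes used elsewhere, while still rejecting a zero length. A standalone sketch of the check (the header struct here is illustrative):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

struct blob_header { size_t length; void *data; };      /* stand-in */

static int blob_length_ok(size_t length)
{
	if (!length || length > INT_MAX - sizeof(struct blob_header))
		return 0;
	return 1;
}

int main(void)
{
	printf("4096    -> %d\n", blob_length_ok(4096));
	printf("0       -> %d\n", blob_length_ok(0));
	printf("INT_MAX -> %d\n", blob_length_ok((size_t)INT_MAX));
	return 0;
}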
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index d9230132dfbc..d71fa2d9a196 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -257,6 +257,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
+ unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+ bool need_flush = gpu->flush_seq != new_flush_seq;
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -269,14 +271,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || gpu->switch_context) {
+ if (need_flush || gpu->switch_context) {
u32 target, extra_dwords;
/* link command */
extra_dwords = 1;
/* flush command */
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
extra_dwords += 1;
else
@@ -289,7 +291,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
- if (gpu->mmu->need_flush) {
+ if (need_flush) {
/* Add the MMU flush */
if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -309,7 +311,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
SYNC_RECIPIENT_PE);
}
- gpu->mmu->need_flush = false;
+ gpu->flush_seq = new_flush_seq;
}
if (gpu->switch_context) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 2bef501d4a17..5af7c2594a79 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -206,7 +206,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
mutex_unlock(&obj->lock);
- if (pages) {
+ if (!IS_ERR(pages)) {
int j;
iter.hdr->data[0] = bomap - bomap_start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index a336754698f8..dba0d769d17a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1313,7 +1313,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
gpu->active_fence = submit->fence;
if (gpu->lastctx != cmdbuf->ctx) {
- gpu->mmu->need_flush = true;
+ gpu->mmu->flush_seq++;
gpu->switch_context = true;
gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 73c278dc3706..416940b254a6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -135,6 +135,7 @@ struct etnaviv_gpu {
int irq;
struct etnaviv_iommu *mmu;
+ unsigned int flush_seq;
/* Power Control: */
struct clk *clk_bus;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index fe0e85b41310..ef9df6158dc1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -134,7 +134,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
*/
if (mmu->last_iova) {
mmu->last_iova = 0;
- mmu->need_flush = true;
+ mmu->flush_seq++;
continue;
}
@@ -197,7 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
* associated commit requesting this mapping, and retry the
* allocation one more time.
*/
- mmu->need_flush = true;
+ mmu->flush_seq++;
}
return ret;
@@ -354,7 +354,7 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
* that the FE MMU prefetch won't load invalid entries.
*/
mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
- gpu->mmu->need_flush = true;
+ mmu->flush_seq++;
mutex_unlock(&mmu->lock);
return (u32)buf->vram_node.start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index e787e49c9693..5bdc5f5601b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -44,7 +44,7 @@ struct etnaviv_iommu {
struct list_head mappings;
struct drm_mm mm;
u32 last_iova;
- bool need_flush;
+ unsigned int flush_seq;
};
struct etnaviv_gem_object;
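The etnaviv series replaces the need_flush flag with a monotonically increasing flush_seq: the MMU side only increments, and the GPU side remembers the last value it handled, so a concurrent update can no longer be lost by clearing a flag. A standalone sketch of the idea:

#include <stdio.h>

struct mmu_state { unsigned int flush_seq; };   /* producer: bumps on change   */
struct gpu_state { unsigned int flush_seq; };   /* consumer: last handled seq  */

static void mmu_mapping_changed(struct mmu_state *mmu) { mmu->flush_seq++; }

static int gpu_queue_buffer(struct gpu_state *gpu, const struct mmu_state *mmu)
{
	unsigned int new_seq = mmu->flush_seq;  /* READ_ONCE() in the driver */
	int need_flush = gpu->flush_seq != new_seq;

	if (need_flush)
		gpu->flush_seq = new_seq;       /* never lose a later increment */
	return need_flush;
}

int main(void)
{
	struct mmu_state mmu = { 0 };
	struct gpu_state gpu = { 0 };

	mmu_mapping_changed(&mmu);
	printf("after change:  flush=%d\n", gpu_queue_buffer(&gpu, &mmu)); /* 1 */
	printf("no new change: flush=%d\n", gpu_queue_buffer(&gpu, &mmu)); /* 0 */
	return 0;
}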
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e07cb1fe4860..2b6c04acb24f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1775,8 +1775,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(dev, "failed to get regulators: %d\n", ret);
- return -EPROBE_DEFER;
+ if (ret != -EPROBE_DEFER)
+ dev_info(dev, "failed to get regulators: %d\n", ret);
+ return ret;
}
dsi->clks = devm_kzalloc(dev,
@@ -1789,9 +1790,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
- i--;
- continue;
+ dsi->clks[i] = devm_clk_get(dev,
+ OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
}
dev_info(dev, "failed to get the clock: %s\n",
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea733ab5b1e0..d83be4623c7f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -609,6 +609,9 @@ void cdv_intel_lvds_init(struct drm_device *dev,
int pipe;
u8 pin;
+ if (!dev_priv->lvds_enabled_in_vbt)
+ return;
+
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3a44e705db53..d224fc12b757 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -516,6 +516,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
container_of(helper, struct psb_fbdev, psb_fb_helper);
struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned int fb_size;
int bytespp;
bytespp = sizes->surface_bpp / 8;
@@ -525,8 +526,11 @@ static int psbfb_probe(struct drm_fb_helper *helper,
/* If the mode will not fit in 32bit then switch to 16bit to get
a console on full resolution. The X mode setting server will
allocate its own 32bit GEM framebuffer */
- if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
- dev_priv->vram_stolen_size) {
+ fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
+ sizes->surface_height;
+ fb_size = ALIGN(fb_size, PAGE_SIZE);
+
+ if (fb_size > dev_priv->vram_stolen_size) {
sizes->surface_bpp = 16;
sizes->surface_depth = 16;
}
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 63bde4e86c6a..e019ea271ffc 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
dev_priv->edp.support = 1;
+ dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
+ DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
+
/* This bit means to use 96Mhz for DPLL_A or not */
if (driver->primary_lfp_id)
dev_priv->dplla_96mhz = true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index da9fd34b9550..caa6da02206a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -139,6 +139,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -195,6 +196,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b74372760d7f..d18e11d61312 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -538,6 +538,7 @@ struct drm_psb_private {
int lvds_ssc_freq;
bool is_lvds_on;
bool is_mipi_on;
+ bool lvds_enabled_in_vbt;
u32 mipi_ctrl_display;
unsigned int core_freq;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index d91856779beb..70de29f4e2b4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -723,7 +723,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
@@ -1043,7 +1043,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 70980f82a15b..1e104518192d 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -26,6 +26,7 @@
*/
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
/**
* DOC: batch buffer command parser
@@ -50,13 +51,11 @@
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
- * only be used by the kernel driver. The parser generally rejects such
- * commands, though it may allow some from the drm master process.
+ * only be used by the kernel driver. The parser rejects such commands.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
- * provides a whitelist of registers which userspace may safely access (for both
- * normal and drm master processes).
+ * provides a whitelist of registers which userspace may safely access.
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
@@ -81,11 +80,104 @@
* in the per-engine command tables.
*
* Other command table entries map fairly directly to high level categories
- * mentioned above: rejected, master-only, register whitelist. The parser
- * implements a number of checks, including the privileged memory checks, via a
- * general bitmasking mechanism.
+ * mentioned above: rejected, register whitelist. The parser implements a number
+ * of checks, including the privileged memory checks, via a general bitmasking
+ * mechanism.
*/
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+ * The command's length. The command is either fixed length (i.e. does
+ * not include a length field) or has a length field mask. The flag
+ * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+ * a length mask. All command entries in a command table must include
+ * length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ *
+ * A non-zero step value implies that the command may access multiple
+ * registers in sequence (e.g. LRI), in that case step gives the
+ * distance in dwords between individual offset fields.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 step;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ *
+ * If the check specifies a non-zero condition_mask then the parser
+ * only performs the check when the bits specified by condition_mask
+ * are non-zero.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ u32 condition_offset;
+ u32 condition_mask;
+ } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
+
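The bits[] checks described above mask one dword of a command and compare it against an expected value, optionally gated by a condition mask on another dword. A standalone sketch of those semantics (this is a rendering of the comment, not the driver's actual check_cmd() code, and the constants in main() are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cmd_bitmask {
	uint32_t offset, mask, expected;
	uint32_t condition_offset, condition_mask;
};

static bool cmd_passes_bitmasks(const uint32_t *cmd,
				const struct cmd_bitmask *bits, int n)
{
	for (int i = 0; i < n; i++) {
		if (bits[i].mask == 0)
			break;          /* only entries with a mask are valid */
		if (bits[i].condition_mask &&
		    !(cmd[bits[i].condition_offset] & bits[i].condition_mask))
			continue;       /* condition not met: skip this check */
		if ((cmd[bits[i].offset] & bits[i].mask) != bits[i].expected)
			return false;   /* masked dword differs: reject */
	}
	return true;
}

int main(void)
{
	/* made-up check: dword 0, low 8 bits must equal 0x05 */
	struct cmd_bitmask bits[] = { { 0, 0xff, 0x05, 0, 0 } };
	uint32_t good[] = { 0x12340005 };
	uint32_t bad[]  = { 0x12340007 };

	printf("good: %d\n", cmd_passes_bitmasks(good, bits, 1));
	printf("bad:  %d\n", cmd_passes_bitmasks(bad, bits, 1));
	return 0;
}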
#define STD_MI_OPCODE_SHIFT (32 - 9)
#define STD_3D_OPCODE_SHIFT (32 - 16)
#define STD_2D_OPCODE_SHIFT (32 - 10)
@@ -95,7 +187,7 @@
#define CMD(op, opm, f, lm, fl, ...) \
{ \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
- .cmd = { (op), ~0u << (opm) }, \
+ .cmd = { (op & ~0u << (opm)), ~0u << (opm) }, \
.length = { (lm) }, \
__VA_ARGS__ \
}
@@ -110,14 +202,13 @@
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
-#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action
---------------------------------------------------------- */
-static const struct drm_i915_cmd_descriptor common_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
- CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@@ -147,7 +238,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
-static const struct drm_i915_cmd_descriptor render_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ),
@@ -214,7 +305,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
@@ -231,7 +322,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
};
-static const struct drm_i915_cmd_descriptor video_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -275,7 +366,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ),
};
-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@@ -313,7 +404,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ),
};
-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
+static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{
@@ -347,10 +438,64 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
- CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
+/*
+ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
+ * need to re-enforce the register access checks. We therefore only need to
+ * teach the cmdparser how to find the end of each command, and identify
+ * register accesses. The table doesn't need to reject any commands, and so
+ * the only commands listed here are:
+ * 1) Those that touch registers
+ * 2) Those that do not have the default 8-bit length
+ *
+ * Note that the default MI length mask chosen for this table is 0xFF, not
+ * the 0x3F used on older devices. This is because the vast majority of MI
+ * cmds on Gen9 use a standard 8-bit Length field.
+ * All the Gen9 blitter instructions use the standard 0xFF length mask, and
+ * none allow access to non-general registers, so in fact no BLT cmds are
+ * included in the table at all.
+ *
+ */
+static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
+ CMD( MI_NOOP, SMI, F, 1, S ),
+ CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
+ CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
+ CMD( MI_FLUSH, SMI, F, 1, S ),
+ CMD( MI_ARB_CHECK, SMI, F, 1, S ),
+ CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
+ CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
+ CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
+ CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
+ CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
+ CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
+ CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
+ CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
+ CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+ /*
+ * We allow BB_START but apply further checks. We just sanitize the
+ * basic fields here.
+ */
+#define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+ CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
+ .bits = {{
+ .offset = 0,
+ .mask = MI_BB_START_OPERAND_MASK,
+ .expected = MI_BB_START_OPERAND_EXPECT,
+ }}, ),
+};
+
static const struct drm_i915_cmd_descriptor noop_desc =
CMD(MI_NOOP, SMI, F, 1, S);
@@ -364,40 +509,44 @@ static const struct drm_i915_cmd_descriptor noop_desc =
#undef R
#undef W
#undef B
-#undef M
-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};
-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { render_cmds, ARRAY_SIZE(render_cmds) },
+static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};
-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { video_cmds, ARRAY_SIZE(video_cmds) },
+static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};
-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
+static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};
-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};
-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
- { common_cmds, ARRAY_SIZE(common_cmds) },
- { blt_cmds, ARRAY_SIZE(blt_cmds) },
+static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
+ { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+ { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
+static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
+ { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
+};
+
+
/*
* Register whitelists, sorted by increasing register offset.
*/
@@ -450,7 +599,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
- REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
REG32(GEN7_3DPRIM_END_OFFSET),
@@ -514,17 +662,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
- REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
-};
-
-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
- REG32(FORCEWAKE_MT),
- REG32(DERRMR),
+static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
+ REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
+ REG32(BCS_SWCTRL),
+ REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG64_IDX(BCS_GPR, 0),
+ REG64_IDX(BCS_GPR, 1),
+ REG64_IDX(BCS_GPR, 2),
+ REG64_IDX(BCS_GPR, 3),
+ REG64_IDX(BCS_GPR, 4),
+ REG64_IDX(BCS_GPR, 5),
+ REG64_IDX(BCS_GPR, 6),
+ REG64_IDX(BCS_GPR, 7),
+ REG64_IDX(BCS_GPR, 8),
+ REG64_IDX(BCS_GPR, 9),
+ REG64_IDX(BCS_GPR, 10),
+ REG64_IDX(BCS_GPR, 11),
+ REG64_IDX(BCS_GPR, 12),
+ REG64_IDX(BCS_GPR, 13),
+ REG64_IDX(BCS_GPR, 14),
+ REG64_IDX(BCS_GPR, 15),
};
#undef REG64
@@ -533,33 +691,32 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
- bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
- { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
- { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
- { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
- { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
+};
+
+static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
+ { gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
@@ -578,7 +735,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
@@ -601,7 +758,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
@@ -612,6 +769,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
+static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
+{
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
+
+ if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
+ return 0xFF;
+
+ DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
+ return 0;
+}
+
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
@@ -703,22 +871,15 @@ struct cmd_node {
*/
static inline u32 cmd_header_key(u32 x)
{
- u32 shift;
-
switch (x >> INSTR_CLIENT_SHIFT) {
default:
case INSTR_MI_CLIENT:
- shift = STD_MI_OPCODE_SHIFT;
- break;
+ return x >> STD_MI_OPCODE_SHIFT;
case INSTR_RC_CLIENT:
- shift = STD_3D_OPCODE_SHIFT;
- break;
+ return x >> STD_3D_OPCODE_SHIFT;
case INSTR_BC_CLIENT:
- shift = STD_2D_OPCODE_SHIFT;
- break;
+ return x >> STD_2D_OPCODE_SHIFT;
}
-
- return x >> shift;
}
static int init_hash_table(struct intel_engine_cs *engine,
@@ -776,18 +937,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->i915))
+ if (!IS_GEN7(engine->i915) && !(IS_GEN9(engine->i915) &&
+ engine->id == BCS))
return;
switch (engine->id) {
case RCS:
if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_render_ring_cmds;
+ cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count =
- ARRAY_SIZE(hsw_render_ring_cmds);
+ ARRAY_SIZE(hsw_render_ring_cmd_table);
} else {
- cmd_tables = gen7_render_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
+ cmd_tables = gen7_render_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
}
if (IS_HASWELL(engine->i915)) {
@@ -797,36 +959,46 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
-
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
- cmd_tables = gen7_video_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
+ cmd_tables = gen7_video_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(engine->i915)) {
- cmd_tables = hsw_blt_ring_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ if (IS_GEN9(engine->i915)) {
+ cmd_tables = gen9_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
+ engine->get_cmd_length_mask =
+ gen9_blt_get_cmd_length_mask;
+
+ /* BCS Engine unsafe without parser */
+ engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
+ } else if (IS_HASWELL(engine->i915)) {
+ cmd_tables = hsw_blt_ring_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else {
- cmd_tables = gen7_blt_cmds;
- cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
+ cmd_tables = gen7_blt_cmd_table;
+ cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
}
- if (IS_HASWELL(engine->i915)) {
+ if (IS_GEN9(engine->i915)) {
+ engine->reg_tables = gen9_blt_reg_tables;
+ engine->reg_table_count =
+ ARRAY_SIZE(gen9_blt_reg_tables);
+ } else if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
-
- engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
- cmd_tables = hsw_vebox_cmds;
- cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
+ cmd_tables = hsw_vebox_cmd_table;
+ cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
@@ -852,7 +1024,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
return;
}
- engine->needs_cmd_parser = true;
+ engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}
/**
@@ -864,7 +1036,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
*/
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
- if (!engine->needs_cmd_parser)
+ if (!intel_engine_using_cmd_parser(engine))
return;
fini_hash_table(engine);
@@ -938,22 +1110,16 @@ __find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
}
static const struct drm_i915_reg_descriptor *
-find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
+find_reg(const struct intel_engine_cs *engine, u32 addr)
{
const struct drm_i915_reg_table *table = engine->reg_tables;
+ const struct drm_i915_reg_descriptor *reg = NULL;
int count = engine->reg_table_count;
- do {
- if (!table->master || is_master) {
- const struct drm_i915_reg_descriptor *reg;
-
- reg = __find_reg(table->regs, table->num_regs, addr);
- if (reg != NULL)
- return reg;
- }
- } while (table++, --count);
+ for (; !reg && (count > 0); ++table, --count)
+ reg = __find_reg(table->regs, table->num_regs, addr);
- return NULL;
+ return reg;
}
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
@@ -1036,32 +1202,9 @@ unpin_src:
return dst;
}
-/**
- * intel_engine_needs_cmd_parser() - should a given engine use software
- * command parsing?
- * @engine: the engine in question
- *
- * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module parameter.
- *
- * Return: true if the engine requires software command parsing
- */
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
-{
- if (!engine->needs_cmd_parser)
- return false;
-
- if (!USES_PPGTT(engine->i915))
- return false;
-
- return (i915.enable_cmd_parser == 1);
-}
-
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd, u32 length,
- const bool is_master,
- bool *oacontrol_set)
+ const u32 *cmd, u32 length)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1071,12 +1214,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
- if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
- DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
- *cmd);
- return false;
- }
-
if (desc->flags & CMD_DESC_REGISTER) {
/*
* Get the distance between individual register offset
@@ -1090,7 +1227,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(engine, is_master, reg_addr);
+ find_reg(engine, reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
@@ -1099,31 +1236,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
/*
- * OACONTROL requires some special handling for
- * writes. We want to make sure that any batch which
- * enables OA also disables it before the end of the
- * batch. The goal is to prevent one process from
- * snooping on the perf data from another process. To do
- * that, we need to check the value that will be written
- * to the register. Hence, limit OACONTROL writes to
- * only MI_LOAD_REGISTER_IMM commands.
- */
- if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
- *oacontrol_set = (cmd[offset + 1] != 0);
- }
-
- /*
* Check the value written to the register against the
* allowed mask/value pair given in the whitelist entry.
*/
@@ -1170,6 +1282,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
continue;
}
+ if (desc->bits[i].offset >= length) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
+ return false;
+ }
+
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
@@ -1187,16 +1305,112 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return true;
}
+static int check_bbstart(const struct i915_gem_context *ctx,
+ u32 *cmd, u32 offset, u32 length,
+ u32 batch_len,
+ u64 batch_start,
+ u64 shadow_batch_start)
+{
+ u64 jump_offset, jump_target;
+ u32 target_cmd_offset, target_cmd_index;
+
+ /* For igt compatibility on older platforms */
+ if (CMDPARSER_USES_GGTT(ctx->i915)) {
+ DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+ return -EACCES;
+ }
+
+ if (length != 3) {
+ DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+ length);
+ return -EINVAL;
+ }
+
+ jump_target = *(u64*)(cmd+1);
+ jump_offset = jump_target - batch_start;
+
+ /*
+	 * Any underflow when computing jump_offset is guaranteed to wrap to a
+	 * value outside the range of a u32, so the >= test catches targets
+	 * that are both too large and too small.
+	 */
+ if (jump_offset >= batch_len) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ /*
+ * This cannot overflow a u32 because we already checked jump_offset
+ * is within the BB, and the batch_len is a u32
+ */
+ target_cmd_offset = lower_32_bits(jump_offset);
+ target_cmd_index = target_cmd_offset / sizeof(u32);
+
+ *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+ if (target_cmd_index == offset)
+ return 0;
+
+ if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+ DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+ return -EINVAL;
+ } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+ DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+ jump_target);
+ return -EINVAL;
+ }
+
+ return 0;
+}
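[Editor's note] The comments above compress a fair amount of unsigned arithmetic. A throwaway userspace sketch of the same check (addresses invented; only the comparison is the point) shows why the single "jump_offset >= batch_len" test rejects jumps both below and above the batch: a target below batch_start wraps around to a huge 64-bit offset.

#include <stdint.h>
#include <stdio.h>

/* Not driver code: returns non-zero only if the target lands inside the batch. */
static int jump_in_batch(uint64_t batch_start, uint32_t batch_len,
                         uint64_t jump_target)
{
        uint64_t jump_offset = jump_target - batch_start;

        /* A target below batch_start wraps far beyond any u32 batch_len. */
        return jump_offset < batch_len;
}

int main(void)
{
        uint64_t start = 0x100000;
        uint32_t len   = 0x1000;

        printf("%d\n", jump_in_batch(start, len, 0x100800)); /* 1: inside the batch */
        printf("%d\n", jump_in_batch(start, len, 0x0ff000)); /* 0: below, wraps     */
        printf("%d\n", jump_in_batch(start, len, 0x200000)); /* 0: beyond the end   */
        return 0;
}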
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+ const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+ const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+ u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+ unsigned long *next_whitelist;
+
+ if (CMDPARSER_USES_GGTT(ctx->i915))
+ return;
+
+ if (batch_cmds <= ctx->jump_whitelist_cmds) {
+ bitmap_zero(ctx->jump_whitelist, batch_cmds);
+ return;
+ }
+
+again:
+ next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+ if (next_whitelist) {
+ kfree(ctx->jump_whitelist);
+ ctx->jump_whitelist = next_whitelist;
+ ctx->jump_whitelist_cmds =
+ next_size * BITS_PER_BYTE * sizeof(long);
+ return;
+ }
+
+ if (next_size > exact_size) {
+ next_size = exact_size;
+ goto again;
+ }
+
+ DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+ bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+
+ return;
+}
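[Editor's note] A rough model of the sizing arithmetic in init_whitelist(), assuming the usual Linux semantics of BITS_TO_LONGS() and roundup_pow_of_two(): one whitelist bit per batch dword, with the allocation rounded up to a power of two so the bitmap rarely needs to regrow, and jump_whitelist_cmds recording how many bits the allocation can track. The helpers are re-implemented here purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        uint32_t batch_len  = 4096 + 12;             /* bytes, example value */
        uint32_t batch_cmds = (batch_len + 3) / 4;   /* one bit per dword    */
        size_t exact = BITS_TO_LONGS(batch_cmds);
        size_t next  = BITS_TO_LONGS(roundup_pow_of_two_ul(batch_cmds));

        printf("cmds=%u exact=%zu longs rounded=%zu longs (%zu bits tracked)\n",
               batch_cmds, exact, next, next * BITS_PER_LONG);
        return 0;
}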
+
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
* @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj
- * @is_master: is the submitting process the drm master?
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
@@ -1204,17 +1418,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master)
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start)
{
- u32 *cmd, *batch_end;
+ u32 *cmd, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
bool needs_clflush_after = false;
int ret = 0;
@@ -1226,13 +1442,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
return PTR_ERR(cmd);
}
+ init_whitelist(ctx, batch_len);
+
/*
* We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra
* space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + (batch_len / sizeof(*batch_end));
- while (cmd < batch_end) {
+ do {
u32 length;
if (*cmd == MI_BATCH_BUFFER_END)
@@ -1243,17 +1461,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
- break;
- }
-
- /*
- * If the batch buffer contains a chained batch, return an
- * error that tells the caller to abort and dispatch the
- * workload as a non-secure batch.
- */
- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
- ret = -EACCES;
- break;
+ goto err;
}
if (desc->flags & CMD_DESC_FIXED)
@@ -1267,32 +1475,44 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
length,
batch_end - cmd);
ret = -EINVAL;
- break;
+ goto err;
}
- if (!check_cmd(engine, desc, cmd, length, is_master,
- &oacontrol_set)) {
- ret = -EINVAL;
+ if (!check_cmd(engine, desc, cmd, length)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
+
+ if (ret)
+ goto err;
break;
}
- cmd += length;
- }
+ if (ctx->jump_whitelist_cmds > offset)
+ set_bit(offset, ctx->jump_whitelist);
- if (oacontrol_set) {
- DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
- ret = -EINVAL;
- }
+ cmd += length;
+ offset += length;
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ } while (1);
- if (cmd >= batch_end) {
- DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
- ret = -EINVAL;
+ if (needs_clflush_after) {
+ void *ptr = ptr_mask_bits(shadow_batch_obj->mapping);
+ drm_clflush_virt_range(ptr,
+ (void *)(cmd + 1) - ptr);
}
- if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
+err:
i915_gem_object_unpin_map(shadow_batch_obj);
-
return ret;
}
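[Editor's note] To make the restructured loop easier to follow, here is an intentionally simplified, self-contained model of what it now does: scan commands, record every scanned offset, stop at end-of-batch, and only accept a (greatly simplified) BB_START whose target is an offset that was already scanned. The opcodes, the one-dword default length and the "target is a dword index" convention are inventions of this sketch; the real parser works on canonical GPU addresses and the per-context jump_whitelist bitmap shown earlier.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OP_NOOP     0x00000000u
#define OP_BBSTART  0x18000001u   /* invented: 3-dword "jump" command */
#define OP_BBE      0x05000000u   /* invented: batch-buffer-end       */

static int parse(const uint32_t *batch, uint32_t ncmds)
{
        bool seen[64] = { false };
        uint32_t i = 0;

        while (i < ncmds) {
                if (batch[i] == OP_BBE)
                        return 0;                       /* clean end of batch */

                if (batch[i] == OP_BBSTART) {
                        uint32_t target = batch[i + 1]; /* dword index in this model */

                        if (target >= ncmds || !seen[target]) {
                                fprintf(stderr, "reject: jump to unscanned offset %u\n",
                                        target);
                                return -1;
                        }
                        return 0;                       /* scanning stops at the jump */
                }

                seen[i] = true;                         /* record every scanned offset */
                i += 1;                                 /* other commands: 1 dword      */
        }

        fprintf(stderr, "reject: ran off the end without a BBE\n");
        return -1;
}

int main(void)
{
        const uint32_t good[] = { OP_NOOP, OP_NOOP, OP_BBSTART, 1, 0, OP_BBE };
        const uint32_t bad[]  = { OP_NOOP, OP_BBSTART, 5, 0, OP_BBE };

        printf("good: %d\n", parse(good, 6));
        printf("bad:  %d\n", parse(bad, 5));
        return 0;
}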
@@ -1312,7 +1532,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
@@ -1332,6 +1552,12 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
* 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
+ * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
+ * rely on the HW to NOOP disallowed commands as it would without
+ * the parser enabled.
+ * 9. Don't whitelist or handle oacontrol specially, as ownership
+ * for oacontrol state is moving to i915-perf.
+ * 10. Support for Gen9 BCS Parsing
*/
- return 7;
+ return 10;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index bae62cf934cf..ff61229d963b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -280,7 +280,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915.semaphores;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
- value = capable(CAP_SYS_ADMIN);
+ value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(dev_priv);
@@ -1470,6 +1470,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
intel_display_set_init_power(dev_priv, false);
+ i915_rc6_ctx_wa_suspend(dev_priv);
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
@@ -1706,6 +1707,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
else
intel_display_set_init_power(dev_priv, true);
+ i915_rc6_ctx_wa_resume(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
out:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e23748cca0c0..c4f155663ca9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -943,6 +943,13 @@ struct i915_gem_context {
struct list_head link;
u8 remap_slice;
+
+ /** jump_whitelist: Bit array for tracking cmds during cmdparsing */
+ unsigned long *jump_whitelist;
+
+ /** jump_whitelist_cmds: No of cmd slots available */
+ u32 jump_whitelist_cmds;
+
bool closed:1;
};
@@ -1221,6 +1228,7 @@ struct intel_gen6_power_mgmt {
bool client_boost;
bool enabled;
+ bool ctx_corrupted;
struct delayed_work autoenable_work;
unsigned boosts;
@@ -2339,6 +2347,18 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
@@ -2476,102 +2496,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
-/*
- * A command that requires special handling by the command parser.
- */
-struct drm_i915_cmd_descriptor {
- /*
- * Flags describing how the command parser processes the command.
- *
- * CMD_DESC_FIXED: The command has a fixed length if this is set,
- * a length mask if not set
- * CMD_DESC_SKIP: The command is allowed but does not follow the
- * standard length encoding for the opcode range in
- * which it falls
- * CMD_DESC_REJECT: The command is never allowed
- * CMD_DESC_REGISTER: The command should be checked against the
- * register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
- */
- u32 flags;
-#define CMD_DESC_FIXED (1<<0)
-#define CMD_DESC_SKIP (1<<1)
-#define CMD_DESC_REJECT (1<<2)
-#define CMD_DESC_REGISTER (1<<3)
-#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
-
- /*
- * The command's unique identification bits and the bitmask to get them.
- * This isn't strictly the opcode field as defined in the spec and may
- * also include type, subtype, and/or subop fields.
- */
- struct {
- u32 value;
- u32 mask;
- } cmd;
-
- /*
- * The command's length. The command is either fixed length (i.e. does
- * not include a length field) or has a length field mask. The flag
- * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
- * a length mask. All command entries in a command table must include
- * length information.
- */
- union {
- u32 fixed;
- u32 mask;
- } length;
-
- /*
- * Describes where to find a register address in the command to check
- * against the ring's register whitelist. Only valid if flags has the
- * CMD_DESC_REGISTER bit set.
- *
- * A non-zero step value implies that the command may access multiple
- * registers in sequence (e.g. LRI), in that case step gives the
- * distance in dwords between individual offset fields.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 step;
- } reg;
-
-#define MAX_CMD_DESC_BITMASKS 3
- /*
- * Describes command checks where a particular dword is masked and
- * compared against an expected value. If the command does not match
- * the expected value, the parser rejects it. Only valid if flags has
- * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
- * are valid.
- *
- * If the check specifies a non-zero condition_mask then the parser
- * only performs the check when the bits specified by condition_mask
- * are non-zero.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 expected;
- u32 condition_offset;
- u32 condition_mask;
- } bits[MAX_CMD_DESC_BITMASKS];
-};
-
-/*
- * A table of commands requiring special handling by the command parser.
- *
- * Each engine has an array of tables. Each table consists of an array of
- * command descriptors, which must be sorted with command opcodes in
- * ascending order.
- */
-struct drm_i915_cmd_table {
- const struct drm_i915_cmd_descriptor *table;
- int count;
-};
-
/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
#define __I915__(p) ({ \
struct drm_i915_private *__p; \
@@ -2729,6 +2653,12 @@ struct drm_i915_cmd_table {
#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv)
+
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
#define BSD_RING ENGINE_MASK(VCS)
@@ -2745,6 +2675,8 @@ struct drm_i915_cmd_table {
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
+
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
@@ -2764,11 +2696,13 @@ struct drm_i915_cmd_table {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv) \
+ (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) == 9)
+
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
(IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
- IS_SKL_GT3(dev_priv) || \
- IS_SKL_GT4(dev_priv))
+ (INTEL_GEN(dev_priv) == 9))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -3098,6 +3032,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
+struct i915_vma * __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags);
+
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
@@ -3551,13 +3493,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
- struct drm_i915_gem_object *shadow_batch_obj,
+ u64 user_batch_start,
u32 batch_start_offset,
u32 batch_len,
- bool is_master);
+ struct drm_i915_gem_object *shadow_batch_obj,
+ u64 shadow_batch_start);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 26c4befcd234..3fb4f9acacba 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1773,6 +1773,10 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
unsigned int flags;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
PAGE_SHIFT;
@@ -2759,6 +2763,12 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
+
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) {
+ i915_rc6_ctx_wa_check(dev_priv);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ }
+
intel_runtime_pm_put(dev_priv);
out_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -3822,6 +3832,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags)
{
struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
+
+ return i915_gem_object_pin(obj, vm, view, size, alignment,
+ flags | PIN_GLOBAL);
+}
+
+struct i915_vma *
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view,
+ u64 size,
+ u64 alignment,
+ u64 flags)
+{
struct i915_vma *vma;
int ret;
@@ -3846,7 +3869,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
- ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
+ ret = i915_vma_pin(vma, size, alignment, flags);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e95736..5d55cd159e89 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -158,6 +158,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
i915_vma_put(ce->state);
}
+ kfree(ctx->jump_whitelist);
+
put_pid(ctx->pid);
list_del(&ctx->link);
@@ -327,6 +329,9 @@ __create_hw_context(struct drm_device *dev,
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
+ ctx->jump_whitelist = NULL;
+ ctx->jump_whitelist_cmds = 0;
+
return ctx;
err_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2117f172d7a2..4548d89abcdc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -55,6 +55,7 @@ struct i915_execbuffer_params {
struct i915_vma *batch;
u32 dispatch_flags;
u32 args_batch_start_offset;
+ u64 args_batch_len;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
struct drm_i915_gem_request *request;
@@ -1401,41 +1402,85 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
return 0;
}
+static struct i915_vma*
+shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ u64 flags;
+
+ /*
+ * PPGTT backed shadow buffers must be mapped RO, to prevent
+ * post-scan tampering
+ */
+ if (CMDPARSER_USES_GGTT(dev_priv)) {
+ flags = PIN_GLOBAL;
+ vm = &dev_priv->ggtt.base;
+ } else if (vm->has_read_only) {
+ flags = PIN_USER;
+ i915_gem_object_set_readonly(obj);
+ } else {
+ DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return i915_gem_object_pin(obj, vm, NULL, 0, 0, flags);
+}
+
static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
- struct drm_i915_gem_object *batch_obj,
+ struct i915_execbuffer_params *params,
struct eb_vmas *eb,
- u32 batch_start_offset,
- u32 batch_len,
- bool is_master)
+ struct i915_address_space *vm)
{
+ struct drm_i915_gem_object *batch_obj = params->batch->obj;
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
+ u64 batch_start;
+ u32 batch_start_offset = params->args_batch_start_offset;
+ u32 batch_len = params->args_batch_len;
+ u64 shadow_batch_start;
int ret;
+
shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return ERR_CAST(shadow_batch_obj);
- ret = intel_engine_cmd_parser(engine,
+ vma = shadow_batch_pin(shadow_batch_obj, vm);
+ if (IS_ERR(vma))
+ goto out;
+
+ batch_start = gen8_canonical_addr(params->batch->node.start) +
+ batch_start_offset;
+ shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+ ret = intel_engine_cmd_parser(params->ctx,
+ engine,
batch_obj,
- shadow_batch_obj,
+ batch_start,
batch_start_offset,
batch_len,
- is_master);
+ shadow_batch_obj,
+ shadow_batch_start);
if (ret) {
- if (ret == -EACCES) /* unhandled chained batch */
+ i915_vma_unpin(vma);
+
+ /*
+ * Unsafe GGTT-backed buffers can still be submitted safely
+ * as non-secure.
+ * For PPGTT backing however, we have no choice but to forcibly
+ * reject unsafe buffers
+ */
+ if (CMDPARSER_USES_GGTT(eb->i915) && (ret == -EACCES))
+ /* Execute original buffer non-secure */
vma = NULL;
else
vma = ERR_PTR(ret);
- goto out;
- }
- vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
goto out;
+ }
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
@@ -1476,13 +1521,10 @@ execbuf_submit(struct i915_execbuffer_params *params,
return ret;
}
- exec_len = args->batch_len;
+ exec_len = params->args_batch_len;
exec_start = params->batch->node.start +
params->args_batch_start_offset;
- if (exec_len == 0)
- exec_len = params->batch->size - params->args_batch_start_offset;
-
ret = params->engine->emit_bb_start(params->request,
exec_start, exec_len,
params->dispatch_flags);
@@ -1601,8 +1643,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
+ if (INTEL_GEN(dev_priv) >= 11)
+ return -ENODEV;
+
+ /* Return -EPERM to trigger fallback code on old binaries. */
+ if (!HAS_SECURE_BATCHES(dev_priv))
+ return -EPERM;
+
if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
- return -EPERM;
+ return -EPERM;
dispatch_flags |= I915_DISPATCH_SECURE;
}
@@ -1710,32 +1759,26 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
+ params->ctx = ctx;
params->args_batch_start_offset = args->batch_start_offset;
- if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+ params->args_batch_len = args->batch_len;
+ if (args->batch_len == 0)
+ params->args_batch_len = params->batch->size - params->args_batch_start_offset;
+
+ if (intel_engine_requires_cmd_parser(engine) ||
+ (intel_engine_using_cmd_parser(engine) && args->batch_len)) {
struct i915_vma *vma;
vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
- params->batch->obj,
- eb,
- args->batch_start_offset,
- args->batch_len,
- drm_is_current_master(file));
+ params, eb, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
if (vma) {
- /*
- * Batch parsed and accepted:
- *
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in
- * the dispatch_execbuffer implementations. We
- * specifically don't want that set on batches the
- * command parser has accepted.
- */
- dispatch_flags |= I915_DISPATCH_SECURE;
+ if (CMDPARSER_USES_GGTT(dev_priv))
+ dispatch_flags |= I915_DISPATCH_SECURE;
params->args_batch_start_offset = 0;
params->batch = vma;
}
@@ -1798,7 +1841,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->file = file;
params->engine = engine;
params->dispatch_flags = dispatch_flags;
- params->ctx = ctx;
ret = execbuf_submit(params, args, &eb->vmas);
err_request:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0bb4232f66bc..16f56f14f4d0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -140,7 +140,8 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
- if (enable_ppgtt == 1)
+ /* Full PPGTT is required by the Gen9 cmdparser */
+ if (enable_ppgtt == 1 && INTEL_GEN(dev_priv) != 9)
return 1;
if (enable_ppgtt == 2 && has_full_ppgtt)
@@ -177,8 +178,8 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
vma->pages = vma->obj->pages;
- /* Currently applicable only to VLV */
- if (vma->obj->gt_ro)
+ /* Applicable to VLV, and gen8+ */
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
@@ -197,11 +198,14 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 flags)
{
gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
switch (level) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED_INDEX;
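[Editor's note] This hunk is the whole mechanism behind read-only shadow batches and ring buffers: the PPGTT PTE encoder gains a flags argument and simply clears the write-enable bit when PTE_READ_ONLY is passed. A standalone sketch of that idea follows; the bit positions and the flag value are assumptions of the example, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PRESENT  (1ull << 0)   /* bit positions assumed for the sketch */
#define _PAGE_RW       (1ull << 1)
#define PTE_READ_ONLY  (1u << 0)     /* flag value invented for the sketch   */

static uint64_t pte_encode(uint64_t addr, uint32_t flags)
{
        uint64_t pte = _PAGE_PRESENT | _PAGE_RW | addr; /* addr is page aligned */

        if (flags & PTE_READ_ONLY)
                pte &= ~_PAGE_RW;                       /* drop the write bit   */
        return pte;
}

int main(void)
{
        printf("rw: 0x%llx\n", (unsigned long long)pte_encode(0x1000, 0));
        printf("ro: 0x%llx\n", (unsigned long long)pte_encode(0x1000, PTE_READ_ONLY));
        return 0;
}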
@@ -472,7 +476,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC, true, 0);
fill_px(vm->dev, pt, scratch_pte);
}
@@ -769,7 +773,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, use_scratch);
+ I915_CACHE_LLC, use_scratch, 0);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
@@ -790,7 +794,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
struct sg_page_iter *sg_iter,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr;
@@ -809,7 +814,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level, true);
+ cache_level, true, flags);
if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
@@ -830,7 +835,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sg_page_iter sg_iter;
@@ -839,7 +844,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
- cache_level);
+ cache_level, flags);
} else {
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
@@ -847,7 +852,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
- start, cache_level);
+ start, cache_level, flags);
}
}
}
@@ -1452,7 +1457,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, true);
+ I915_CACHE_LLC, true, 0);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
@@ -1520,6 +1525,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+
+ /*
+ * From bdw, there is support for read-only pages in the PPGTT.
+ *
+ * XXX GVT is not honouring the lack of RW in the PTE bits.
+ */
+ ppgtt->base.has_read_only = !intel_vgpu_active(to_i915(ppgtt->base.dev));
+
ppgtt->debug_dump = gen8_dump_ppgtt;
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
@@ -2321,7 +2334,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, true, 0));
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
@@ -2332,7 +2345,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level, u32 unused)
+ enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
@@ -2343,12 +2356,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
int rpm_atomic_seq;
int i = 0;
+ /* The GTT does not support read-only mappings */
+ GEM_BUG_ON(flags & PTE_READ_ONLY);
+
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level, true);
+ gtt_entry = gen8_pte_encode(addr, level, true, 0);
gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
@@ -2499,7 +2520,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
- use_scratch);
+ use_scratch, 0);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
@@ -2604,8 +2625,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (ret)
return ret;
- /* Currently applicable only to VLV */
- if (obj->gt_ro)
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
@@ -2634,7 +2655,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
@@ -3193,6 +3214,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->base.total -= PAGE_SIZE;
i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE;
+
+ /* Only VLV supports read-only GGTT mappings */
+ ggtt->base.has_read_only = IS_VALLEYVIEW(dev_priv);
+
if (!HAS_LLC(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index ec78be2f8c77..43a0192242eb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -392,6 +392,9 @@ struct i915_address_space {
*/
struct list_head unbound_list;
+ /* Some systems support read-only mappings for GGTT and/or PPGTT */
+ bool has_read_only:1;
+
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8832f8ec1583..f597261c264f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -558,6 +558,10 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
return;
intel_runtime_pm_get_noresume(dev_priv);
+
+ if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv))
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
dev_priv->gt.awake = true;
intel_enable_gt_powersave(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 768ad89d9cd4..9d9dfe194b9b 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -49,7 +49,7 @@ struct i915_params i915 __read_mostly = {
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
- .enable_cmd_parser = 1,
+ .enable_cmd_parser = true,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
@@ -178,9 +178,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
-module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (1=enabled [default], 0=disabled)");
+ "Enable command parsing (true=enabled [default], false=disabled)");
module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3a0dd78ddb38..82ac6e886eed 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -44,7 +44,6 @@ struct i915_params {
int disable_power_well;
int enable_ips;
int invert_brightness;
- int enable_cmd_parser;
int enable_guc_loading;
int enable_guc_submission;
int guc_log_level;
@@ -53,6 +52,7 @@ struct i915_params {
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
+ bool enable_cmd_parser;
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 70d96162def6..5468e69bf520 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -223,6 +223,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_CONFIG0 _MMIO(0xD00)
#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+#define GEN8_RC6_CTX_INFO _MMIO(0x8504)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -295,7 +297,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
-#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
@@ -569,6 +570,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define BCS_SWCTRL _MMIO(0x22200)
+/* There are 16 GPR registers */
+#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4)
+
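[Editor's note] For readers wiring these new definitions into the whitelist tables, a quick sketch of the addressing: each of the 16 blitter GPRs is a 64-bit register, laid out as a lower dword at 0x22600 + n*8 with the upper dword 4 bytes above it. The loop below only prints the offsets implied by the macros; it is not driver code.

#include <stdio.h>

int main(void)
{
        for (int n = 0; n < 16; n++)
                printf("BCS_GPR(%2d)=0x%05x  BCS_GPR_UDW(%2d)=0x%05x\n",
                       n, 0x22600 + n * 8, n, 0x22600 + n * 8 + 4);
        return 0;
}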
#define GPGPU_THREADS_DISPATCHED _MMIO(0x2290)
#define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4)
#define HS_INVOCATION_COUNT _MMIO(0x2300)
@@ -5936,6 +5941,10 @@ enum {
#define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
#define DE_SPRITEB_FLIP_DONE (1 << 29)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 1ea0e1f43397..54d878cb458f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -280,10 +280,17 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
uint32_t i;
uint32_t *dmc_payload;
uint32_t required_version;
+ size_t fsize;
if (!fw)
return NULL;
+ fsize = sizeof(struct intel_css_header) +
+ sizeof(struct intel_package_header) +
+ sizeof(struct intel_dmc_header);
+ if (fsize > fw->size)
+ goto error_truncated;
+
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@@ -349,6 +356,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
readcount += dmc_offset;
+ fsize += dmc_offset;
+ if (fsize > fw->size)
+ goto error_truncated;
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@@ -379,6 +389,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
+ fsize += nbytes;
+ if (fsize > fw->size)
+ goto error_truncated;
+
if (nbytes > CSR_MAX_FW_SIZE) {
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
return NULL;
@@ -392,6 +406,10 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
}
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+error_truncated:
+ DRM_ERROR("Truncated DMC firmware, rejecting.\n");
+ return NULL;
}
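[Editor's note] The three fsize checks added to parse_csr_fw() follow one pattern: keep a running count of how many bytes the parser is about to trust and compare it to the firmware image size before each section is dereferenced. A compact, purely illustrative userspace rendering of that pattern (section sizes and names are invented):

#include <stdint.h>
#include <stdio.h>

struct fw_blob { const uint8_t *data; size_t size; };

/* Grow the running "bytes needed" counter; NULL means the blob is truncated. */
static const void *take(const struct fw_blob *fw, size_t *fsize, size_t want)
{
        size_t off = *fsize;

        *fsize += want;
        if (*fsize > fw->size)
                return NULL;
        return fw->data + off;
}

int main(void)
{
        uint8_t raw[64] = { 0 };
        struct fw_blob fw = { raw, sizeof(raw) };
        size_t fsize = 0;

        if (!take(&fw, &fsize, 16) ||   /* fixed headers             */
            !take(&fw, &fsize, 32) ||   /* variable-sized section    */
            !take(&fw, &fsize, 32)) {   /* payload: exceeds 64 bytes */
                fprintf(stderr, "Truncated DMC firmware, rejecting.\n");
                return 1;
        }
        return 0;
}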
static void csr_load_work_fn(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8aafb9601540..b3af565b7027 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1730,6 +1730,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 62bcc770a181..b935d62be918 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1017,6 +1017,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
int ret;
struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+ u32 scratch_addr =
+ i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
/* WaDisableCtxRestoreArbitration:skl,bxt */
if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
@@ -1036,22 +1038,17 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
wa_ctx_emit(batch, index, MI_NOOP);
- /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
/* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
- u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
-
- wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
- wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- wa_ctx_emit(batch, index, scratch_addr);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- }
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
/* WaMediaPoolStateCmdInWABB:bxt */
if (HAS_POOLED_EU(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 05427d292457..07d2a8e7f78c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -105,6 +105,13 @@ static void bxt_init_clock_gating(struct drm_device *dev)
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
+ /*
+ * Lower the display internal timeout.
+	 * This is needed to avoid any hard hangs when the DSI port PLL
+	 * is off and an MMIO access is attempted by any privileged
+	 * application, using batch buffers or any other means.
+ */
+ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -5149,19 +5156,23 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+{
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
/* we're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
@@ -5426,7 +5437,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (!dev_priv->rps.ctx_corrupted &&
+ intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
@@ -5484,7 +5496,8 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (!dev_priv->rps.ctx_corrupted &&
+ intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev_priv, rc6_mask);
if (IS_BROADWELL(dev_priv))
@@ -6655,6 +6668,95 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+ return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ i915->rps.ctx_corrupted = true;
+ intel_runtime_pm_get(i915);
+ }
+}
+
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+ if (i915->rps.ctx_corrupted) {
+ intel_runtime_pm_put(i915);
+ i915->rps.ctx_corrupted = false;
+ }
+}
+
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+ if (i915->rps.ctx_corrupted)
+ intel_runtime_pm_put(i915);
+}
+
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+ if (!i915->rps.ctx_corrupted)
+ return;
+
+ if (i915_rc6_ctx_corrupted(i915)) {
+ intel_runtime_pm_get(i915);
+ return;
+ }
+
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ i915->rps.ctx_corrupted = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv);
+
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return false if no context corruption has happened since the last call of
+ * this function, true otherwise.
+*/
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return false;
+
+ if (i915->rps.ctx_corrupted)
+ return false;
+
+ if (!i915_rc6_ctx_corrupted(i915))
+ return false;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_disable_rc6(i915);
+ i915->rps.ctx_corrupted = true;
+ intel_runtime_pm_get_noresume(i915);
+
+ return true;
+}
+
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
/*
@@ -6669,6 +6771,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
+ i915_rc6_ctx_wa_init(dev_priv);
+
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
cherryview_init_gt_powersave(dev_priv);
@@ -6718,6 +6822,8 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
+ i915_rc6_ctx_wa_cleanup(dev_priv);
+
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
@@ -6749,27 +6855,47 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
{
- if (!READ_ONCE(dev_priv->rps.enabled))
- return;
+ if (INTEL_GEN(dev_priv) >= 9)
+ gen9_disable_rc6(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_disable_rc6(dev_priv);
+}
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
mutex_lock(&dev_priv->rps.hw_lock);
+ __intel_disable_rc6(dev_priv);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_disable_rc6(dev_priv);
+static void intel_disable_rps(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9)
gen9_disable_rps(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_disable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_disable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ else if (INTEL_GEN(dev_priv) >= 6)
gen6_disable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
+ else if (IS_IRONLAKE_M(dev_priv))
ironlake_disable_drps(dev_priv);
- }
+}
+
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+ if (!READ_ONCE(dev_priv->rps.enabled))
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ __intel_disable_rc6(dev_priv);
+ intel_disable_rps(dev_priv);
dev_priv->rps.enabled = false;
+
mutex_unlock(&dev_priv->rps.hw_lock);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8babfe0ce4e3..29c3123840ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1951,6 +1951,7 @@ void intel_ring_unpin(struct intel_ring *ring)
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -1960,10 +1961,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
if (IS_ERR(obj))
return ERR_CAST(obj);
- /* mark ring buffers as read-only from GPU side by default */
- obj->gt_ro = 1;
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_create(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ec0b4a0c605d..ce14cd8495e8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -341,7 +341,9 @@ struct intel_engine_cs {
struct intel_engine_hangcheck hangcheck;
- bool needs_cmd_parser;
+#define I915_ENGINE_USING_CMD_PARSER BIT(0)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3)
+ unsigned int flags;
/*
* Table of commands the command parser needs to know about
@@ -374,7 +376,19 @@ intel_engine_initialized(const struct intel_engine_cs *engine)
return engine->i915 != NULL;
}
-static inline unsigned
+static inline bool
+intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USING_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
+}
+
+static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index 4c7f2e101bf3..494d1609e73f 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -77,14 +77,14 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
ipu_dc_disable(ipu);
+ drm_crtc_vblank_off(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
- if (crtc->state->event) {
+ if (crtc->state->event && !crtc->state->active) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
-
- drm_crtc_vblank_off(crtc);
}
static void ipu_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 01a21dd835b5..1ed60da76a0c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -306,6 +306,7 @@ err_pm_runtime_put:
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
+ struct drm_crtc *crtc = &mtk_crtc->base;
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -327,6 +328,13 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_disp_mutex_unprepare(mtk_crtc->mutex);
pm_runtime_put(drm->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 48dfc163233e..286587607931 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -423,12 +423,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp) {
ret = -ENOMEM;
+ of_node_put(node);
goto err_node;
}
ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
- if (ret)
+ if (ret) {
+ of_node_put(node);
goto err_node;
+ }
private->ddp_comp[comp_id] = comp;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index eaa5a2240c0c..bdbf358697cd 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -720,6 +720,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
/* Skip connector cleanup if creation was delegated to the bridge */
if (dsi->conn.dev)
drm_connector_cleanup(&dsi->conn);
+ if (dsi->panel)
+ drm_panel_detach(dsi->panel);
}
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 863d030786e5..e7a6651ceeab 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1473,7 +1473,6 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (IS_ERR(regmap))
ret = PTR_ERR(regmap);
if (ret) {
- ret = PTR_ERR(regmap);
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
@@ -1529,6 +1528,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..25a0e7d13340 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -383,19 +383,17 @@ static const unsigned int a3xx_registers[] = {
0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
- 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
- 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
- 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
- 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
- 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
- 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
- 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
- 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
- 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
- 0x303c, 0x303c, 0x305e, 0x305f,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
+ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
+ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
+ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
+ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
+ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
+ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
+ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
+ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
+ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
~0 /* sentinel */
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 6f240021705b..e49b414c012c 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -33,6 +33,8 @@
#include "sfpb.xml.h"
#include "dsi_cfg.h"
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -909,7 +911,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
}
@@ -1288,7 +1290,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1);
- wmb(); /* make sure reset happen */
+ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index c8d1f19c9a6d..d46b9e75a847 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -306,7 +306,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return num;
}
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int id = dsi_mgr_connector_get_id(connector);
@@ -438,6 +438,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
+ struct msm_dsi_pll *src_pll;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
@@ -471,6 +472,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ /* Save PLL status if it is a clock source */
+ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ msm_dsi_pll_save_state(src_pll);
+
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 8b4e3004f451..86b0448d2ce5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -542,7 +542,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
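The mdp5_cfg hunk above stops returning a bare NULL from the failure path and returns ERR_PTR(ret) instead, so the caller can recover the actual errno with IS_ERR()/PTR_ERR() rather than guessing why construction failed. A minimal sketch of that convention, using made-up foo_* names rather than anything from the driver:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo { int dummy; };

/* Illustrative constructor: the failure reason travels inside the pointer. */
static struct foo *foo_create(bool want_fail)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	if (want_fail) {
		kfree(f);
		return ERR_PTR(-EINVAL);
	}
	return f;
}

/* Illustrative caller: IS_ERR()/PTR_ERR() recover the errno. */
static int foo_use(void)
{
	struct foo *f = foo_create(false);

	if (IS_ERR(f))
		return PTR_ERR(f);
	kfree(f);
	return 0;
}

The point of the change is exactly this calling convention: a NULL return collapses every failure into one case, while ERR_PTR() keeps the distinct error codes without needing an extra out-parameter.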
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6abf315fd6da..ce32f41fc28a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -396,6 +396,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto fail;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 795660e29b2c..569e8c45a59a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -40,6 +40,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj,
int npages)
@@ -106,8 +146,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}
return msm_obj->pages;
@@ -124,9 +163,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 434d1e29f279..cd37d00e9723 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -750,7 +750,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
if (crtc)
- drm_crtc_force_disable(crtc);
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->primary->fb);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a63c5ac69f66..23b8f25402d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -37,6 +37,7 @@ struct nvkm_i2c_bus {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
};
int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
@@ -56,6 +57,7 @@ struct nvkm_i2c_aux {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
u32 intr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4bb9ab892ae1..78e521d00251 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -158,7 +158,7 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
fence = list_entry(fctx->pending.next, typeof(*fence), head);
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(fence->channel, fctx))
+ if (nouveau_fence_update(chan, fctx))
ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 9d90d8b4b7e6..f5a8db1bb8b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -72,6 +72,8 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
if (debug > subdev->debug)
return;
+ if (!mthd)
+ return;
for (i = 0; (list = mthd->data[i].mthd) != NULL; i++) {
u32 base = chan->head * mthd->addr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index de8b806b88fd..7618b2eb4fdf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -143,23 +143,24 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
nent = (fuc.size / sizeof(struct gk20a_fw_av));
- pack = vzalloc((sizeof(*pack) * max_classes) +
- (sizeof(*init) * (nent + 1)));
+ pack = vzalloc((sizeof(*pack) * (max_classes + 1)) +
+ (sizeof(*init) * (nent + max_classes + 1)));
if (!pack) {
ret = -ENOMEM;
goto end;
}
- init = (void *)(pack + max_classes);
+ init = (void *)(pack + max_classes + 1);
- for (i = 0; i < nent; i++) {
- struct gf100_gr_init *ent = &init[i];
+ for (i = 0; i < nent; i++, init++) {
struct gk20a_fw_av *av = &((struct gk20a_fw_av *)fuc.data)[i];
u32 class = av->addr & 0xffff;
u32 addr = (av->addr & 0xffff0000) >> 14;
if (prevclass != class) {
- pack[classidx].init = ent;
+ if (prevclass) /* Add terminator to the method list. */
+ init++;
+ pack[classidx].init = init;
pack[classidx].type = class;
prevclass = class;
if (++classidx >= max_classes) {
@@ -169,10 +170,10 @@ gk20a_gr_av_to_method(struct gf100_gr *gr, const char *fw_name,
}
}
- ent->addr = addr;
- ent->data = av->data;
- ent->count = 1;
- ent->pitch = 1;
+ init->addr = addr;
+ init->data = av->data;
+ init->count = 1;
+ init->pitch = 1;
}
*ppack = pack;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
index 60ece0a8a2e1..1d2d6bae73cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
@@ -87,7 +87,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
/* XXX: Get these values from the VBIOS instead */
DLL = !(ram->mr[1] & 0x1);
- RON = !(ram->mr[1] & 0x300) >> 8;
+ RON = !((ram->mr[1] & 0x300) >> 8);
break;
default:
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index f0851d57df2f..f89692cb2bc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -105,9 +105,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
int ret;
+
AUX_TRACE(aux, "acquire");
mutex_lock(&aux->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+
+ if (aux->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&aux->mutex);
return ret;
@@ -141,6 +147,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
}
}
+void
+nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "init");
+ mutex_lock(&aux->mutex);
+ aux->enabled = true;
+ mutex_unlock(&aux->mutex);
+}
+
+void
+nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "fini");
+ mutex_lock(&aux->mutex);
+ aux->enabled = false;
+ mutex_unlock(&aux->mutex);
+}
+
int
nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index fc6b162fa0b1..1be175a8fb02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -14,6 +14,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_init(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 4f197b15acf6..719345074711 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_aux *aux;
u32 mask;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_fini(aux);
+ }
+
+ list_for_each_entry(bus, &i2c->bus, head) {
+ nvkm_i2c_bus_fini(bus);
+ }
+
if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
@@ -175,11 +185,31 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
}
static int
+nvkm_i2c_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_pad *pad;
+
+ /*
+ * We init our i2c busses as early as possible, since they may be
+ * needed by the vbios init scripts on some cards
+ */
+ list_for_each_entry(pad, &i2c->pad, head)
+ nvkm_i2c_pad_init(pad);
+ list_for_each_entry(bus, &i2c->bus, head)
+ nvkm_i2c_bus_init(bus);
+
+ return 0;
+}
+
+static int
nvkm_i2c_init(struct nvkm_subdev *subdev)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_aux *aux;
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_init(pad);
@@ -189,6 +219,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
nvkm_i2c_bus_init(bus);
}
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_init(aux);
+ }
+
return 0;
}
@@ -223,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_i2c = {
.dtor = nvkm_i2c_dtor,
+ .preinit = nvkm_i2c_preinit,
.init = nvkm_i2c_init,
.fini = nvkm_i2c_fini,
.intr = nvkm_i2c_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
index 807a2b67bd64..ed50cc3736b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
BUS_TRACE(bus, "init");
if (bus->func->init)
bus->func->init(bus);
+
+ mutex_lock(&bus->mutex);
+ bus->enabled = true;
+ mutex_unlock(&bus->mutex);
+}
+
+void
+nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus)
+{
+ BUS_TRACE(bus, "fini");
+ mutex_lock(&bus->mutex);
+ bus->enabled = false;
+ mutex_unlock(&bus->mutex);
}
void
@@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
int ret;
+
BUS_TRACE(bus, "acquire");
mutex_lock(&bus->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+
+ if (bus->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&bus->mutex);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index e1be14c23e54..2fdb1b8e7164 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -17,6 +17,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_bus **);
void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *);
int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index e6f74168238c..2ef9e942f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -87,10 +87,10 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
if (exec) {
nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
memx->base, finish);
+ nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+ reply[0], reply[1]);
}
- nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
- reply[0], reply[1]);
kfree(memx);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index fd065e5cd97f..a15f7e83fab9 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1983,7 +1983,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err) {
+ struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+ drm_panel_remove(&panel->base);
+ }
+
+ return err;
}
static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b99f3e59011c..5fcb5869a489 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7026,8 +7026,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* XXX this should actually be a bus address, not an MC address. same on older asics */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 98fcf7720ebd..631eccc4eadd 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1824,8 +1824,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
track->textures[i].tex_coord_type = 2;
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index c70e6d5bcd19..8aa3772e935f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
- track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
- track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+ track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
+ track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
}
if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
track->textures[i].lookup_disable = true;
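The r100 and r200 hunks above fix the order of operations when decoding the texture width/height fields: the register value must be masked first and shifted second. With the shift applied first, the unshifted RADEON_TXFORMAT_*_MASK no longer lines up with the field, so the computed log2 size collapses to the wrong value. A small standalone sketch of the difference, with illustrative mask/shift values rather than the real register layout:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout: bits 11:8 hold log2(width). */
#define EX_WIDTH_MASK	0x00000f00u
#define EX_WIDTH_SHIFT	8

int main(void)
{
	uint32_t idx_value = 0x00000a00u;	/* log2(width) = 10 */

	/* Broken: after the shift, the unshifted mask selects the wrong bits. */
	uint32_t broken = 1u << ((idx_value >> EX_WIDTH_SHIFT) & EX_WIDTH_MASK);
	/* Fixed: isolate the field, then shift it down, then use it. */
	uint32_t fixed = 1u << ((idx_value & EX_WIDTH_MASK) >> EX_WIDTH_SHIFT);

	printf("broken=%u fixed=%u\n", broken, fixed);
	return 0;
}

With this layout the broken form prints 1, while the fixed form prints the intended 1024 (1 << 10).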
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f2eac6b6c46a..9569c35f8766 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3697,8 +3697,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c1f7d4829942..1a049b77d45d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -764,7 +764,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_encoder->output_csc = val;
- if (connector->encoder->crtc) {
+ if (connector->encoder && connector->encoder->crtc) {
struct drm_crtc *crtc = connector->encoder->crtc;
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ca1caf405832..432ad7d73cb9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -110,6 +110,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+ msleep(10);
+
WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
(NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
@@ -935,12 +937,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = min(max(den/post_div, 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *ref_div = (*ref_div * fb_div_max)/(*fb_div);
*fb_div = fb_div_max;
}
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b75d809c292e..919d389869ce 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6018,8 +6018,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index b82ef5ed727c..ac7ae206f2e7 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index aa7678a5e75e..22aa270b16da 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -865,7 +865,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct vop *vop = to_vop(crtc);
adjusted_mode->clock =
- clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+ DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+ 1000);
return true;
}
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index e7c243f70870..08808e3701de 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -740,7 +740,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 376b0763c874..a5412a6fbeca 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1352,7 +1352,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
hdmi->drm_connector = NULL;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 97828faf2a1f..d58991b06a47 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -137,6 +137,8 @@ static int sun4i_drv_bind(struct device *dev)
ret = -ENOMEM;
goto free_drm;
}
+
+ dev_set_drvdata(dev, drm);
drm->dev_private = drv;
drm_vblank_init(drm, 1);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index c7e6c9839c9a..51d34e7275ab 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -846,7 +846,7 @@ static void
vc4_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ vc4_crtc_destroy_state(crtc, crtc->state);
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
if (crtc->state)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 54639395aba0..a3559b1a3a0f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -521,6 +521,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
ret = wait_event_timeout(vgdev->resp_wq,
atomic_read(&cache_ent->is_valid), 5 * HZ);
+ /* is_valid check must precede the copy of the cache entry. */
+ smp_rmb();
+
ptr = cache_ent->caps_cache;
copy_exit:
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 52436b3c01bb..772a5a3b0ce1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -618,6 +618,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
cache_ent->id == le32_to_cpu(cmd->capset_id)) {
memcpy(cache_ent->caps_cache, resp->capset_data,
cache_ent->size);
+ /* Copy must occur before is_valid is signalled. */
+ smp_wmb();
atomic_set(&cache_ent->is_valid, 1);
break;
}
@@ -679,11 +681,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_get_capset *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- int max_size = vgdev->capsets[idx].max_size;
+ int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
void *resp_buf;
- if (idx > vgdev->num_capsets)
+ if (idx >= vgdev->num_capsets)
return -EINVAL;
if (version > vgdev->capsets[idx].max_version)
@@ -693,6 +695,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
if (!cache_ent)
return -ENOMEM;
+ max_size = vgdev->capsets[idx].max_size;
cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
if (!cache_ent->caps_cache) {
kfree(cache_ent);
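The two virtio-gpu hunks above pair an smp_wmb() on the producer side (after the memcpy into caps_cache, before setting is_valid) with an smp_rmb() on the consumer side (after the is_valid wait, before copying the cache entry out). A minimal sketch of that publish/consume ordering with the same kernel primitives; the struct and field names are illustrative, not the driver's:

#include <linux/atomic.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct caps_entry {
	atomic_t is_valid;
	char data[64];
};

/* Producer: fill the payload first, then publish the flag. */
static void caps_publish(struct caps_entry *ent, const char *src, size_t len)
{
	memcpy(ent->data, src, len);
	smp_wmb();	/* payload must be visible before is_valid */
	atomic_set(&ent->is_valid, 1);
}

/* Consumer: check the flag, then read the payload behind a read barrier. */
static bool caps_consume(struct caps_entry *ent, char *dst, size_t len)
{
	if (!atomic_read(&ent->is_valid))
		return false;
	smp_rmb();	/* is_valid read must happen before the payload read */
	memcpy(dst, ent->data, len);
	return true;
}

Dropping either barrier reopens the race the hunks close: the reader could observe is_valid == 1 and still copy stale cache contents.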
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..0c7c3005594c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -210,8 +210,10 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
cres->hash.key = user_key | (res_type << 24);
ret = drm_ht_insert_item(&man->resources, &cres->hash);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ kfree(cres);
goto out_invalid_key;
+ }
cres->state = VMW_CMDBUF_RES_ADD;
cres->res = vmw_resource_reference(res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4b556e698f13..6c30c24c779f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1245,7 +1245,13 @@ static int vmw_master_set(struct drm_device *dev,
}
dev_priv->active_master = vmaster;
- drm_sysfs_hotplug_event(dev);
+
+ /*
+ * Inform a new master that the layout may have changed while
+ * it was gone.
+ */
+ if (!from_open)
+ drm_sysfs_hotplug_event(dev);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 9fe8eda7c859..40c1e89ed361 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2493,7 +2493,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
- if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+ if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+ cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
DRM_ERROR("Illegal shader type %u.\n",
(unsigned) cmd->body.type);
return -EINVAL;
@@ -2732,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (view_type == vmw_view_max)
return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
+ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+ DRM_ERROR("Invalid surface id.\n");
+ return -EINVAL;
+ }
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf_node);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e57a0bad7a62..123d85de80d6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -264,7 +264,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -288,7 +288,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
kfree(reply);
-
+ reply = NULL;
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
/* A checkpoint occurred. Retry. */
continue;
@@ -300,7 +300,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}
- if (retries == RETRIES)
+ if (!reply)
return -EINVAL;
*msg_len = reply_len;
diff --git a/drivers/gpu/imx/ipu-v3/ipu-dp.c b/drivers/gpu/imx/ipu-v3/ipu-dp.c
index 98686edbcdbb..33de3a1bac49 100644
--- a/drivers/gpu/imx/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/imx/ipu-v3/ipu-dp.c
@@ -195,7 +195,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
DP_COM_CONF_CSC_DEF_BOTH);
} else {
- if (flow->foreground.in_cs == flow->out_cs)
+ if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
+ flow->foreground.in_cs == flow->out_cs)
/*
* foreground identical to output, apply color
* conversion on background
@@ -261,6 +262,8 @@ void ipu_dp_disable_channel(struct ipu_dp *dp)
struct ipu_dp_priv *priv = flow->priv;
u32 reg, csc;
+ dp->in_cs = IPUV3_COLORSPACE_UNKNOWN;
+
if (!dp->foreground)
return;
@@ -268,8 +271,9 @@ void ipu_dp_disable_channel(struct ipu_dp *dp)
reg = readl(flow->base + DP_COM_CONF);
csc = reg & DP_COM_CONF_CSC_DEF_MASK;
- if (csc == DP_COM_CONF_CSC_DEF_FG)
- reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_BOTH || csc == DP_COM_CONF_CSC_DEF_BG)
+ reg |= DP_COM_CONF_CSC_DEF_BG;
reg &= ~DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
@@ -350,6 +354,8 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
mutex_init(&priv->mutex);
for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
+ priv->flow[i].background.in_cs = IPUV3_COLORSPACE_UNKNOWN;
+ priv->flow[i].foreground.in_cs = IPUV3_COLORSPACE_UNKNOWN;
priv->flow[i].foreground.foreground = true;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
diff --git a/drivers/gpu/imx/ipu-v3/ipu-ic.c b/drivers/gpu/imx/ipu-v3/ipu-ic.c
index 321eb983c2f5..65d7daf944b0 100644
--- a/drivers/gpu/imx/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/imx/ipu-v3/ipu-ic.c
@@ -256,7 +256,7 @@ static int init_csc(struct ipu_ic *ic,
writel(param, base++);
param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
- (params->sat << 9);
+ (params->sat << 10);
writel(param, base++);
param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 9428ea7cdf8a..c52bd163abb3 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -26,12 +26,36 @@
#define A4_2WHEEL_MOUSE_HACK_7 0x01
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
+#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
+
struct a4tech_sc {
unsigned long quirks;
unsigned int hw_wheel;
__s32 delayed_value;
};
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+ usage->hid == A4_WHEEL_ORIENTATION) {
+ /*
+ * We do not want to have this usage mapped to anything as it's
+ * nonstandard and doesn't really behave like an HID report.
+ * It's only selecting the orientation (vertical/horizontal) of
+ * the previous mouse wheel report. The input_events will be
+ * generated once both reports are recorded in a4_event().
+ */
+ return -1;
+ }
+
+ return 0;
+
+}
+
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
struct input_dev *input;
- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
- !usage->type)
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
input = field->hidinput->input;
@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- if (usage->hid == 0x000100b8) {
+ if (usage->hid == A4_WHEEL_ORIENTATION) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
return 1;
@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
static struct hid_driver a4_driver = {
.name = "a4tech",
.id_table = a4_devices,
+ .input_mapping = a4_input_mapping,
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 65a0c79f212e..197eb75d10ef 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -55,7 +55,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
- DECLARE_BITMAP(pressed_fn, KEY_CNT);
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
};
@@ -182,6 +181,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
{
struct apple_sc *asc = hid_get_drvdata(hid);
const struct apple_key_translation *trans, *table;
+ bool do_translate;
+ u16 code = 0;
if (usage->code == KEY_FN) {
asc->fn_on = !!value;
@@ -190,8 +191,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
if (fnmode) {
- int do_translate;
-
if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
table = macbookair_fn_keys;
@@ -203,25 +202,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
trans = apple_find_translation (table, usage->code);
if (trans) {
- if (test_bit(usage->code, asc->pressed_fn))
- do_translate = 1;
- else if (trans->flags & APPLE_FLAG_FKEY)
- do_translate = (fnmode == 2 && asc->fn_on) ||
- (fnmode == 1 && !asc->fn_on);
- else
- do_translate = asc->fn_on;
-
- if (do_translate) {
- if (value)
- set_bit(usage->code, asc->pressed_fn);
- else
- clear_bit(usage->code, asc->pressed_fn);
-
- input_event(input, usage->type, trans->to,
- value);
-
- return 1;
+ if (test_bit(trans->from, input->key))
+ code = trans->from;
+ else if (test_bit(trans->to, input->key))
+ code = trans->to;
+
+ if (!code) {
+ if (trans->flags & APPLE_FLAG_FKEY) {
+ switch (fnmode) {
+ case 1:
+ do_translate = !asc->fn_on;
+ break;
+ case 2:
+ do_translate = asc->fn_on;
+ break;
+ default:
+ /* should never happen */
+ do_translate = false;
+ }
+ } else {
+ do_translate = asc->fn_on;
+ }
+
+ code = do_translate ? trans->to : trans->from;
}
+
+ input_event(input, usage->type, code, value);
+ return 1;
}
if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
@@ -334,7 +341,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
- usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
+ usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
+ usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index a594e478a1e2..843aed4dec80 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid)
{
struct axff_device *axff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
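This hid-axff hunk, like the hid-dr, hid-emsff, hid-gaff, hid-holtekff, hid-lg*ff and hid-logitech-hidpp hunks further down, adds the same guard: list_first_entry()/list_entry() on hid->inputs must not run while the list is empty, because on an empty list it hands back a pointer into the list head itself rather than a real struct hid_input, and the following hidinput->input dereference is garbage. A minimal sketch of the guarded lookup these hunks open-code:

#include <linux/hid.h>
#include <linux/list.h>

/* Illustrative helper: return the first input device, or NULL if none. */
static struct input_dev *example_first_input(struct hid_device *hid)
{
	struct hid_input *hidinput;

	if (list_empty(&hid->inputs))
		return NULL;	/* caller bails out with -ENODEV */

	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	return hidinput->input;
}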
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 70597854397f..b4b9d8152536 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -197,16 +197,37 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
}
/*
+ * Concatenate usage which defines 16 bits or less with the
+ * currently defined usage page to form a 32 bit usage
+ */
+
+static void complete_usage(struct hid_parser *parser, unsigned int index)
+{
+ parser->local.usage[index] &= 0xFFFF;
+ parser->local.usage[index] |=
+ (parser->global.usage_page & 0xFFFF) << 16;
+}
+
+/*
* Add a usage to the temporary parser table.
*/
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+
+ /*
+ * If Usage item only includes usage id, concatenate it with
+ * currently defined usage page
+ */
+ if (size <= 2)
+ complete_usage(parser, parser->local.usage_index);
+
+ parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -248,6 +269,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
offset = report->size;
report->size += parser->global.report_size * parser->global.report_count;
+ /* Total size check: Allow for possible report index byte */
+ if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+ hid_err(parser->device, "report is too long\n");
+ return -1;
+ }
+
if (!parser->local.usage_index) /* Ignore padding fields */
return 0;
@@ -463,10 +490,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
- return hid_add_usage(parser, data);
+ return hid_add_usage(parser, data, item->size);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
@@ -475,9 +499,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
parser->local.usage_minimum = data;
return 0;
@@ -488,9 +509,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
count = data - parser->local.usage_minimum;
if (count + parser->local.usage_index >= HID_MAX_USAGES) {
/*
@@ -510,7 +528,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
for (n = parser->local.usage_minimum; n <= data; n++)
- if (hid_add_usage(parser, n)) {
+ if (hid_add_usage(parser, n, item->size)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
@@ -525,6 +543,41 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
/*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_last_usage_page(struct hid_parser *parser)
+{
+ int i;
+ unsigned int usage_page;
+ unsigned int current_page;
+
+ if (!parser->local.usage_index)
+ return;
+
+ usage_page = parser->global.usage_page;
+
+ /*
+ * Concatenate usage page again only if last declared Usage Page
+ * has not been already used in previous usages concatenation
+ */
+ for (i = parser->local.usage_index - 1; i >= 0; i--) {
+ if (parser->local.usage_size[i] > 2)
+ /* Ignore extended usages */
+ continue;
+
+ current_page = parser->local.usage[i] >> 16;
+ if (current_page == usage_page)
+ break;
+
+ complete_usage(parser, i);
+ }
+}
+
+/*
* Process a main item.
*/
@@ -533,6 +586,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
+ hid_concatenate_last_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -712,6 +767,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
@@ -746,6 +805,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
+ hid_concatenate_last_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -947,6 +1008,7 @@ int hid_open_report(struct hid_device *device)
__u8 *start;
__u8 *buf;
__u8 *end;
+ __u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
@@ -1000,7 +1062,8 @@ int hid_open_report(struct hid_device *device)
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
- while ((start = fetch_item(start, end, &item)) != NULL) {
+ while ((next = fetch_item(start, end, &item)) != NULL) {
+ start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
@@ -1029,7 +1092,8 @@ int hid_open_report(struct hid_device *device)
}
}
- hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %u/%u\n",
+ size - (unsigned int)(end - start), size);
err:
vfree(parser);
hid_close_report(device);
@@ -1483,7 +1547,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
rsize = ((report->size - 1) >> 3) + 1;
- if (rsize > HID_MAX_BUFFER_SIZE)
+ if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
+ rsize = HID_MAX_BUFFER_SIZE - 1;
+ else if (rsize > HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE;
if (csize < rsize) {
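The hid-core.c changes above move the Usage Page concatenation into complete_usage(), which packs the last declared Usage Page into the upper 16 bits of any usage item that is 16 bits or narrower, as the HID specification's section 6.2.2.8 (quoted in the hunk) requires once a main item is reached. A minimal sketch of that packing; hid_full_usage() is an illustrative name, not a function in hid-core.c:

#include <linux/types.h>

/* Illustrative: build the 32-bit extended usage from page and id. */
static inline u32 hid_full_usage(u16 usage_page, u16 usage_id)
{
	return ((u32)usage_page << 16) | usage_id;
}

Extended usages (items wider than 16 bits) already carry their page, which is why the new code skips them via the size check and the "Ignore extended usages" branch.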
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index d7179dd3c9ef..3cafa1d28fed 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1058,10 +1058,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
+ if (down_interruptible(&hdev->driver_input_lock))
+ return 0;
+
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
+ up(&hdev->driver_input_lock);
+
return 0;
}
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 818ea7d93533..309969b8dc2e 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid)
{
struct drff_device *drff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index d82d75bb11f7..80f9a02dfa69 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid)
{
struct emsff_device *emsff;
struct hid_report *report;
- struct hid_input *hidinput = list_first_entry(&hid->inputs,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c
index 2d8cead3adca..5a02c50443cb 100644
--- a/drivers/hid/hid-gaff.c
+++ b/drivers/hid/hid-gaff.c
@@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 6e1a4a4fc0c1..ab9da597106f 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
/* Locate the boot interface, to receive the LED change events */
struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+ struct hid_device *boot_hid;
+ struct hid_input *boot_hid_input;
- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ if (unlikely(boot_interface == NULL))
+ return -ENODEV;
+
+ boot_hid = usb_get_intfdata(boot_interface);
+ boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);
return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545fc3ae..3e84551cca9c 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -140,13 +140,19 @@ static int holtekff_init(struct hid_device *hid)
{
struct holtekff_device *holtekff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6f4c84d824e6..25c006338100 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -509,6 +509,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fc7ada26457e..26e967730997 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -607,6 +607,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
+ switch (usage->hid & 0xf) {
+ case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
/*
* Some lazy vendors declare 255 usages for System Control,
* leading to the creation of ABS_X|Y axis and too many others.
@@ -802,6 +810,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break;
case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break;
+ case 0x079: map_key_clear(KEY_KBDILLUMUP); break;
+ case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break;
+ case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break;
+
case 0x082: map_key_clear(KEY_VIDEO_NEXT); break;
case 0x083: map_key_clear(KEY_LAST); break;
case 0x084: map_key_clear(KEY_ENTER); break;
@@ -932,6 +944,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
+ case 0x29f: map_key_clear(KEY_SCALE); break;
+
default: map_key_clear(KEY_UNKNOWN);
}
break;
@@ -1012,9 +1026,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
- if (device->driver->input_mapped && device->driver->input_mapped(device,
- hidinput, field, usage, &bit, &max) < 0)
- goto ignore;
+ if (device->driver->input_mapped &&
+ device->driver->input_mapped(device, hidinput, field, usage,
+ &bit, &max) < 0) {
+ /*
+ * The driver indicated that no further generic handling
+ * of the usage is desired.
+ */
+ return;
+ }
set_bit(usage->type, input->evbit);
@@ -1073,9 +1093,11 @@ mapped:
set_bit(MSC_SCAN, input->mscbit);
}
-ignore:
return;
+ignore:
+ usage->type = 0;
+ usage->code = 0;
}
void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 52026dc94d5c..7e55d3f755dd 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -761,7 +761,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!buf) {
ret = -ENOMEM;
- goto err_free;
+ goto err_stop;
}
ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
@@ -793,9 +793,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = lg4ff_init(hdev);
if (ret)
- goto err_free;
+ goto err_stop;
return 0;
+
+err_stop:
+ hid_hw_stop(hdev);
err_free:
kfree(drv_data);
return ret;
@@ -806,8 +809,7 @@ static void lg_remove(struct hid_device *hdev)
struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
if (drv_data->quirks & LG_FF4)
lg4ff_deinit(hdev);
- else
- hid_hw_stop(hdev);
+ hid_hw_stop(hdev);
kfree(drv_data);
}
diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
index 0e3fb1a7e421..6909d9c2fc67 100644
--- a/drivers/hid/hid-lg2ff.c
+++ b/drivers/hid/hid-lg2ff.c
@@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid)
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)
diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
index 8c2da183d3bc..acf739fc4060 100644
--- a/drivers/hid/hid-lg3ff.c
+++ b/drivers/hid/hid-lg3ff.c
@@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 1fc12e357035..1b109a5cf922 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -1261,8 +1261,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
int lg4ff_init(struct hid_device *hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@@ -1274,6 +1274,13 @@ int lg4ff_init(struct hid_device *hid)
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;
@@ -1485,7 +1492,6 @@ int lg4ff_deinit(struct hid_device *hid)
}
}
#endif
- hid_hw_stop(hid);
drv_data->device_props = NULL;
kfree(entry);
diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
index e1394af0ae7b..1871cdcd1e0a 100644
--- a/drivers/hid/hid-lgff.c
+++ b/drivers/hid/hid-lgff.c
@@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2e2515a4c070..434438334fb1 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -449,13 +449,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
{
+ const u8 ping_byte = 0x5a;
+ u8 ping_data[3] = { 0, 0, ping_byte };
struct hidpp_report response;
int ret;
- ret = hidpp_send_fap_command_sync(hidpp,
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
HIDPP_PAGE_ROOT_IDX,
CMD_ROOT_GET_PROTOCOL_VERSION,
- NULL, 0, &response);
+ ping_data, sizeof(ping_data), &response);
if (ret == HIDPP_ERROR_INVALID_SUBID) {
hidpp->protocol_major = 1;
@@ -475,8 +478,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
if (ret)
return ret;
- hidpp->protocol_major = response.fap.params[0];
- hidpp->protocol_minor = response.fap.params[1];
+ if (response.rap.params[2] != ping_byte) {
+ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
+ __func__, response.rap.params[2], ping_byte);
+ return -EPROTO;
+ }
+
+ hidpp->protocol_major = response.rap.params[0];
+ hidpp->protocol_minor = response.rap.params[1];
return ret;
}
@@ -1229,8 +1238,8 @@ static void hidpp_ff_destroy(struct ff_device *ff)
static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
{
struct hid_device *hid = hidpp->hid_dev;
- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
struct ff_device *ff;
@@ -1239,6 +1248,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
int error, j, num_slots;
u8 version;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
if (!dev) {
hid_err(hid, "Struct input_dev not set!\n");
return -EINVAL;
@@ -1282,6 +1298,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ if (!data->wq) {
+ kfree(data->effect_ids);
+ kfree(data);
+ return -ENOMEM;
+ }
+
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@@ -1326,7 +1349,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
- data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index f095bf8a3aa9..762f33817dd0 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -556,10 +556,14 @@ static void pcmidi_setup_extra_keys(
static int pcmidi_set_operational(struct pcmidi_snd *pm)
{
+ int rc;
+
if (pm->ifnum != 1)
return 0; /* only set up ONCE for interface 1 */
- pcmidi_get_output_report(pm);
+ rc = pcmidi_get_output_report(pm);
+ if (rc < 0)
+ return rc;
pcmidi_submit_output_report(pm, 0xc1);
return 0;
}
@@ -688,7 +692,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
spin_lock_init(&pm->rawmidi_in_lock);
init_sustain_timers(pm);
- pcmidi_set_operational(pm);
+ err = pcmidi_set_operational(pm);
+ if (err < 0) {
+ pk_error("failed to find output report\n");
+ goto fail_register;
+ }
/* register it */
err = snd_card_register(card);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index eee58d15e745..9633d7a7ac2e 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2008,9 +2008,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
static int sony_init_ff(struct sony_sc *sc)
{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
+
+ if (list_empty(&sc->hdev->inputs)) {
+ hid_err(sc->hdev, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index b83376077d72..d98e471a5f7b 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -34,6 +34,8 @@
#include "hid-ids.h"
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
+
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
+ int motor_swap;
switch (effect->type) {
case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
ff_field->logical_minimum,
ff_field->logical_maximum);
+ /* 2-in-1 strong motor is left */
+ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+ motor_swap = left;
+ left = right;
+ right = motor_swap;
+ }
+
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
ff_field->value[1] = right;
@@ -126,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *input_dev;
int error;
int i;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ input_dev = hidinput->input;
+
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;
@@ -238,6 +254,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
+ .driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
index a29756c6ca02..4e7e01be99b1 100644
--- a/drivers/hid/hid-zpff.c
+++ b/drivers/hid/hid-zpff.c
@@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid)
{
struct zpff_device *zpff;
struct hid_report *report;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
- struct input_dev *dev = hidinput->input;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
int i, error;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 216f0338a1f7..ed6591f92f71 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -257,13 +257,14 @@ out:
static unsigned int hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
+ unsigned int mask = POLLOUT | POLLWRNORM; /* hidraw is always writable */
poll_wait(file, &list->hidraw->wait, wait);
if (list->head != list->tail)
- return POLLIN | POLLRDNORM;
+ mask |= POLLIN | POLLRDNORM;
if (!list->hidraw->exist)
- return POLLERR | POLLHUP;
- return 0;
+ mask |= POLLERR | POLLHUP;
+ return mask;
}
static int hidraw_open(struct inode *inode, struct file *file)
@@ -378,7 +379,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
mutex_lock(&minors_lock);
dev = hidraw_table[minor];
- if (!dev) {
+ if (!dev || !dev->exist) {
ret = -ENODEV;
goto out;
}
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 832d8f9aaba2..099e1ce2f234 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_I2C_HID) += i2c-hid.o
+
+i2c-hid-objs = i2c-hid-core.o
+i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index ce2b80009c19..850527d5fab1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -42,6 +42,7 @@
#include <linux/i2c/i2c-hid.h>
#include "../hid-ids.h"
+#include "i2c-hid.h"
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
@@ -724,6 +725,7 @@ static int i2c_hid_parse(struct hid_device *hid)
char *rdesc;
int ret;
int tries = 3;
+ char *use_override;
i2c_hid_dbg(ihid, "entering %s\n", __func__);
@@ -742,26 +744,37 @@ static int i2c_hid_parse(struct hid_device *hid)
if (ret)
return ret;
- rdesc = kzalloc(rsize, GFP_KERNEL);
+ use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+ &rsize);
- if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
- return -ENOMEM;
- }
+ if (use_override) {
+ rdesc = use_override;
+ i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+ } else {
+ rdesc = kzalloc(rsize, GFP_KERNEL);
- i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
- ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
- if (ret) {
- hid_err(hid, "reading report descriptor failed\n");
- kfree(rdesc);
- return -EIO;
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd,
+ rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
}
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
- kfree(rdesc);
+ if (!use_override)
+ kfree(rdesc);
+
if (ret) {
dbg_hid("parsing report descriptor failed\n");
return ret;
@@ -899,12 +912,19 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
int ret;
/* i2c hid fetch using a fixed descriptor size (30 bytes) */
- i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
- ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
- sizeof(struct i2c_hid_desc));
- if (ret) {
- dev_err(&client->dev, "hid_descr_cmd failed\n");
- return -ENODEV;
+ if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
+ i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
+ ihid->hdesc =
+ *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
+ } else {
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+ ret = i2c_hid_command(client, &hid_descr_cmd,
+ ihid->hdesc_buffer,
+ sizeof(struct i2c_hid_desc));
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd failed\n");
+ return -ENODEV;
+ }
}
/* Validate the length of HID descriptor, the 4 first bytes:
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 000000000000..95052373a828
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
+ union {
+ struct i2c_hid_desc *i2c_hid_desc;
+ uint8_t *i2c_hid_desc_buffer;
+ };
+ uint8_t *hid_report_desc;
+ unsigned int hid_report_desc_size;
+ uint8_t *i2c_name;
+};
+
+
+/*
+ * descriptors for the SIPODEV SP1064 touchpad
+ *
+ * This device does not supply any descriptors and on Windows a filter
+ * driver operates between the i2c-hid layer and the device and injects
+ * these descriptors when the device is prompted. The descriptors were
+ * extracted by listening to the i2c-hid traffic that occurs between the
+ * Windows filter driver and the Windows i2c-hid driver.
+ */
+
+static const struct i2c_hid_desc_override sipodev_desc = {
+ .i2c_hid_desc_buffer = (uint8_t [])
+ {0x1e, 0x00, /* Length of descriptor */
+ 0x00, 0x01, /* Version of descriptor */
+ 0xdb, 0x01, /* Length of report descriptor */
+ 0x21, 0x00, /* Location of report descriptor */
+ 0x24, 0x00, /* Location of input report */
+ 0x1b, 0x00, /* Max input report length */
+ 0x25, 0x00, /* Location of output report */
+ 0x11, 0x00, /* Max output report length */
+ 0x22, 0x00, /* Location of command register */
+ 0x23, 0x00, /* Location of data register */
+ 0x11, 0x09, /* Vendor ID */
+ 0x88, 0x52, /* Product ID */
+ 0x06, 0x00, /* Version ID */
+ 0x00, 0x00, 0x00, 0x00 /* Reserved */
+ },
+
+ .hid_report_desc = (uint8_t [])
+ {0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x15, 0x81, /* Logical Minimum (-127), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0xC0, /* End Collection, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x05, /* Usage (Touchpad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x55, 0x0E, /* Unit Exponent (14), */
+ 0x65, 0x11, /* Unit (Centimeter), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x55, 0x0C, /* Unit Exponent (12), */
+ 0x66, 0x01, 0x10, /* Unit (Seconds), */
+ 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
+ 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x09, 0x56, /* Usage (Scan Time), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x54, /* Usage (Contact Count), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x55, /* Usage (Contact Count Maximum), */
+ 0x09, 0x59, /* Usage (59h), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x0F, /* Logical Maximum (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x60, /* Usage (60h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0xC5, /* Usage (C5h), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x96, 0x00, 0x01, /* Report Count (256), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x0D, /* Report ID (13), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x0E, /* Usage (Configuration), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x09, 0x52, /* Usage (Device Mode), */
+ 0x25, 0x0A, /* Logical Maximum (10), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x57, /* Usage (57h), */
+ 0x09, 0x58, /* Usage (58h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x03, /* Feature (Constant, Variable),*/
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+ },
+ .hid_report_desc_size = 475,
+ .i2c_name = "SYNA3602:00"
+};
+
+
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ {
+ .ident = "Teclast F6 Pro",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Teclast F7",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ /*
+ * There are at least 2 Primebook C11B versions, the older
+ * version has a product-name of "Primebook C11B", and a
+ * bios version / release / firmware revision of:
+ * V2.1.2 / 05/03/2018 / 18.2
+ * The new version has "PRIMEBOOK C11B" as product-name and a
+ * bios version / release / firmware revision of:
+ * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+ * Only the older version needs this quirk; note that the newer
+ * version will not match as it has a different product-name.
+ */
+ .ident = "Trekstor Primebook C11B",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor SURFBOOK E11B",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Direkt-Tek DTLAPY116-2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Direkt-Tek DTLAPY133-1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Mediacom Flexbook Edge 11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Odys Winbook 13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ { } /* Terminate list */
+};
+
+
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ return override->i2c_hid_desc;
+}
+
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ *size = override->hid_report_desc_size;
+ return override->hid_report_desc;
+}
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 000000000000..a8c19aef5824
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+ *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d5d850..41d44536aa15 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -92,7 +92,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
} else {
pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
- interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
+ interrupt_generated = !!pisr_val;
+ /* only busy-clear bit is RW, others are RO */
+ if (pisr_val)
+ ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
}
return interrupt_generated;
@@ -795,11 +798,11 @@ int ish_hw_start(struct ishtp_device *dev)
{
ish_set_host_rdy(dev);
+ set_host_ready(dev);
+
/* After that we can enable ISH DMA operation and wakeup ISHFW */
ish_wakeup(dev);
- set_host_ready(dev);
-
/* wait for FW-initiated reset flow */
if (!dev->recvd_hw_ready)
wait_event_interruptible_timeout(dev->wait_hw_ready,
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 277983aa1d90..d0b902285fc3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -222,7 +222,7 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
err_hid_device:
kfree(hid_data);
err_hid_data:
- kfree(hid);
+ hid_destroy_device(hid);
return rv;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 256521509d20..0de18c76f8d4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -628,7 +628,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl)
spin_lock_irqsave(&cl->dev->device_list_lock, flags);
list_for_each_entry(cl_device, &cl->dev->device_list,
device_link) {
- if (cl_device->fw_client->client_id == cl->fw_client_id) {
+ if (cl_device->fw_client &&
+ cl_device->fw_client->client_id == cl->fw_client_id) {
cl->device = cl_device;
rv = 0;
break;
@@ -688,6 +689,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
device_link) {
+ cl_device->fw_client = NULL;
if (warm_reset && cl_device->reference_count)
continue;
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index b9b917d2d50d..c41dbb167c91 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -90,7 +90,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
return 0;
out:
dev_err(&cl->device->dev, "error in allocating Tx pool\n");
- ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
return -ENOMEM;
}
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index d02ee5304217..731a7b3e0187 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -26,6 +26,7 @@
#include <linux/uhid.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
+#include <linux/eventpoll.h>
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
@@ -768,13 +769,14 @@ unlock:
static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
+ unsigned int mask = POLLOUT | POLLWRNORM; /* uhid is always writable */
poll_wait(file, &uhid->waitq, wait);
if (uhid->head != uhid->tail)
- return POLLIN | POLLRDNORM;
+ mask |= POLLIN | POLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations uhid_fops = {
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 617ae294a318..e851926be8b0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -98,6 +98,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 308d8432fea3..dbdd265075da 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -308,6 +308,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
spin_unlock_irq(&list->hiddev->list_lock);
mutex_lock(&hiddev->existancelock);
+ /*
+ * recheck exist with existance lock held to
+ * avoid opening a disconnected device
+ */
+ if (!list->hiddev->exist) {
+ res = -ENODEV;
+ goto bail_unlock;
+ }
if (!list->hiddev->open++)
if (list->hiddev->exist) {
struct hid_device *hid = hiddev->hid;
@@ -322,6 +330,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
return 0;
bail_unlock:
mutex_unlock(&hiddev->existancelock);
+
+ spin_lock_irq(&list->hiddev->list_lock);
+ list_del(&list->node);
+ spin_unlock_irq(&list->hiddev->list_lock);
bail:
file->private_data = NULL;
vfree(list);
@@ -950,9 +962,9 @@ void hiddev_disconnect(struct hid_device *hid)
hiddev->exist = 0;
if (hiddev->open) {
- mutex_unlock(&hiddev->existancelock);
usbhid_close(hiddev->hid);
wake_up_interruptible(&hiddev->wait);
+ mutex_unlock(&hiddev->existancelock);
} else {
mutex_unlock(&hiddev->existancelock);
kfree(hiddev);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1ad378cb2a6..fbf14a14bdd4 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -529,14 +529,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
*/
buttons = (data[4] << 1) | (data[3] & 0x01);
} else if (features->type == CINTIQ_COMPANION_2) {
- /* d-pad right -> data[4] & 0x10
- * d-pad up -> data[4] & 0x20
- * d-pad left -> data[4] & 0x40
- * d-pad down -> data[4] & 0x80
- * d-pad center -> data[3] & 0x01
+ /* d-pad right -> data[2] & 0x10
+ * d-pad up -> data[2] & 0x20
+ * d-pad left -> data[2] & 0x40
+ * d-pad down -> data[2] & 0x80
+ * d-pad center -> data[1] & 0x01
*/
buttons = ((data[2] >> 4) << 7) |
- ((data[1] & 0x04) << 6) |
+ ((data[1] & 0x04) << 4) |
((data[2] & 0x0F) << 2) |
(data[1] & 0x03);
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
@@ -819,7 +819,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
input_report_key(input, BTN_BASE2, (data[11] & 0x02));
if (data[12] & 0x80)
- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
else
input_report_abs(input, ABS_WHEEL, 0);
@@ -949,6 +949,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
y >>= 1;
distance >>= 1;
}
+ if (features->type == INTUOSHT2)
+ distance = features->distance_max - distance;
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_DISTANCE, distance);
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 579bdf93be43..e27f7e12c05b 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -693,8 +693,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
if (resource->caps.flags & POWER_METER_CAN_CAP) {
if (!can_cap_in_hardware()) {
- dev_err(&resource->acpi_dev->dev,
- "Ignoring unsafe software power cap!\n");
+ dev_warn(&resource->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
goto skip_unsafe_cap;
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 5929e126da63..d9923d63eb4f 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -426,7 +426,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
return 0x95;
break;
}
- return -ENODEV;
+ return 0;
}
/* Provide labels for sysfs */
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 9c262d955331..d2583caa8087 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -268,9 +268,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
long reg;
if (bypass_attn & (1 << channel))
- reg = (volt * 1024) / 2250;
+ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
else
- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+ (r[0] + r[1]) * 2250);
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index facd05cda26d..e8c089886427 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -96,17 +96,23 @@ superio_select(int base, int ld)
outb(ld, base + 1);
}
-static inline void
+static inline int
superio_enter(int base)
{
+ if (!request_muxed_region(base, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, base);
outb(0x87, base);
+
+ return 0;
}
static inline void
superio_exit(int base)
{
outb(0xaa, base);
+ release_region(base, 2);
}
/*
@@ -1561,7 +1567,7 @@ exit:
static int __init f71805f_find(int sioaddr, unsigned short *address,
struct f71805f_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 devid;
static const char * const names[] = {
@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
"F71872F/FG or F71806F/FG",
};
- superio_enter(sioaddr);
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID)
goto exit;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a74c075a30ec..e0a1a118514f 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -38,16 +38,20 @@ struct hwmon_device {
#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+#define MAX_SYSFS_ATTR_NAME_LENGTH 32
+
struct hwmon_device_attribute {
struct device_attribute dev_attr;
const struct hwmon_ops *ops;
enum hwmon_sensor_types type;
u32 attr;
int index;
+ char name[MAX_SYSFS_ATTR_NAME_LENGTH];
};
#define to_hwmon_attr(d) \
container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
/*
* Thermal zone information
@@ -55,7 +59,7 @@ struct hwmon_device_attribute {
* also provides the sensor index.
*/
struct hwmon_thermal_data {
- struct hwmon_device *hwdev; /* Reference to hwmon device */
+ struct device *dev; /* Reference to hwmon device */
int index; /* sensor index */
};
@@ -92,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
NULL
};
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+ int i;
+
+ for (i = 0; attrs[i]; i++) {
+ struct device_attribute *dattr = to_dev_attr(attrs[i]);
+ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+ kfree(hattr);
+ }
+ kfree(attrs);
+}
+
static void hwmon_dev_release(struct device *dev)
{
- kfree(to_hwmon_device(dev));
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ if (hwdev->group.attrs)
+ hwmon_free_attrs(hwdev->group.attrs);
+ kfree(hwdev->groups);
+ kfree(hwdev);
}
static struct class hwmon_class = {
@@ -118,11 +140,11 @@ static DEFINE_IDA(hwmon_ida);
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
- struct hwmon_device *hwdev = tdata->hwdev;
+ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
return ret;
@@ -136,26 +158,31 @@ static struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
if (!tdata)
return -ENOMEM;
- tdata->hwdev = hwdev;
+ tdata->dev = dev;
tdata->index = index;
- devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
- &hwmon_thermal_ops);
+ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+ * so ignore that error but forward any other error.
+ */
+ if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+ return PTR_ERR(tzd);
return 0;
}
#else
-static int hwmon_thermal_add_sensor(struct device *dev,
- struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
{
return 0;
}
@@ -205,8 +232,7 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
return 1;
}
-static struct attribute *hwmon_genattr(struct device *dev,
- const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
enum hwmon_sensor_types type,
u32 attr,
int index,
@@ -232,20 +258,18 @@ static struct attribute *hwmon_genattr(struct device *dev,
if ((mode & S_IWUGO) && !ops->write)
return ERR_PTR(-EINVAL);
+ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
if (type == hwmon_chip) {
name = (char *)template;
} else {
- name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
- if (!name)
- return ERR_PTR(-ENOMEM);
- scnprintf(name, strlen(template) + 16, template,
+ scnprintf(hattr->name, sizeof(hattr->name), template,
index + hwmon_attr_base(type));
+ name = hattr->name;
}
- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
- if (!hattr)
- return ERR_PTR(-ENOMEM);
-
hattr->type = type;
hattr->attr = attr;
hattr->index = index;
@@ -433,8 +457,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
return n;
}
-static int hwmon_genattrs(struct device *dev,
- const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
struct attribute **attrs,
const struct hwmon_ops *ops,
const struct hwmon_channel_info *info)
@@ -460,7 +483,7 @@ static int hwmon_genattrs(struct device *dev,
attr_mask &= ~BIT(attr);
if (attr >= template_size)
return -EINVAL;
- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+ a = hwmon_genattr(drvdata, info->type, attr, i,
templates[attr], ops);
if (IS_ERR(a)) {
if (PTR_ERR(a) != -ENOENT)
@@ -474,8 +497,7 @@ static int hwmon_genattrs(struct device *dev,
}
static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
- const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
{
int ret, i, aindex = 0, nattrs = 0;
struct attribute **attrs;
@@ -486,15 +508,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
if (nattrs == 0)
return ERR_PTR(-EINVAL);
- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return ERR_PTR(-ENOMEM);
for (i = 0; chip->info[i]; i++) {
- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
chip->info[i]);
- if (ret < 0)
+ if (ret < 0) {
+ hwmon_free_attrs(attrs);
return ERR_PTR(ret);
+ }
aindex += ret;
}
@@ -534,14 +558,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
for (i = 0; groups[i]; i++)
ngroups++;
- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
- GFP_KERNEL);
+ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
if (!hwdev->groups) {
err = -ENOMEM;
goto free_hwmon;
}
- attrs = __hwmon_create_attrs(dev, drvdata, chip);
+ attrs = __hwmon_create_attrs(drvdata, chip);
if (IS_ERR(attrs)) {
err = PTR_ERR(attrs);
goto free_hwmon;
@@ -585,8 +608,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (!chip->ops->is_visible(drvdata, hwmon_temp,
hwmon_temp_input, j))
continue;
- if (info[i]->config[j] & HWMON_T_INPUT)
- hwmon_thermal_add_sensor(dev, hwdev, j);
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+ err = hwmon_thermal_add_sensor(hdev, j);
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+ }
+ }
}
}
}
@@ -594,7 +622,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
return hdev;
free_hwmon:
- kfree(hwdev);
+ hwmon_dev_release(hdev);
ida_remove:
ida_simple_remove(&hwmon_ida, id);
return ERR_PTR(err);
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index e6b49500c52a..8c9555313fc3 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -38,9 +38,9 @@
#define INA3221_WARN3 0x0c
#define INA3221_MASK_ENABLE 0x0f
-#define INA3221_CONFIG_MODE_SHUNT BIT(1)
-#define INA3221_CONFIG_MODE_BUS BIT(2)
-#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
+#define INA3221_CONFIG_MODE_SHUNT BIT(0)
+#define INA3221_CONFIG_MODE_BUS BIT(1)
+#define INA3221_CONFIG_MODE_CONTINUOUS BIT(2)
#define INA3221_RSHUNT_DEFAULT 10000
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index eff3b24d8473..fc31669a86ba 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -164,7 +164,7 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
1000) << (16 - resolution);
- return regmap_write(data->regmap, reg, temp);
+ return regmap_write(data->regmap, reg, (u16)temp);
}
static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 2b31b84d0a5b..006f090c1b0a 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -698,7 +698,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
@@ -3481,6 +3481,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6106_REG_PWM;
data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 12b94b094c0d..40addb213bdf 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -32,8 +32,8 @@
static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
- { 0x40, 0x00, 0x42, 0x44, 0x46 },
- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+ { 0x46, 0x00, 0x40, 0x42, 0x44 },
+ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
};
static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in3_beep.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
+ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
return 0;
- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
return 0;
- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
+ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
return 0;
return attr->mode;
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index cb9fdd37bd0d..2b5b8c3de8fc 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
#define LD_IN 1
#define LD_TEMP 1
+static inline int superio_enter(int sioaddr)
+{
+ if (!request_muxed_region(sioaddr, 2, DRVNAME))
+ return -EBUSY;
+ return 0;
+}
+
static inline void superio_outb(int sioaddr, int reg, int val)
{
outb(reg, sioaddr);
@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
{
outb(0x02, sioaddr);
outb(0x02, sioaddr + 1);
+ release_region(sioaddr, 2);
}
/*
@@ -1220,7 +1228,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
{
u16 val;
u8 cfg, cfg_b;
- int i, err = 0;
+ int i, err;
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
/* Identify device */
val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 58b789c28b48..94eea2ac6251 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -89,8 +89,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC_POLL_TIMEOUT 100 /* in milli-seconds */
-#define LTC_NOT_BUSY BIT(5)
-#define LTC_NOT_PENDING BIT(4)
+#define LTC_NOT_BUSY BIT(6)
+#define LTC_NOT_PENDING BIT(5)
/*
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index c00bad02761a..0d75bc7b5065 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1028,14 +1028,15 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
const struct pmbus_driver_info *info,
const char *name,
int index, int page,
- const struct pmbus_sensor_attr *attr)
+ const struct pmbus_sensor_attr *attr,
+ bool paged)
{
struct pmbus_sensor *base;
int ret;
if (attr->label) {
ret = pmbus_add_label(data, name, index, attr->label,
- attr->paged ? page + 1 : 0);
+ paged ? page + 1 : 0);
if (ret)
return ret;
}
@@ -1067,6 +1068,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return 0;
}
+static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
+ const struct pmbus_sensor_attr *attr)
+{
+ int p;
+
+ if (attr->paged)
+ return true;
+
+ /*
+ * Some attributes may be present on more than one page despite
+ * not being marked with the paged attribute. If that is the case,
+ * then treat the sensor as being paged and add the page suffix to the
+ * attribute name.
+ * We don't just add the paged attribute to all such attributes, in
+ * order to maintain the un-suffixed labels in the case where the
+ * attribute is only on page 0.
+ */
+ for (p = 1; p < info->pages; p++) {
+ if (info->func[p] & attr->func)
+ return true;
+ }
+ return false;
+}
+
static int pmbus_add_sensor_attrs(struct i2c_client *client,
struct pmbus_data *data,
const char *name,
@@ -1080,14 +1105,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
index = 1;
for (i = 0; i < nattrs; i++) {
int page, pages;
+ bool paged = pmbus_sensor_is_paged(info, attrs);
- pages = attrs->paged ? info->pages : 1;
+ pages = paged ? info->pages : 1;
for (page = 0; page < pages; page++) {
if (!(info->func[page] & attrs->func))
continue;
ret = pmbus_add_sensor_attrs_one(client, data, info,
name, index, page,
- attrs);
+ attrs, paged);
if (ret)
return ret;
index++;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index fb03449de2e0..aa6333620c37 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -231,8 +231,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
if (IS_ERR(ctx->pwm)) {
- dev_err(&pdev->dev, "Could not get PWM\n");
- return PTR_ERR(ctx->pwm);
+ ret = PTR_ERR(ctx->pwm);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+
+ return ret;
}
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index decd7df995ab..2a18539591ea 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -38,7 +38,7 @@ static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
/* constants for reading the ID register */
#define SHTC1_ID 0x07
-#define SHTC1_ID_REG_MASK 0x1f
+#define SHTC1_ID_REG_MASK 0x3f
/* delays for non-blocking i2c commands, both in us */
#define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 6bd200756560..cbdb5c4991ae 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
superio_outb(0x07, ld);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_DEVID 0x20
@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
u8 id, rev;
char *name;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
switch (id) {
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 5d323186d2c1..d24df0c50bea 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -73,16 +73,21 @@ superio_inb(int reg)
/* logical device for fans is 0x0A */
#define superio_select() superio_outb(0x07, 0x0A)
-static inline void
+static inline int
superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void
superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_ACT 0x30
@@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
/*
@@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
{
if ((sio_data->activate & 0x01) == 0) {
- superio_enter();
- superio_select();
-
- pr_info("Disabling device\n");
- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
-
- superio_exit();
+ if (!superio_enter()) {
+ superio_select();
+ pr_info("Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+ superio_exit();
+ } else {
+ pr_warn("Failed to disable device\n");
+ }
}
}
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3a6bfa51cb94..95d5e8ec8b7f 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
outb(ldn, sio_cip + 1);
}
-static inline void superio_enter(int sio_cip)
+static inline int superio_enter(int sio_cip)
{
+ if (!request_muxed_region(sio_cip, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio_cip);
outb(0x87, sio_cip);
+
+ return 0;
}
static inline void superio_exit(int sio_cip)
{
outb(0xaa, sio_cip);
+ release_region(sio_cip, 2);
}
/* ---------------------------------------------------------------------
@@ -1282,11 +1288,14 @@ EXIT:
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
- int err = -ENODEV;
+ int err;
int devid;
- superio_enter(sio_cip);
+ err = superio_enter(sio_cip);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
if (devid != SIO_VT1211_ID)
goto EXIT;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 721295b9a051..43c0f89cefdf 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -130,17 +130,23 @@ superio_select(struct w83627hf_sio_data *sio, int ld)
outb(ld, sio->sioaddr + 1);
}
-static inline void
+static inline int
superio_enter(struct w83627hf_sio_data *sio)
{
+ if (!request_muxed_region(sio->sioaddr, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio->sioaddr);
outb(0x87, sio->sioaddr);
+
+ return 0;
}
static inline void
superio_exit(struct w83627hf_sio_data *sio)
{
outb(0xAA, sio->sioaddr);
+ release_region(sio->sioaddr, 2);
}
#define W627_DEVID 0x52
@@ -1275,7 +1281,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
struct w83627hf_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 val;
static __initconst char *const names[] = {
@@ -1287,7 +1293,11 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
};
sio_data->sioaddr = sioaddr;
- superio_enter(sio_data);
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
+
+ err = -ENODEV;
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
case W627_DEVID:
@@ -1641,9 +1651,21 @@ static int w83627thf_read_gpio5(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff, sel;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_GPIO5);
+ res = 0xff;
+
/* Make sure these GPIO pins are enabled */
if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
@@ -1674,7 +1696,17 @@ static int w83687thf_read_vid(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_HWM);
/* Make sure these GPIO pins are enabled */
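The vt1211 and w83627hf hunks above change superio_enter() from void to int: the Super-I/O index/data ports are reserved with request_muxed_region() before the unlock bytes are written, and callers either propagate -EBUSY or, where the feature is optional, warn and carry on with a default. A minimal sketch of that enter/exit pairing, assuming a placeholder port base and owner string rather than the drivers' real values:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define EXAMPLE_SIO_BASE 0x2e	/* placeholder Super-I/O index port */

static int example_superio_enter(void)
{
	/* Reserve the index/data port pair; another driver may hold it. */
	if (!request_muxed_region(EXAMPLE_SIO_BASE, 2, "example-sio"))
		return -EBUSY;

	outb(0x87, EXAMPLE_SIO_BASE);	/* enter configuration mode */
	outb(0x87, EXAMPLE_SIO_BASE);
	return 0;
}

static void example_superio_exit(void)
{
	outb(0xaa, EXAMPLE_SIO_BASE);	/* leave configuration mode */
	release_region(EXAMPLE_SIO_BASE, 2);
}

Callers that can live without the hardware, such as w83627thf_read_gpio5() above, log a warning on failure instead of failing instantiation.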
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index ace55385b26f..ff10deed3fde 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -279,9 +279,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
int node;
struct cs_buffers *buf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
if (!buf)
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index b9b1e9c8f4c4..00904c6b5b5e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -667,10 +667,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
+
+ /* mask off max threshold before checking min value */
+ val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin)
return -EINVAL;
- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ config->ccctlr = val;
return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
@@ -701,14 +704,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL;
if (!drvdata->nr_addr_cmp)
return -EINVAL;
+
/*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
+ * Bit[8] controls include(1) / exclude(0), bits[0-7] select
+ * individual range comparators. If include then at least 1
+ * range must be selected.
*/
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL;
- config->bb_ctrl = val;
+ config->bb_ctrl = val & GENMASK(8, 0);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
@@ -1341,8 +1346,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
- /* RST, bits[7:0] */
- config->seq_ctrl[idx] = val & 0xFF;
+ /* Seq control has two masks B[15:8] F[7:0] */
+ config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1597,7 +1602,7 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
- config->res_ctrl[idx] = val;
+ config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock);
return size;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4db8d6a4d0cb..22079f886f45 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -35,6 +35,7 @@
#include <linux/pm_runtime.h>
#include <asm/sections.h>
#include <asm/local.h>
+#include <asm/virt.h>
#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
@@ -61,7 +62,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
static bool etm4_arch_supported(u8 arch)
{
- switch (arch) {
+ /* Mask out the minor version number */
+ switch (arch & 0xf0) {
case ETM_ARCH_V4:
break;
default:
@@ -180,6 +182,12 @@ static void etm4_enable_hw(void *info)
if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
dev_err(drvdata->dev,
"timeout while waiting for Idle Trace Status\n");
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
CS_LOCK(drvdata->base);
@@ -322,8 +330,12 @@ static void etm4_disable_hw(void *info)
/* EN, bit[0] Trace unit enable bit */
control &= ~0x1;
- /* make sure everything completes before disabling */
- mb();
+ /*
+ * Make sure everything completes before disabling, as recommended
+ * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
+ * SSTATUS") of ARM IHI 0064D
+ */
+ dsb(sy);
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
@@ -604,7 +616,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->vinst_ctrl |= BIT(0);
}
-static u64 etm4_get_access_type(struct etmv4_config *config)
+static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
u64 access_type = 0;
@@ -615,17 +627,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
* Bit[13] Exception level 1 - OS
* Bit[14] Exception level 2 - Hypervisor
* Bit[15] Never implemented
- *
- * Always stay away from hypervisor mode.
*/
- access_type = ETM_EXLEVEL_NS_HYP;
-
- if (config->mode & ETM_MODE_EXCL_KERN)
- access_type |= ETM_EXLEVEL_NS_OS;
+ if (!is_kernel_in_hyp_mode()) {
+ /* Stay away from hypervisor mode for non-VHE */
+ access_type = ETM_EXLEVEL_NS_HYP;
+ if (config->mode & ETM_MODE_EXCL_KERN)
+ access_type |= ETM_EXLEVEL_NS_OS;
+ } else if (config->mode & ETM_MODE_EXCL_KERN) {
+ access_type = ETM_EXLEVEL_NS_HYP;
+ }
if (config->mode & ETM_MODE_EXCL_USER)
access_type |= ETM_EXLEVEL_NS_APP;
+ return access_type;
+}
+
+static u64 etm4_get_access_type(struct etmv4_config *config)
+{
+ u64 access_type = etm4_get_ns_access_type(config);
+
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
* in secure state.
@@ -879,20 +900,10 @@ void etm4_config_trace_mode(struct etmv4_config *config)
addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
/* clear default config */
- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
+ addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
+ ETM_EXLEVEL_NS_HYP);
- /*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
- */
- if (mode & ETM_MODE_EXCL_KERN)
- addr_acc |= ETM_EXLEVEL_NS_OS;
- else
- addr_acc |= ETM_EXLEVEL_NS_APP;
+ addr_acc |= etm4_get_ns_access_type(config);
config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea24d8d..03ce12ad4317 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -292,9 +292,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
int node;
struct cs_buffers *buf;
- if (cpu == -1)
- cpu = smp_processor_id();
- node = cpu_to_node(cpu);
+ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
/* Allocate memory structure for interaction with Perf */
buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
@@ -425,10 +423,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
case TMC_MEM_INTF_WIDTH_32BITS:
case TMC_MEM_INTF_WIDTH_64BITS:
case TMC_MEM_INTF_WIDTH_128BITS:
- mask = GENMASK(31, 5);
+ mask = GENMASK(31, 4);
break;
case TMC_MEM_INTF_WIDTH_256BITS:
- mask = GENMASK(31, 6);
+ mask = GENMASK(31, 5);
break;
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 398e44a9ec45..5ffabc388630 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -132,12 +132,14 @@ static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
{
int ret;
- if (!csdev->enable) {
- if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev, mode);
- if (ret)
- return ret;
- }
+ /*
+ * We need to make sure the "new" session is compatible with the
+ * existing "mode" of operation.
+ */
+ if (sink_ops(csdev)->enable) {
+ ret = sink_ops(csdev)->enable(csdev, mode);
+ if (ret)
+ return ret;
csdev->enable = true;
}
@@ -331,8 +333,14 @@ int coresight_enable_path(struct list_head *path, u32 mode)
switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
ret = coresight_enable_sink(csdev, mode);
+ /*
+ * Sink is the first component turned on. If we
+ * failed to enable the sink, there are no components
+ * that need disabling. Disabling the path here
+ * would mean we could disrupt an existing session.
+ */
if (ret)
- goto err;
+ goto out;
break;
case CORESIGHT_DEV_TYPE_SOURCE:
/* sources are enabled from either sysFS or Perf */
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index b0502e2782c1..98a4cb5d4993 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -605,7 +605,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
- for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
+ for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index f91d9faf14ea..73713b239464 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -90,6 +90,7 @@ struct msc_iter {
* @reg_base: register window base address
* @thdev: intel_th_device pointer
* @win_list: list of windows in multiblock mode
+ * @single_sgt: single mode buffer
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
@@ -110,6 +111,7 @@ struct msc {
struct intel_th_device *thdev;
struct list_head win_list;
+ struct sg_table single_sgt;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
@@ -495,7 +497,7 @@ static int msc_configure(struct msc *msc)
lockdep_assert_held(&msc->buf_mutex);
if (msc->mode > MSC_MODE_MULTI)
- return -ENOTSUPP;
+ return -EINVAL;
if (msc->mode == MSC_MODE_MULTI)
msc_buffer_clear_hw_header(msc);
@@ -623,22 +625,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
*/
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned int order = get_order(size);
struct page *page;
+ int ret;
if (!size)
return 0;
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto err_out;
+
+ ret = -ENOMEM;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
if (!page)
- return -ENOMEM;
+ goto err_free_sgt;
split_page(page, order);
- msc->nr_pages = size >> PAGE_SHIFT;
+ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+ DMA_FROM_DEVICE);
+ if (ret < 0)
+ goto err_free_pages;
+
+ msc->nr_pages = nr_pages;
msc->base = page_address(page);
- msc->base_addr = page_to_phys(page);
+ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
return 0;
+
+err_free_pages:
+ __free_pages(page, order);
+
+err_free_sgt:
+ sg_free_table(&msc->single_sgt);
+
+err_out:
+ return ret;
}
/**
@@ -649,6 +674,10 @@ static void msc_buffer_contig_free(struct msc *msc)
{
unsigned long off;
+ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+ 1, DMA_FROM_DEVICE);
+ sg_free_table(&msc->single_sgt);
+
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
@@ -919,7 +948,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
} else if (msc->mode == MSC_MODE_MULTI) {
ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
} else {
- ret = -ENOTSUPP;
+ ret = -EINVAL;
}
if (!ret) {
@@ -1142,7 +1171,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
if (ret >= 0)
*ppos = iter->offset;
} else {
- ret = -ENOTSUPP;
+ ret = -EINVAL;
}
put_count:
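The msu.c hunks above replace the raw page_to_phys() address of the single-mode buffer with a one-entry sg_table that is mapped through dma_map_sg(), adding error labels that unwind each step. A rough sketch of that allocate-then-map sequence against a generic struct device; the function and variable names here are illustrative, not the driver's:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_contig_dma_alloc(struct device *dev, size_t size,
				    struct sg_table *sgt, void **vaddr,
				    dma_addr_t *dma)
{
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page) {
		ret = -ENOMEM;
		goto err_free_sgt;
	}
	sg_set_buf(sgt->sgl, page_address(page), size);

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	if (!dma_map_sg(dev, sgt->sgl, 1, DMA_FROM_DEVICE)) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	*vaddr = page_address(page);
	*dma = sg_dma_address(sgt->sgl);
	return 0;

err_free_pages:
	__free_pages(page, order);
err_free_sgt:
	sg_free_table(sgt);
	return ret;
}

The matching free path, as in msc_buffer_contig_free() above, calls dma_unmap_sg() and sg_free_table() before releasing the pages.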
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 3c45c1c15f7e..beefec9701ed 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -226,8 +226,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
- output->nr_chans = 0;
master->nr_free += output->nr_chans;
+ output->nr_chans = 0;
}
/*
@@ -1107,7 +1107,6 @@ int stm_source_register_device(struct device *parent,
err:
put_device(&src->dev);
- kfree(src);
return err;
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6f6315d7d10d..9c625a0cd83a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -397,12 +397,13 @@ config I2C_BCM_KONA
If you do not need KONA I2C interface, say N.
config I2C_BRCMSTB
- tristate "BRCM Settop I2C controller"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ tristate "BRCM Settop/DSL I2C controller"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \
+ COMPILE_TEST
default y
help
If you say yes to this option, support will be included for the
- I2C interface on the Broadcom Settop SoCs.
+ I2C interface on the Broadcom Settop/DSL SoCs.
If you do not need I2C interface, say N.
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 9d7be5af2bf2..6618db75fa25 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -83,6 +83,7 @@ static struct i2c_algo_bit_data ioc_data = {
static struct i2c_adapter ioc_ops = {
.nr = 0,
+ .name = "ioc",
.algo_data = &ioc_data,
};
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 96bb4e749012..0218ba6eb26a 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -72,6 +72,7 @@ struct em_i2c_device {
struct completion msg_done;
struct clk *sclk;
struct i2c_client *slave;
+ int irq;
};
static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -342,6 +343,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
writeb(0, priv->base + I2C_OFS_SVA0);
+ /*
+ * Wait for interrupt to finish. New slave irqs cannot happen because we
+ * cleared the slave address and, thus, only extension codes will be
+ * detected which do not use the slave ptr.
+ */
+ synchronize_irq(priv->irq);
priv->slave = NULL;
return 0;
@@ -358,7 +365,7 @@ static int em_i2c_probe(struct platform_device *pdev)
{
struct em_i2c_device *priv;
struct resource *r;
- int irq, ret;
+ int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -391,8 +398,8 @@ static int em_i2c_probe(struct platform_device *pdev)
em_i2c_reset(&priv->adap);
- irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
+ priv->irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
"em_i2c", priv);
if (ret)
goto err_clk;
@@ -402,7 +409,8 @@ static int em_i2c_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
+ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
+ priv->irq);
return 0;
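The i2c-emev2 hunks above keep the interrupt number in the driver's private data so that em_i2c_unreg_slave() can call synchronize_irq() after clearing the slave address register: a handler still running with the old slave pointer is allowed to finish before the pointer is set to NULL. A minimal sketch of that teardown ordering; the structure layout and the register write are placeholders:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>

struct example_i2c_priv {
	void __iomem *base;
	struct i2c_client *slave;
	int irq;
};

static int example_unreg_slave(struct example_i2c_priv *priv)
{
	writeb(0, priv->base);	/* placeholder: clear the slave address register */

	/*
	 * Let any in-flight handler drain; once the address register is
	 * cleared, no new slave events can reference priv->slave.
	 */
	synchronize_irq(priv->irq);

	priv->slave = NULL;
	return 0;
}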
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index ae7f3180f7e8..75c8bbb4774f 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -498,6 +498,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&priv->adap);
pm_runtime_disable(priv->dev);
pm_runtime_set_suspended(priv->dev);
+ clk_disable_unprepare(priv->clk);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 71c32f456a33..9a2422a0177a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1098,7 +1098,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
/* Get I2C clock */
i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_imx->clk)) {
- dev_err(&pdev->dev, "can't get I2C clock\n");
+ if (PTR_ERR(i2c_imx->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "can't get I2C clock\n");
return PTR_ERR(i2c_imx->clk);
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 30132c3957cd..41ca9ff7b5da 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -82,25 +82,6 @@
#define JZ4780_I2C_STA_TFNF BIT(1)
#define JZ4780_I2C_STA_ACT BIT(0)
-static const char * const jz4780_i2c_abrt_src[] = {
- "ABRT_7B_ADDR_NOACK",
- "ABRT_10ADDR1_NOACK",
- "ABRT_10ADDR2_NOACK",
- "ABRT_XDATA_NOACK",
- "ABRT_GCALL_NOACK",
- "ABRT_GCALL_READ",
- "ABRT_HS_ACKD",
- "SBYTE_ACKDET",
- "ABRT_HS_NORSTRT",
- "SBYTE_NORSTRT",
- "ABRT_10B_RD_NORSTRT",
- "ABRT_MASTER_DIS",
- "ARB_LOST",
- "SLVFLUSH_TXFIFO",
- "SLV_ARBLOST",
- "SLVRD_INTX",
-};
-
#define JZ4780_I2C_INTST_IGC BIT(11)
#define JZ4780_I2C_INTST_ISTT BIT(10)
#define JZ4780_I2C_INTST_ISTP BIT(9)
@@ -538,21 +519,8 @@ done:
static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
{
- int i;
-
- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
- dev_err(&i2c->adap.dev, "device addr=%x\n",
- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
- i2c->cmd, i2c->cmd_buf[i2c->cmd]);
- dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
- i2c->cmd, i2c->data_buf[i2c->cmd]);
-
- for (i = 0; i < 16; i++) {
- if (src & BIT(i))
- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
- i, jz4780_i2c_abrt_src[i]);
- }
+ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
+ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
}
static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 8f1c5f24c1df..62785aa76b3f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -96,7 +96,7 @@
#define SB800_PIIX4_PORT_IDX_MASK 0x06
#define SB800_PIIX4_PORT_IDX_SHIFT 1
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
@@ -355,18 +355,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
- switch (PIIX4_dev->device) {
- case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+ (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ PIIX4_dev->revision >= 0x1F)) {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
- break;
- case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
- default:
+ } else {
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
- break;
}
} else {
mutex_lock(&piix4_mutex_sb800);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index a8497cfdae6f..7524e17ac966 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -808,6 +808,8 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
}
if (ret || qup->bus_err || qup->qup_err) {
+ reinit_completion(&qup->xfer);
+
if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
dev_err(qup->dev, "change to run state timed out");
goto desc_err;
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index c811af4c8d81..e420b41a34ba 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -212,6 +212,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
/* We got a NACKIE */
readb(riic->base + RIIC_ICDRR); /* dummy read */
+ riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
riic->err = -ENXIO;
} else if (riic->bytes_left) {
return IRQ_NONE;
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 1371547ce1a3..185653b8ec3a 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -437,6 +437,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
/**
* st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
* @i2c_dev: Controller's private data
+ * @max: Maximum amount of data to fill into the Tx FIFO
*
* This functions fills the Tx FIFO with fixed pattern when
* in read mode to trigger clock.
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7484aac1e14d..80d82c6792d8 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -3250,16 +3250,16 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
the underlying bus driver */
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev, "Invalid block %s size %d\n",
+ read_write == I2C_SMBUS_READ ? "read" : "write",
+ data->block[0]);
+ return -EINVAL;
+ }
if (read_write == I2C_SMBUS_READ) {
msg[1].len = data->block[0];
} else {
msg[0].len = data->block[0] + 1;
- if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(&adapter->dev,
- "Invalid block write size %d\n",
- data->block[0]);
- return -EINVAL;
- }
for (i = 1; i <= data->block[0]; i++)
msgbuf0[i] = data->block[i];
}
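The i2c-core.c hunk above hoists the SMBus block-length check so that both the read and the write branch reject a user-supplied data->block[0] larger than I2C_SMBUS_BLOCK_MAX before it is used as an i2c_msg length. A small sketch of that validation; the helper name is made up for illustration:

#include <linux/errno.h>
#include <linux/i2c.h>

static int example_i2c_block_msg_len(const union i2c_smbus_data *data,
				     char read_write, u16 *len)
{
	if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
		return -EINVAL;

	/* a read transfers block[0] bytes; a write also carries the count byte */
	*len = (read_write == I2C_SMBUS_READ) ? data->block[0]
					      : data->block[0] + 1;
	return 0;
}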
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 00e8e675cbeb..eaa312bc3a3c 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -297,6 +297,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
rdwr_pa[i].buf[0] < 1 ||
rdwr_pa[i].len < rdwr_pa[i].buf[0] +
I2C_SMBUS_BLOCK_MAX) {
+ i++;
res = -EINVAL;
break;
}
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index b127ed60c733..9dde8390da09 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -65,6 +65,9 @@ static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
struct ide_timing t;
u8 arttim = 0;
+ if (drive->dn >= ARRAY_SIZE(drwtim_regs))
+ return;
+
ide_timing_compute(drive, mode, &t, T, 0);
/*
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index a97affca18ab..0f57d45484d1 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -114,6 +114,9 @@ static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
struct pci_dev *dev = to_pci_dev(hwif->dev);
const u8 pio = drive->pio_mode - XFER_PIO_0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci))
+ return;
+
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
if (svwks_csb_check(dev)) {
@@ -140,6 +143,9 @@ static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
+ if (drive->dn >= ARRAY_SIZE(drive_pci2))
+ return;
+
pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
pci_read_config_byte(dev, 0x54, &ultra_enable);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5ded9b22b015..a6fa32c7e068 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1107,14 +1107,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft),
ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
@@ -1122,7 +1122,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn),
ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
@@ -1134,7 +1134,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv),
{}
};
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c3888822add1..b6254ce9ab3b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -125,7 +125,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
-#define BMC150_ACCEL_TEMP_CENTER_VAL 24
+#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 784636800361..780f886ccbfe 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1340,6 +1340,8 @@ static int kxcjk1013_resume(struct device *dev)
mutex_lock(&data->mutex);
ret = kxcjk1013_set_mode(data, OPERATION);
+ if (ret == 0)
+ ret = kxcjk1013_set_range(data, data->range);
mutex_unlock(&data->mutex);
return ret;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 9704090b7908..cd6dbe95125b 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -817,10 +817,10 @@ static int ad799x_probe(struct i2c_client *client,
ret = ad799x_write_config(st, st->chip_config->default_config);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
ret = ad799x_read_config(st);
if (ret < 0)
- goto error_disable_reg;
+ goto error_disable_vref;
st->config = ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 22c4c17cd996..30f200ad6b97 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
struct spi_transfer t = {
.tx_buf = data,
.len = size + 1,
- .cs_change = sigma_delta->bus_locked,
+ .cs_change = sigma_delta->keep_cs_asserted,
};
struct spi_message m;
int ret;
@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
if (sigma_delta->info->has_registers) {
data[0] = reg << sigma_delta->info->addr_shift;
data[0] |= sigma_delta->info->read_mask;
+ data[0] |= sigma_delta->comm;
spi_message_add_tail(&t[0], &m);
}
spi_message_add_tail(&t[1], &m);
@@ -216,6 +217,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
ret = ad_sigma_delta_set_mode(sigma_delta, mode);
@@ -233,9 +235,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
ret = 0;
}
out:
+ sigma_delta->keep_cs_asserted = false;
+ ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->master);
- ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
return ret;
}
@@ -287,6 +290,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
reinit_completion(&sigma_delta->completion);
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
@@ -296,9 +300,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
ret = wait_for_completion_interruptible_timeout(
&sigma_delta->completion, HZ);
- sigma_delta->bus_locked = false;
- spi_bus_unlock(sigma_delta->spi->master);
-
if (ret == 0)
ret = -EIO;
if (ret < 0)
@@ -314,7 +315,10 @@ out:
sigma_delta->irq_dis = true;
}
+ sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ sigma_delta->bus_locked = false;
+ spi_bus_unlock(sigma_delta->spi->master);
mutex_unlock(&indio_dev->mlock);
if (ret)
@@ -351,6 +355,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
spi_bus_lock(sigma_delta->spi->master);
sigma_delta->bus_locked = true;
+ sigma_delta->keep_cs_asserted = true;
+
ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
if (ret)
goto err_unlock;
@@ -379,6 +385,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
sigma_delta->irq_dis = true;
}
+ sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
sigma_delta->bus_locked = false;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index e3e2155b0386..dd9f2280927b 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
ret = wait_event_interruptible_timeout(st->wq_data_avail,
st->done,
msecs_to_jiffies(1000));
- if (ret == 0)
- ret = -ETIMEDOUT;
- if (ret < 0) {
- mutex_unlock(&st->lock);
- return ret;
- }
-
- *val = st->last_value;
+ /* Disable interrupts, regardless if adc conversion was
+ * successful or not
+ */
at91_adc_writel(st, AT91_ADC_CHDR,
AT91_ADC_CH(chan->channel));
at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
- st->last_value = 0;
- st->done = false;
+ if (ret > 0) {
+ /* a valid conversion took place */
+ *val = st->last_value;
+ st->last_value = 0;
+ st->done = false;
+ ret = IIO_VAL_INT;
+ } else if (ret == 0) {
+ /* conversion timeout */
+ dev_err(&idev->dev, "ADC Channel %d timeout.\n",
+ chan->channel);
+ ret = -ETIMEDOUT;
+ }
+
mutex_unlock(&st->lock);
- return IIO_VAL_INT;
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2b1f16..ec3f7bc70b75 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -471,6 +471,14 @@ static int max1027_probe(struct spi_device *spi)
goto fail_dev_register;
}
+ /* Internal reset */
+ st->reg = MAX1027_RST_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
+ return ret;
+ }
+
/* Disable averaging */
st->reg = MAX1027_AVG_REG;
ret = spi_write(st->spi, &st->reg, 1);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 56cf5907a5f0..143894a315d9 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1299,7 +1299,7 @@ static int xadc_remove(struct platform_device *pdev)
}
free_irq(irq, indio_dev);
clk_disable_unprepare(xadc->clk);
- cancel_delayed_work(&xadc->zynq_unmask_work);
+ cancel_delayed_work_sync(&xadc->zynq_unmask_work);
kfree(xadc->data);
kfree(indio_dev->channels);
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
index a3ae165f8d9f..16180e6321bd 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio.c
+++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
@@ -80,7 +80,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
unsigned int len, int64_t timestamp)
{
__le32 time;
- int64_t calculated_time;
+ int64_t calculated_time = 0;
struct ssp_sensor_data *spd = iio_priv(indio_dev);
if (indio_dev->scan_bytes == 0)
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 97d2c5111f43..8bf7fc626a9d 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -221,7 +221,7 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
*val >>= chan->scan_type.shift;
- val -= (1 << chan->scan_type.realbits) / 2;
+ *val -= (1 << chan->scan_type.realbits) / 2;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 2 * st->vref;
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 3854d201a5d6..68dd0be1ac07 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -94,17 +94,22 @@ static int mcp4922_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct mcp4922_state *state = iio_priv(indio_dev);
+ int ret;
if (val2 != 0)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > GENMASK(chan->scan_type.realbits-1, 0))
+ if (val < 0 || val > GENMASK(chan->scan_type.realbits - 1, 0))
return -EINVAL;
val <<= chan->scan_type.shift;
- state->value[chan->channel] = val;
- return mcp4922_spi_write(state, chan->channel, val);
+
+ ret = mcp4922_spi_write(state, chan->channel, val);
+ if (!ret)
+ state->value[chan->channel] = val;
+ return ret;
+
default:
return -EINVAL;
}
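The mcp4922 hunk above performs the SPI write first and only updates the cached channel value when the transfer succeeds, and it additionally rejects negative raw values. The underlying "write hardware first, cache on success" pattern, sketched with a hypothetical device-write stub:

struct example_dac_state {
	int cached[2];	/* last value the device actually accepted */
};

/* stub standing in for a real bus write such as mcp4922_spi_write() */
static int example_dac_hw_write(struct example_dac_state *st, int chan, int val)
{
	return 0;
}

static int example_dac_set(struct example_dac_state *st, int chan, int val)
{
	int ret = example_dac_hw_write(st, chan, val);

	if (!ret)
		st->cached[chan] = val;	/* never cache a value the device rejected */
	return ret;
}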
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 821919dd245b..b5a5517e3ce1 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -583,11 +583,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
- *val = 0;
switch (chan->type) {
case IIO_TEMP:
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 500;
+ return IIO_VAL_INT;
case IIO_ANGL_VEL:
{
int i;
@@ -595,6 +594,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
if (bmg160_scale_table[i].dps_range ==
data->dps_range) {
+ *val = 0;
*val2 = bmg160_scale_table[i].scale;
return IIO_VAL_INT_PLUS_MICRO;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index a5c5c4e29add..48923ae6ac3b 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,6 +19,7 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
+#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d8fe0f..e366422e8512 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -35,83 +35,11 @@
#define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c
/* FULLSCALE */
+#define ST_GYRO_FS_AVL_245DPS 245
#define ST_GYRO_FS_AVL_250DPS 250
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_GYRO_1_WAI_EXP 0xd3
-#define ST_GYRO_1_ODR_ADDR 0x20
-#define ST_GYRO_1_ODR_MASK 0xc0
-#define ST_GYRO_1_ODR_AVL_100HZ_VAL 0x00
-#define ST_GYRO_1_ODR_AVL_200HZ_VAL 0x01
-#define ST_GYRO_1_ODR_AVL_400HZ_VAL 0x02
-#define ST_GYRO_1_ODR_AVL_800HZ_VAL 0x03
-#define ST_GYRO_1_PW_ADDR 0x20
-#define ST_GYRO_1_PW_MASK 0x08
-#define ST_GYRO_1_FS_ADDR 0x23
-#define ST_GYRO_1_FS_MASK 0x30
-#define ST_GYRO_1_FS_AVL_250_VAL 0x00
-#define ST_GYRO_1_FS_AVL_500_VAL 0x01
-#define ST_GYRO_1_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_1_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_1_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_1_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_1_BDU_ADDR 0x23
-#define ST_GYRO_1_BDU_MASK 0x80
-#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_1_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_GYRO_2_WAI_EXP 0xd4
-#define ST_GYRO_2_ODR_ADDR 0x20
-#define ST_GYRO_2_ODR_MASK 0xc0
-#define ST_GYRO_2_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_2_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_2_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_2_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_2_PW_ADDR 0x20
-#define ST_GYRO_2_PW_MASK 0x08
-#define ST_GYRO_2_FS_ADDR 0x23
-#define ST_GYRO_2_FS_MASK 0x30
-#define ST_GYRO_2_FS_AVL_250_VAL 0x00
-#define ST_GYRO_2_FS_AVL_500_VAL 0x01
-#define ST_GYRO_2_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_2_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_2_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_2_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_2_BDU_ADDR 0x23
-#define ST_GYRO_2_BDU_MASK 0x80
-#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_2_MULTIREAD_BIT true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_GYRO_3_WAI_EXP 0xd7
-#define ST_GYRO_3_ODR_ADDR 0x20
-#define ST_GYRO_3_ODR_MASK 0xc0
-#define ST_GYRO_3_ODR_AVL_95HZ_VAL 0x00
-#define ST_GYRO_3_ODR_AVL_190HZ_VAL 0x01
-#define ST_GYRO_3_ODR_AVL_380HZ_VAL 0x02
-#define ST_GYRO_3_ODR_AVL_760HZ_VAL 0x03
-#define ST_GYRO_3_PW_ADDR 0x20
-#define ST_GYRO_3_PW_MASK 0x08
-#define ST_GYRO_3_FS_ADDR 0x23
-#define ST_GYRO_3_FS_MASK 0x30
-#define ST_GYRO_3_FS_AVL_250_VAL 0x00
-#define ST_GYRO_3_FS_AVL_500_VAL 0x01
-#define ST_GYRO_3_FS_AVL_2000_VAL 0x02
-#define ST_GYRO_3_FS_AVL_250_GAIN IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_3_FS_AVL_500_GAIN IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_3_FS_AVL_2000_GAIN IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_3_BDU_ADDR 0x23
-#define ST_GYRO_3_BDU_MASK 0x80
-#define ST_GYRO_3_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_3_DRDY_IRQ_INT2_MASK 0x08
-#define ST_GYRO_3_MULTIREAD_BIT true
-
-
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +58,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
static const struct st_sensor_settings st_gyro_sensors_settings[] = {
{
- .wai = ST_GYRO_1_WAI_EXP,
+ .wai = 0xd3,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +66,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_1_ODR_ADDR,
- .mask = ST_GYRO_1_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
- { 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
- { 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
- { 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_1_PW_ADDR,
- .mask = ST_GYRO_1_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -158,33 +86,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_1_FS_ADDR,
- .mask = ST_GYRO_1_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_1_FS_AVL_250_VAL,
- .gain = ST_GYRO_1_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_1_FS_AVL_500_VAL,
- .gain = ST_GYRO_1_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_1_FS_AVL_2000_VAL,
- .gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_1_BDU_ADDR,
- .mask = ST_GYRO_1_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_1_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -192,11 +120,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_2_WAI_EXP,
+ .wai = 0xd4,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +136,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_2_ODR_ADDR,
- .mask = ST_GYRO_2_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+ { .hz = 95, .value = 0x00, },
+ { .hz = 190, .value = 0x01, },
+ { .hz = 380, .value = 0x02, },
+ { .hz = 760, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_2_PW_ADDR,
- .mask = ST_GYRO_2_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -228,33 +156,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_2_FS_ADDR,
- .mask = ST_GYRO_2_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
.num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_2_FS_AVL_250_VAL,
- .gain = ST_GYRO_2_FS_AVL_250_GAIN,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_2_FS_AVL_500_VAL,
- .gain = ST_GYRO_2_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_2_FS_AVL_2000_VAL,
- .gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_2_BDU_ADDR,
- .mask = ST_GYRO_2_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_2_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -262,29 +190,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
{
- .wai = ST_GYRO_3_WAI_EXP,
+ .wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
- [0] = L3GD20_GYRO_DEV_NAME,
+ [0] = L3GD20H_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
- .addr = ST_GYRO_3_ODR_ADDR,
- .mask = ST_GYRO_3_ODR_MASK,
+ .addr = 0x20,
+ .mask = 0xc0,
.odr_avl = {
- { 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, },
- { 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, },
- { 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, },
- { 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
- .addr = ST_GYRO_3_PW_ADDR,
- .mask = ST_GYRO_3_PW_MASK,
+ .addr = 0x20,
+ .mask = 0x08,
.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
},
@@ -293,33 +221,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
},
.fs = {
- .addr = ST_GYRO_3_FS_ADDR,
- .mask = ST_GYRO_3_FS_MASK,
+ .addr = 0x23,
+ .mask = 0x30,
.fs_avl = {
[0] = {
- .num = ST_GYRO_FS_AVL_250DPS,
- .value = ST_GYRO_3_FS_AVL_250_VAL,
- .gain = ST_GYRO_3_FS_AVL_250_GAIN,
+ .num = ST_GYRO_FS_AVL_245DPS,
+ .value = 0x00,
+ .gain = IIO_DEGREE_TO_RAD(8750),
},
[1] = {
.num = ST_GYRO_FS_AVL_500DPS,
- .value = ST_GYRO_3_FS_AVL_500_VAL,
- .gain = ST_GYRO_3_FS_AVL_500_GAIN,
+ .value = 0x01,
+ .gain = IIO_DEGREE_TO_RAD(17500),
},
[2] = {
.num = ST_GYRO_FS_AVL_2000DPS,
- .value = ST_GYRO_3_FS_AVL_2000_VAL,
- .gain = ST_GYRO_3_FS_AVL_2000_GAIN,
+ .value = 0x02,
+ .gain = IIO_DEGREE_TO_RAD(70000),
},
},
},
.bdu = {
- .addr = ST_GYRO_3_BDU_ADDR,
- .mask = ST_GYRO_3_BDU_MASK,
+ .addr = 0x23,
+ .mask = 0x80,
},
.drdy_irq = {
- .addr = ST_GYRO_3_DRDY_IRQ_ADDR,
- .mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ .addr = 0x22,
+ .mask_int2 = 0x08,
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
@@ -327,7 +255,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
*/
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
+ .multi_read_bit = true,
.bootime = 2,
},
};
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 40056b821036..3f628746cb93 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -41,6 +41,10 @@ static const struct of_device_id st_gyro_of_match[] = {
.data = L3GD20_GYRO_DEV_NAME,
},
{
+ .compatible = "st,l3gd20h-gyro",
+ .data = L3GD20H_GYRO_DEV_NAME,
+ },
+ {
.compatible = "st,l3g4is-gyro",
.data = L3G4IS_GYRO_DEV_NAME,
},
@@ -95,6 +99,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index fbf2faed501c..fa14d8f2170d 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -52,6 +52,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70c2a4a..c0e2e78c5c62 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -202,7 +202,7 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
*val2 = 65536;
return IIO_VAL_FRACTIONAL;
} else {
- *val = 100;
+ *val = 100000;
*val2 = 65536;
return IIO_VAL_FRACTIONAL;
}
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 12898424d838..5abe095901c8 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -266,8 +266,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
struct adis16480 *st = iio_priv(indio_dev);
unsigned int t;
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
t = val * 1000 + val2 / 1000;
- if (t <= 0)
+ if (t == 0)
return -EINVAL;
t = 2460000 / t;
@@ -369,12 +372,14 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev,
case IIO_MAGN:
case IIO_PRESSURE:
ret = adis_read_reg_16(&st->adis, reg, &val16);
- *bias = sign_extend32(val16, 15);
+ if (ret == 0)
+ *bias = sign_extend32(val16, 15);
break;
case IIO_ANGL_VEL:
case IIO_ACCEL:
ret = adis_read_reg_32(&st->adis, reg, &val32);
- *bias = sign_extend32(val32, 31);
+ if (ret == 0)
+ *bias = sign_extend32(val32, 31);
break;
default:
ret = -EINVAL;
@@ -721,6 +726,7 @@ static const struct iio_info adis16480_info = {
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
.driver_module = THIS_MODULE,
+ .debugfs_reg_access = adis_debugfs_reg_access,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 5d05c38c4ba9..2f037cd59d53 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -546,7 +546,7 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
unsigned bytes = 0;
- int length, i;
+ int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
@@ -554,13 +554,17 @@ static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
if (timestamp) {
length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
+ largest = max(largest, length);
}
+
+ bytes = ALIGN(bytes, largest);
return bytes;
}
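The industrialio-buffer hunk above records the largest element in the scan, timestamp included, and pads the total to a multiple of that size so the elements of the next scan in the buffer stay naturally aligned. A small illustrative helper doing the same arithmetic over an array of element sizes, assuming power-of-two sizes as IIO uses; it is not the IIO core API:

#include <linux/kernel.h>	/* ALIGN(), max() */

static unsigned int example_scan_bytes(const unsigned int *len, int n)
{
	unsigned int bytes = 0, largest = 0;
	int i;

	for (i = 0; i < n; i++) {
		bytes = ALIGN(bytes, len[i]);	/* natural alignment per element */
		bytes += len[i];
		largest = max(largest, len[i]);
	}

	/* pad so consecutive scans keep every element aligned */
	return largest ? ALIGN(bytes, largest) : 0;
}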
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index b05946604f80..6d5bb11594dc 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -62,9 +62,9 @@ struct bh1750_chip_info {
u16 int_time_low_mask;
u16 int_time_high_mask;
-}
+};
-static const bh1750_chip_info_tbl[] = {
+static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 78c9b3a6453a..be55477de2ac 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -695,6 +695,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
struct iio_dev *iio = _iio;
struct opt3001 *opt = iio_priv(iio);
int ret;
+ bool wake_result_ready_queue = false;
if (!opt->ok_to_ignore_lock)
mutex_lock(&opt->lock);
@@ -729,13 +730,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
}
opt->result = ret;
opt->result_ready = true;
- wake_up(&opt->result_ready_queue);
+ wake_result_ready_queue = true;
}
out:
if (!opt->ok_to_ignore_lock)
mutex_unlock(&opt->lock);
+ if (wake_result_ready_queue)
+ wake_up(&opt->result_ready_queue);
+
return IRQ_HANDLED;
}
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index dd3fcd1704f8..752237f0889e 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -477,7 +477,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
* We read all axes and discard all but one, for optimized
* reading, use the triggered buffer.
*/
- *val = le16_to_cpu(hw_values[chan->address]);
+ *val = (s16)le16_to_cpu(hw_values[chan->address]);
ret = IIO_VAL_INT;
}
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 3de7f4426ac4..86abba5827a2 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
static int hmc5843_i2c_probe(struct i2c_client *cli,
const struct i2c_device_id *id)
{
+ struct regmap *regmap = devm_regmap_init_i2c(cli,
+ &hmc5843_i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
return hmc5843_common_probe(&cli->dev,
- devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
+ regmap,
id->driver_data, id->name);
}
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 535f03a70d63..79b2b707f90e 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
static int hmc5843_spi_probe(struct spi_device *spi)
{
int ret;
+ struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
spi->mode = SPI_MODE_3;
@@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
if (ret)
return ret;
+ regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
return hmc5843_common_probe(&spi->dev,
- devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
+ regmap,
id->driver_data, id->name);
}
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 978b8d94f9a4..f7d23c1081dc 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -141,7 +141,7 @@ int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
if (ib_nl_is_good_ip_resp(nlh))
ib_nl_process_good_ip_rsep(nlh);
- return skb->len;
+ return 0;
}
static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
@@ -735,14 +735,13 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
struct net_device *dev;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
if (if_index)
@@ -751,8 +750,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
ctx.addr = &dev_addr;
init_completion(&ctx.comp);
- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ ret = rdma_resolve_ip(&self, (struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
+ resolve_cb, &ctx);
if (ret)
return ret;
@@ -782,16 +782,15 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
int ret = 0;
struct rdma_dev_addr dev_addr;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} gid_addr;
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
+ rdma_gid2ip((struct sockaddr *)&gid_addr, sgid);
memset(&dev_addr, 0, sizeof(dev_addr));
dev_addr.net = &init_net;
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+ ret = rdma_translate_ip((struct sockaddr *)&gid_addr, &dev_addr, vlan_id);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 71c7c4c328ef..304429fd04dd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1073,6 +1073,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
/* Sharing an ib_cm_id with different handlers is not
* supported */
spin_unlock_irqrestore(&cm.lock, flags);
+ ib_destroy_cm_id(cm_id);
return ERR_PTR(-EINVAL);
}
atomic_inc(&cm_id_priv->refcount);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 85d4ef319c90..0a6cc78ebcf7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2119,9 +2119,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
+ mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
- goto out;
+ return ret;
}
mutex_unlock(&conn_id->handler_mutex);
@@ -2567,6 +2568,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_paths = 0;
err1:
kfree(work);
return ret;
@@ -4439,6 +4441,7 @@ err:
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
ib_sa_unregister_client(&sa_client);
+ unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
destroy_workqueue(cma_wq);
return ret;
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index ff12b8d176ce..c4e7aa91498b 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -102,12 +102,12 @@ static void ib_cq_poll_work(struct work_struct *work)
completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
/**
@@ -159,9 +159,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cq->comp_handler = ib_cq_completion_workqueue;
INIT_WORK(&cq->work, ib_cq_poll_work);
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+ ib_comp_wq : ib_comp_unbound_wq;
break;
default:
ret = -EINVAL;
@@ -196,6 +199,7 @@ void ib_free_cq(struct ib_cq *cq)
irq_poll_disable(&cq->iop);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cancel_work_sync(&cq->work);
break;
default:
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 15f4bdf89fe1..4b947d5cafe2 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -59,6 +59,7 @@ struct ib_client_data {
};
struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
@@ -1005,10 +1006,19 @@ static int __init ib_core_init(void)
goto err;
}
+ ib_comp_unbound_wq =
+ alloc_workqueue("ib-comp-unb-wq",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_comp_unbound_wq) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+
ret = class_register(&ib_class);
if (ret) {
pr_warn("Couldn't create InfiniBand device class\n");
- goto err_comp;
+ goto err_comp_unbound;
}
ret = ibnl_init();
@@ -1055,6 +1065,8 @@ err_ibnl:
ibnl_cleanup();
err_sysfs:
class_unregister(&ib_class);
+err_comp_unbound:
+ destroy_workqueue(ib_comp_unbound_wq);
err_comp:
destroy_workqueue(ib_comp_wq);
err:
@@ -1071,6 +1083,7 @@ static void __exit ib_core_cleanup(void)
addr_cleanup();
ibnl_cleanup();
class_unregister(&ib_class);
+ destroy_workqueue(ib_comp_unbound_wq);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
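
The device.c hunks above add a second, unbound completion workqueue and hook it into the existing goto-based error unwind. A minimal kernel-style sketch of the same allocation/teardown pattern follows; all names here are illustrative, not the driver's own.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_unbound_wq;

static int __init example_init(void)
{
	/* Unbound workers are not pinned to the submitting CPU. */
	example_unbound_wq = alloc_workqueue("example-unb-wq",
					     WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
					     WQ_UNBOUND_MAX_ACTIVE);
	if (!example_unbound_wq)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* Teardown mirrors setup, matching the err_comp_unbound label above. */
	destroy_workqueue(example_unbound_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
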
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5495e22839a7..1f71c306923f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -137,8 +137,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
struct list_head *e, *tmp;
- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
+ list_del(e);
kfree(list_entry(e, struct iwcm_work, free_list));
+ }
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 3e2ab04201e2..a1f059a9c751 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3155,18 +3155,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
- port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
- IB_POLL_WORKQUEUE);
- if (IS_ERR(port_priv->cq)) {
- dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
- ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
port_priv->pd = ib_alloc_pd(device, 0);
if (IS_ERR(port_priv->pd)) {
dev_err(&device->dev, "Couldn't create ib_mad PD\n");
ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
+ port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+ IB_POLL_UNBOUND_WORKQUEUE);
+ if (IS_ERR(port_priv->cq)) {
+ dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+ ret = PTR_ERR(port_priv->cq);
goto error4;
}
@@ -3209,11 +3209,11 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3243,8 +3243,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dealloc_pd(port_priv->pd);
ib_free_cq(port_priv->cq);
+ ib_dealloc_pd(port_priv->pd);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 4baf3b864a57..1c459725d64e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -848,7 +848,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
}
settimeout_out:
- return skb->len;
+ return 0;
}
static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
@@ -920,7 +920,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
}
resp_out:
- return skb->len;
+ return 0;
}
static void free_sm_ah(struct kref *kref)
@@ -1109,7 +1109,6 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
.net = rec->net ? rec->net :
&init_net};
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -1117,12 +1116,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
if (!device->get_netdev)
return -EOPNOTSUPP;
- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
/* validate the route */
- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
- &dgid_addr._sockaddr, &dev_addr);
+ ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr,
+ &dev_addr);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 415a3185cde7..cf93a96b6324 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/uaccess.h>
@@ -843,11 +844,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
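
The user_mad.c change above is the usual Spectre-v1 idiom: bounds-check the untrusted index first, then clamp it with array_index_nospec() so an out-of-range value cannot be used under speculation. A minimal sketch of the idiom with a hypothetical table:

#include <linux/nospec.h>
#include <linux/types.h>

struct agent;

static struct agent *lookup_agent(struct agent **table, u32 id, u32 max)
{
	if (id >= max)
		return NULL;			/* architectural bounds check */

	id = array_index_nospec(id, max);	/* clamps the index under speculation */
	return table[id];
}
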
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index dd18b74cd01d..a04a53acb24f 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -455,6 +455,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
skb_reset_transport_header(skb);
} else {
skb = alloc_skb(len, gfp);
+ if (!skb)
+ return NULL;
}
t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
@@ -488,7 +490,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -499,7 +500,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
c4iw_put_ep(&ep->parent_ep->com);
release_ep_resources(ep);
- kfree_skb(skb);
return 0;
}
@@ -1872,8 +1872,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- if (release)
+ if (release) {
+ close_complete_upcall(ep, -ECONNRESET);
release_ep_resources(ep);
+ }
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2042,7 +2044,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = dst_mtu(dst);
@@ -2133,7 +2135,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port,
+ ep->com.cm_id->tos,
raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3276,7 +3279,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
laddr6->sin6_addr.s6_addr,
raddr6->sin6_addr.s6_addr,
laddr6->sin6_port,
- raddr6->sin6_port, 0,
+ raddr6->sin6_port, cm_id->tos,
raddr6->sin6_scope_id);
}
if (!ep->dst) {
@@ -3567,7 +3570,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (close) {
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
- close_complete_upcall(ep, -ECONNRESET);
ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 0c215353adb9..2b1dd60a29fa 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -264,13 +264,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
struct sk_buff *skb)
{
int err;
- struct fw_ri_tpte tpt;
+ struct fw_ri_tpte *tpt;
u32 stag_idx;
static atomic_t key;
if (c4iw_fatal_error(rdev))
return -EIO;
+ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+ if (!tpt)
+ return -ENOMEM;
+
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
@@ -280,6 +284,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
+ kfree(tpt);
return -ENOMEM;
}
mutex_lock(&rdev->stats.lock);
@@ -294,28 +299,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
/* write TPT entry */
if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
+ memset(tpt, 0, sizeof(*tpt));
else {
- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
FW_RI_TPTE_STAGSTATE_V(stag_state) |
FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
FW_RI_TPTE_PS_V(page_size));
- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt, skb);
+ sizeof(*tpt), tpt, skb);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -323,6 +328,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
}
+ kfree(tpt);
return err;
}
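
The cxgb4/mem.c hunks move the firmware TPT entry from the stack to the heap; the price is that every exit path, including the early -ENOMEM return, now has to free it. A minimal sketch of the pattern with a hypothetical structure and a stub in place of the real adapter write:

#include <linux/slab.h>
#include <linux/types.h>

struct big_entry { u8 payload[256]; };	/* too large to keep on the kernel stack */

static int do_hw_write(struct big_entry *e) { return 0; }	/* stand-in for the adapter write */

static int write_entry(void)
{
	struct big_entry *e;
	int err;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	err = do_hw_write(e);
	kfree(e);		/* freed on success and on error alike */
	return err;
}
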
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d30b3b908621..85db856047a9 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9620,6 +9620,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
/* disable the port */
clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+ cancel_work_sync(&ppd->freeze_work);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 9487c9bb8920..908d4dd01562 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2016,7 +2016,7 @@ struct opa_port_status_req {
__be32 vl_select_mask;
};
-#define VL_MASK_ALL 0x000080ff
+#define VL_MASK_ALL 0x00000000000080ffUL
struct opa_port_status_rsp {
__u8 port_num;
@@ -2315,15 +2315,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
}
static void a0_portstatus(struct hfi1_pportdata *ppd,
- struct opa_port_status_rsp *rsp, u32 vl_select_mask)
+ struct opa_port_status_rsp *rsp)
{
if (!is_bx(ppd->dd)) {
unsigned long vl;
u64 sum_vl_xmit_wait = 0;
- u32 vl_all_mask = VL_MASK_ALL;
+ unsigned long vl_all_mask = VL_MASK_ALL;
- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
- 8 * sizeof(vl_all_mask)) {
+ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
u64 tmp = sum_vl_xmit_wait +
read_port_cntr(ppd, C_TX_WAIT_VL,
idx_from_vl(vl));
@@ -2347,12 +2346,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
(struct opa_port_status_req *)pmp->data;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct opa_port_status_rsp *rsp;
- u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
unsigned long vl;
size_t response_data_size;
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
u8 port_num = req->port_num;
- u8 num_vls = hweight32(vl_select_mask);
+ u8 num_vls = hweight64(vl_select_mask);
struct _vls_pctrs *vlinfo;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -2386,7 +2385,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
- rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
+ rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
CNTR_INVALID_VL));
rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
@@ -2449,8 +2448,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
* So in the for_each_set_bit() loop below, we don't need
* any additional checks for vl.
*/
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
memset(vlinfo, 0, sizeof(*vlinfo));
tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
@@ -2487,7 +2485,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
vfi++;
}
- a0_portstatus(ppd, rsp, vl_select_mask);
+ a0_portstatus(ppd, rsp);
if (resp_len)
*resp_len += response_data_size;
@@ -2534,16 +2532,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
return error_counter_summary;
}
-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
- u32 vl_select_mask)
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
{
if (!is_bx(ppd->dd)) {
unsigned long vl;
u64 sum_vl_xmit_wait = 0;
- u32 vl_all_mask = VL_MASK_ALL;
+ unsigned long vl_all_mask = VL_MASK_ALL;
- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
- 8 * sizeof(vl_all_mask)) {
+ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
u64 tmp = sum_vl_xmit_wait +
read_port_cntr(ppd, C_TX_WAIT_VL,
idx_from_vl(vl));
@@ -2599,7 +2595,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u64 port_mask;
u8 port_num;
unsigned long vl;
- u32 vl_select_mask;
+ unsigned long vl_select_mask;
int vfi;
num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
@@ -2668,8 +2664,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
* So in the for_each_set_bit() loop below, we don't need
* any additional checks for vl.
*/
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(req->vl_select_mask)) {
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
memset(vlinfo, 0, sizeof(*vlinfo));
rsp->vls[vfi].port_vl_xmit_data =
@@ -2712,7 +2707,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
vfi++;
}
- a0_datacounters(ppd, rsp, vl_select_mask);
+ a0_datacounters(ppd, rsp);
if (resp_len)
*resp_len += response_data_size;
@@ -2807,7 +2802,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct _vls_ectrs *vlinfo;
unsigned long vl;
u64 port_mask, tmp;
- u32 vl_select_mask;
+ unsigned long vl_select_mask;
int vfi;
req = (struct opa_port_error_counters64_msg *)pmp->data;
@@ -2866,8 +2861,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
vlinfo = &rsp->vls[0];
vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask);
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(req->vl_select_mask)) {
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
memset(vlinfo, 0, sizeof(*vlinfo));
rsp->vls[vfi].port_vl_xmit_discards =
cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
@@ -3077,7 +3071,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
u64 portn = be64_to_cpu(req->port_select_mask[3]);
u32 counter_select = be32_to_cpu(req->counter_select_mask);
- u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+ unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
unsigned long vl;
if ((nports != 1) || (portn != 1 << port)) {
@@ -3169,8 +3163,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_UNCORRECTABLE_ERRORS)
write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
+ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
if (counter_select & CS_PORT_XMIT_DATA)
write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
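
The hfi1/mad.c changes widen the VL masks from u32 to unsigned long so for_each_set_bit() walks a correctly sized bitmap instead of reading past a 32-bit variable. A minimal sketch with an arbitrary mask value:

#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_vl_mask(void)
{
	unsigned long mask = 0x80ffUL;	/* e.g. VLs 0-7 plus VL 15 */
	unsigned long vl;

	/* Passing BITS_PER_LONG is only safe because mask really is an unsigned long. */
	for_each_set_bit(vl, &mask, BITS_PER_LONG)
		pr_info("VL %lu selected\n", vl);
}
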
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 717626046ee5..fbe662602f0a 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -377,7 +377,9 @@ int pcie_speeds(struct hfi1_devdata *dd)
/*
* bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
*/
- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ if (parent &&
+ (dd->pcidev->bus->max_bus_speed == PCIE_SPEED_2_5GT ||
+ dd->pcidev->bus->max_bus_speed == PCIE_SPEED_5_0GT)) {
dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd->link_gen3_capable = 0;
}
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e8e0fa58cb71..b08786614c1b 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2394,7 +2394,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -2469,7 +2469,7 @@ send_last:
update_ack_queue(qp, next);
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 9cbe52d21077..76e63c88a87a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
sdma_flush_descq(sde);
spin_lock_irqsave(&sde->flushlist_lock, flags);
/* copy flush list */
- list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
- list_del_init(&txp->list);
- list_add_tail(&txp->list, &flushlist);
- }
+ list_splice_init(&sde->flushlist, &flushlist);
spin_unlock_irqrestore(&sde->flushlist_lock, flags);
/* flush from flush list */
list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2406,7 +2403,7 @@ unlock_noconn:
wait->tx_count++;
wait->count += tx->num_desc;
}
- schedule_work(&sde->flush_worker);
+ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
ret = -ECOMM;
goto unlock;
nodesc:
@@ -2504,7 +2501,7 @@ unlock_noconn:
}
}
spin_unlock(&sde->flushlist_lock);
- schedule_work(&sde->flush_worker);
+ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
ret = -ECOMM;
goto update_tail;
nodesc:
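
sdma_flush() above now drains its flush list with one list_splice_init() instead of moving entries one by one while the lock is held. A minimal sketch of emptying a list under a spinlock and processing it outside the lock; the names are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

struct txreq { struct list_head list; };

static void drain(struct list_head *pending, spinlock_t *lock)
{
	LIST_HEAD(local);
	struct txreq *tx, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &local);	/* O(1) move; pending is left empty */
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(tx, tmp, &local, list) {
		list_del_init(&tx->list);
		/* complete or free tx here, outside the lock */
	}
}
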
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 621b60ab74ee..5df1e368096c 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -670,7 +670,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ /*
+ * Based on the documentation for kobject_init_and_add(), the
+ * caller should call kobject_put even if this call fails.
+ */
+ goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
@@ -680,7 +684,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sc2vl;
+ goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
@@ -690,7 +694,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl2sc;
+ goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
@@ -700,7 +704,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_vl2mtu;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -738,7 +742,6 @@ bail_sl2sc:
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
-bail:
return ret;
}
@@ -858,8 +861,13 @@ bail:
for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
device_remove_file(&dev->dev, hfi1_attributes[i]);
- for (i = 0; i < dd->num_sdma; i++)
- kobject_del(&dd->per_sdma[i].kobj);
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);
return ret;
}
@@ -872,6 +880,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];
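
The sysfs.c rework applies the kobject rule cited in the added comment: once kobject_init_and_add() has run, cleanup must go through kobject_put(), even when that call itself failed, so the release callback frees the object. A minimal sketch with a hypothetical ktype:

#include <linux/kobject.h>
#include <linux/slab.h>

static int add_child(struct kobject *parent, struct kobj_type *ktype)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;

	ret = kobject_init_and_add(kobj, ktype, parent, "child");
	if (ret)
		kobject_put(kobj);	/* not kfree(): release runs via the final put */

	return ret;
}
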
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 4c111162d552..098296aaa225 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -260,7 +260,6 @@ struct user_sdma_txreq {
struct list_head list;
struct user_sdma_request *req;
u16 flags;
- unsigned busycount;
u64 seqnum;
};
@@ -323,25 +322,22 @@ static int defer_packet_queue(
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
- struct user_sdma_txreq *tx =
- container_of(txreq, struct user_sdma_txreq, txreq);
- if (sdma_progress(sde, seq, txreq)) {
- if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
- goto eagain;
- }
+ write_seqlock(&dev->iowait_lock);
+ if (sdma_progress(sde, seq, txreq))
+ goto eagain;
/*
* We are assuming that if the list is enqueued somewhere, it
* is to the dmawait list since that is the only place where
* it is supposed to be enqueued.
*/
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
- write_seqlock(&dev->iowait_lock);
if (list_empty(&pq->busy.list))
list_add_tail(&pq->busy.list, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
eagain:
+ write_sequnlock(&dev->iowait_lock);
return -EAGAIN;
}
@@ -925,7 +921,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
tx->flags = 0;
tx->req = req;
- tx->busycount = 0;
INIT_LIST_HEAD(&tx->list);
if (req->seqnum == req->info.npkts - 1)
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index d9c71750e22d..15054a0cbf6d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1344,8 +1344,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index d8a5bad49680..837729d0be46 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
if (tx)
goto out;
priv = qp->priv;
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 31ded57592ee..0bd58a0772e2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -71,6 +71,7 @@ struct hfi1_ibdev;
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp);
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
__must_hold(&qp->slock)
@@ -78,7 +79,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct verbs_txreq *tx;
struct hfi1_qp_priv *priv = qp->priv;
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
if (unlikely(!tx)) {
/* call slow path to get the lock */
tx = __get_txreq(dev, qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 435748858252..8e8917ebb013 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -52,7 +52,7 @@ enum {
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist)))
+ (sizeof(struct scatterlist) + sizeof(void *)))
enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12,
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 33cf1035030b..6f3c0ea99dd0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -241,7 +241,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
- hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 85637696f6e9..282a726351c8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1652,7 +1652,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
unsigned long flags;
rtnl_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
+ for_each_netdev(&init_net, ip_dev) {
if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
(rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
(ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 095912fb3201..c3d2400e36b9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -812,6 +812,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
struct i40iw_qp *iwqp = to_iwqp(ibqp);
struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+ attr->qp_state = iwqp->ibqp_state;
+ attr->cur_qp_state = attr->qp_state;
attr->qp_access_flags = 0;
attr->cap.max_send_wr = qp->qp_uk.sq_size;
attr->cap.max_recv_wr = qp->qp_uk.rq_size;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 5e9939045852..ec138845a474 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -805,8 +805,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
unsigned long flags;
for (i = 0 ; i < dev->num_ports; i++) {
- cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
det = &sriov->alias_guid.ports_guid[i];
+ cancel_delayed_work_sync(&det->alias_guid_work);
spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
while (!list_empty(&det->cb_list)) {
cb_ctx = list_entry(det->cb_list.next,
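
The alias_GUID.c fix switches to cancel_delayed_work_sync() so teardown waits for a handler that may already be running before the state it touches goes away. Minimal sketch:

#include <linux/workqueue.h>

struct port_state {
	struct delayed_work guid_work;
	/* ...data the work handler dereferences... */
};

static void port_destroy(struct port_state *ps)
{
	/*
	 * cancel_delayed_work() only removes a pending item;
	 * the _sync variant also waits for an executing handler.
	 */
	cancel_delayed_work_sync(&ps->guid_work);
	/* only now is it safe to free ps */
}
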
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a488889fc7..5dc920fe1326 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -39,7 +39,7 @@
#include "mlx4_ib.h"
-#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
+#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
struct id_map_entry {
struct rb_node node;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d9323d7c479c..f32ffd74ec47 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1643,8 +1643,6 @@ tx_err:
tx_buf_size, DMA_TO_DEVICE);
kfree(tun_qp->tx_ring[i].buf.addr);
}
- kfree(tun_qp->tx_ring);
- tun_qp->tx_ring = NULL;
i = MLX4_NUM_TUNNEL_BUFS;
err:
while (i > 0) {
@@ -1653,6 +1651,8 @@ err:
rx_buf_size, DMA_FROM_DEVICE);
kfree(tun_qp->ring[i].addr);
}
+ kfree(tun_qp->tx_ring);
+ tun_qp->tx_ring = NULL;
kfree(tun_qp->ring);
tun_qp->ring = NULL;
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d59a5905ee8..adc46b809ef2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1172,6 +1172,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx4_ib_vma_close().
*/
down_write(&owning_mm->mmap_sem);
+ if (!mmget_still_valid(owning_mm))
+ goto skip_mm;
for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma;
if (!vma)
@@ -1190,7 +1192,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL;
}
-
+skip_mm:
up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
@@ -3029,16 +3031,17 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->ib_active = false;
flush_workqueue(wq);
- mlx4_ib_close_sriov(ibdev);
- mlx4_ib_mad_cleanup(ibdev);
- ib_unregister_device(&ibdev->ib_dev);
- mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
ibdev->iboe.nb.notifier_call = NULL;
}
+ mlx4_ib_close_sriov(ibdev);
+ mlx4_ib_mad_cleanup(ibdev);
+ ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
+
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
kfree(ibdev->ib_uc_qpns_bitmap);
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index 69fb5ba94d0f..19caacd26f61 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -352,16 +352,12 @@ err:
static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
{
- char base_name[9];
-
- /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
- strlcpy(name, pci_name(dev->dev->persist->pdev), max);
- strncpy(base_name, name, 8); /*till xxxx:yy:*/
- base_name[8] = '\0';
- /* with no ARI only 3 last bits are used so when the fn is higher than 8
+ /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n
+ * with no ARI only 3 last bits are used so when the fn is higher than 8
* need to add it to the dev num, so count in the last number will be
* modulo 8 */
- sprintf(name, "%s%.2d.%d", base_name, (i/8), (i%8));
+ snprintf(name, max, "%.8s%.2d.%d", pci_name(dev->dev->persist->pdev),
+ i / 8, i % 8);
}
struct mlx4_port {
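
get_name() above collapses a strncpy()-into-scratch-buffer sequence into one bounded snprintf(), using "%.8s" to copy at most the "xxxx:yy:" prefix of the PCI name. A minimal sketch of the same formatting with illustrative arguments:

#include <linux/kernel.h>

static void format_port_name(char *name, size_t max, const char *pci, int i)
{
	/* %.8s limits how much of the PCI name is copied; snprintf bounds the whole write. */
	snprintf(name, max, "%.8s%.2d.%d", pci, i / 8, i % 8);
}
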
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 79e6309460dc..262c18b2f525 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -507,8 +507,7 @@ int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
if (ret) {
/* Undo the effect of adding the outstanding wr */
- gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
- gsi->cap.max_send_wr;
+ gsi->outstanding_pi--;
goto err;
}
spin_unlock_irqrestore(&gsi->lock, flags);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b1daf5c16117..f94df0e6a0f2 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1307,6 +1307,8 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx5_ib_vma_close.
*/
down_write(&owning_mm->mmap_sem);
+ if (!mmget_still_valid(owning_mm))
+ goto skip_mm;
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;
@@ -1321,6 +1323,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
list_del(&vma_private->list);
kfree(vma_private);
}
+skip_mm:
up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f89489b28575..4d906a790481 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1421,7 +1421,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -2325,6 +2324,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
@@ -2358,6 +2362,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
},
},
[MLX5_QP_STATE_RTS] = {
@@ -2374,6 +2384,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
},
},
[MLX5_QP_STATE_SQER] = {
@@ -2385,6 +2401,10 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RRE,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
},
},
};
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ded76c101dde..834b06aacc2b 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -989,7 +989,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
- if (mthca_cmd_init(mdev)) {
+ err = mthca_cmd_init(mdev);
+ if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 797362a297b2..35efd40ba47f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -82,7 +82,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
u8 nxthdr = 0x11;
struct iphdr ipv4;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -131,9 +130,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
ipv4.tot_len = htons(0);
ipv4.ttl = attr->grh.hop_limit;
ipv4.protocol = nxthdr;
- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
- rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
} else {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 67fc0b6857e1..edfa22847724 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -2505,7 +2505,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
u32 vlan_id = 0xFFFF;
u8 mac_addr[6], hdr_type;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -2550,8 +2549,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if (hdr_type == RDMA_NETWORK_IPV4) {
- rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
memcpy(&cmd->params.dgid[0],
&dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
memcpy(&cmd->params.sgid[0],
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6af44f8db3d5..4d28bd8eff01 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -55,7 +55,7 @@
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
- if (index > 1)
+ if (index > 0)
return -EINVAL;
*pkey = 0xffff;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index cd0408c2b376..7603a1641c7d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -54,7 +54,7 @@
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
return -EINVAL;
*pkey = QEDR_ROCE_PKEY_DEFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 891873b38a1e..5f3f197678b7 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -600,8 +600,10 @@ retry:
dw = (len + 3) >> 2;
addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
dw << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
+ if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
+ ret = -ENOMEM;
goto unmap;
+ }
sdmadesc[0] = 0;
make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
/* SDmaUseLargeBuf has to be set in every descriptor */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index fe4cf5e4acec..8ce0f6eef89e 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->show)
+ return -EIO;
+
return pattr->show(ppd, buf);
}
@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->store)
+ return -EIO;
+
return pattr->store(ppd, buf, len);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 954f15064514..d6e183775e24 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1568,8 +1568,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a5bfbba6bbac..cacb720f44a0 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -425,7 +425,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
- if (index > 1)
+ if (index > 0)
return -EINVAL;
*pkey = 0xffff;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 46b64970058e..dbd4c0d268e9 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -94,6 +94,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
for (i = 0; i < rdi->lkey_table.max; i++)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
@@ -497,11 +499,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
- if (mr->mr.length == 0) {
- mr->mr.user_base = addr;
- mr->mr.iova = addr;
- }
-
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -518,17 +515,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
+ int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
- rvt_set_page);
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+ mr->mr.user_base = ibmr->iova;
+ mr->mr.iova = ibmr->iova;
+ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+ mr->mr.length = (size_t)ibmr->length;
+ return ret;
}
/**
@@ -559,6 +563,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
+ mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 6500c3b5a89c..8b330b53d636 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -370,7 +370,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
offset = qpt->incr | ((offset & 1) ^ 1);
}
/* there can be no set bits in low-order QoS bits */
- WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
+ WARN_ON(rdi->dparms.qos_shift > 1 &&
+ offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
qpn = mk_qpn(qpt, map, offset);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index df15b6d7b645..b03a6206d9be 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -250,6 +250,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ /* read retries of partial data may restart from
+ * read response first or response only.
+ */
+ if ((pkt->psn == wqe->first_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+ (wqe->first_psn == wqe->last_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+ break;
+
return COMPST_ERROR;
}
break;
@@ -486,11 +497,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- qp->comp.opcode = -1;
-
- if (pkt) {
- if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
- qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ if (pkt && wqe->state == wqe_state_pending) {
+ if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+ qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+ qp->comp.opcode = -1;
+ }
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index e5e6a5e7dee9..5ac88412f1ff 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -30,7 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -89,7 +89,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,
cq->queue->buf_size, &cq->queue->ip);
if (err) {
- kvfree(cq->queue->buf);
+ vfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 44b2108253bd..d6672127808b 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -255,7 +256,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->sq.queue->buf_size, &qp->sq.queue->ip);
if (err) {
- kvfree(qp->sq.queue->buf);
+ vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
return err;
}
@@ -312,7 +313,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- kvfree(qp->rq.queue->buf);
+ vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0f9fe2ca2a91..6fb771290c56 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -72,9 +72,6 @@ static void req_retry(struct rxe_qp *qp)
int npsn;
int first = 1;
- wqe = queue_head(qp->sq.queue);
- npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
-
qp->req.wqe_index = consumer_index(qp->sq.queue);
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
@@ -106,11 +103,17 @@ static void req_retry(struct rxe_qp *qp)
if (first) {
first = 0;
- if (mask & WR_WRITE_OR_SEND_MASK)
+ if (mask & WR_WRITE_OR_SEND_MASK) {
+ npsn = (qp->comp.psn - wqe->first_psn) &
+ BTH_PSN_MASK;
retry_first_write_send(qp, wqe, mask, npsn);
+ }
- if (mask & WR_READ_MASK)
+ if (mask & WR_READ_MASK) {
+ npsn = (wqe->dma.length - wqe->dma.resid) /
+ qp->mtu;
wqe->iova += npsn * qp->mtu;
+ }
}
wqe->state = wqe_state_posted;
@@ -439,7 +442,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (pkt->mask & RXE_RETH_MASK) {
reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
reth_set_va(pkt, wqe->iova);
- reth_set_len(pkt, wqe->dma.length);
+ reth_set_len(pkt, wqe->dma.resid);
}
if (pkt->mask & RXE_IMMDT_MASK)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 297653ab4004..5bfea23f3b60 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -432,6 +432,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
qp->resp.va = reth_va(pkt);
qp->resp.rkey = reth_rkey(pkt);
qp->resp.resid = reth_len(pkt);
+ qp->resp.length = reth_len(pkt);
}
access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
: IB_ACCESS_REMOTE_WRITE;
@@ -841,7 +842,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
pkt->mask & RXE_WRITE_MASK) ?
IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
wc->vendor_err = 0;
- wc->byte_len = wqe->dma.length - wqe->dma.resid;
+ wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ qp->resp.length : wqe->dma.length - wqe->dma.resid;
/* fields after byte_len are different between kernel and user
* space
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index cac1d52a08f0..dee3853163b6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -209,6 +209,7 @@ struct rxe_resp_info {
struct rxe_mem *mr;
u32 resid;
u32 rkey;
+ u32 length;
u64 atomic_orig;
/* SRQ only */
@@ -421,7 +422,7 @@ struct rxe_dev {
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* guard mmap_offset */
- int mmap_offset;
+ u64 mmap_offset;
struct rxe_port port;
struct list_head list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 17c5bc7e8957..45504febbc2a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1751,6 +1751,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
return err;
ivf->vf = vf;
+ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index e46e2b095c18..fdf5179a81c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -649,6 +649,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index cb48e22afff7..a3614f7f0007 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -197,7 +197,7 @@ struct iser_data_buf {
struct scatterlist *sg;
int size;
unsigned long data_len;
- unsigned int dma_nents;
+ int dma_nents;
};
/* fwd declarations */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 81ae2e30dd12..27a7e4406f34 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -590,13 +590,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
ib_conn->post_recv_buf_count--;
}
-static inline void
+static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
- if (likely(rkey == desc->rsc.mr->rkey))
+ if (likely(rkey == desc->rsc.mr->rkey)) {
desc->rsc.mr_valid = 0;
- else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
+ } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
desc->pi_ctx->sig_mr_valid = 0;
+ } else {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int
@@ -624,12 +630,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
if (iser_task->dir[ISER_DIR_IN]) {
desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
} else {
iser_err("failed to get task for itt=%d\n", hdr->itt);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9c3e9ab53a41..759c2fe033e7 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -240,8 +240,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
page_vec->npages = 0;
page_vec->fake_mr.page_size = SIZE_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->size, NULL, iser_set_page);
- if (unlikely(plen < mem->size)) {
+ mem->dma_nents, NULL, iser_set_page);
+ if (unlikely(plen < mem->dma_nents)) {
iser_err("page vec too short to hold this SG\n");
iser_data_buf_dump(mem, device->ib_device);
iser_dump_page_vec(page_vec);
@@ -450,10 +450,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
- if (unlikely(n != mem->size)) {
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
- n, mem->size);
+ n, mem->dma_nents);
return n < 0 ? n : -EINVAL;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 74de1ae48d4f..af68be201c29 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2180,6 +2180,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
if (srp_post_send(ch, iu, len)) {
shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
+ scmnd->result = DID_ERROR << 16;
goto err_unmap;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2a44a2c3e859..8acf337c3691 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1234,9 +1234,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
+ struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
@@ -1258,6 +1260,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->tag = tag;
srp_rsp->status = status;
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
@@ -2368,8 +2392,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
srpt_queue_response(cmd);
}
+/*
+ * This function is called for aborted commands if no response is sent to the
+ * initiator. Make sure that the credits freed by aborting a command are
+ * returned to the initiator the next time a response is sent by incrementing
+ * ch->req_lim_delta.
+ */
static void srpt_aborted_task(struct se_cmd *cmd)
{
+ struct srpt_send_ioctx *ioctx = container_of(cmd,
+ struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+
+ atomic_inc(&ch->req_lim_delta);
}
static int srpt_queue_status(struct se_cmd *cmd)
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index fcc6c3368182..ea3f0f5eb534 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -501,6 +501,15 @@ static void ml_ff_destroy(struct ff_device *ff)
{
struct ml_device *ml = ff->private;
+ /*
+ * Even though we stop all playing effects when tearing down
+ * an input device (via input_device_flush() that calls into
+ * input_ff_flush() that stops and erases all effects), we
+ * do not actually stop the timer, and therefore we should
+ * do it here.
+ */
+ del_timer_sync(&ml->timer);
+
kfree(ml->private);
}
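
The ff-memless change stops the effect timer explicitly in ml_ff_destroy(); as the added comment notes, flushing the effects does not stop the timer itself. The general rule is that a timer must be synchronously stopped before the memory its handler uses is freed. Minimal sketch with illustrative names:

#include <linux/timer.h>
#include <linux/slab.h>

struct ml_like {
	struct timer_list timer;
	void *private;
};

static void ml_like_destroy(struct ml_like *ml)
{
	/* Waits for a running handler and prevents re-arming before the frees below. */
	del_timer_sync(&ml->timer);
	kfree(ml->private);
	kfree(ml);
}
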
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d95c34ee5dc1..5d94fc3fce0b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -850,16 +850,18 @@ static int input_default_setkeycode(struct input_dev *dev,
}
}
- __clear_bit(*old_keycode, dev->keybit);
- __set_bit(ke->keycode, dev->keybit);
-
- for (i = 0; i < dev->keycodemax; i++) {
- if (input_fetch_keycode(dev, i) == *old_keycode) {
- __set_bit(*old_keycode, dev->keybit);
- break; /* Setting the bit twice is useless, so break */
+ if (*old_keycode <= KEY_MAX) {
+ __clear_bit(*old_keycode, dev->keybit);
+ for (i = 0; i < dev->keycodemax; i++) {
+ if (input_fetch_keycode(dev, i) == *old_keycode) {
+ __set_bit(*old_keycode, dev->keybit);
+ /* Setting the bit twice is useless, so break */
+ break;
+ }
}
}
+ __set_bit(ke->keycode, dev->keybit);
return 0;
}
@@ -915,9 +917,13 @@ int input_set_keycode(struct input_dev *dev,
* Simulate keyup event if keycode is not present
* in the keymap anymore
*/
- if (test_bit(EV_KEY, dev->evbit) &&
- !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
- __test_and_clear_bit(old_keycode, dev->key)) {
+ if (old_keycode > KEY_MAX) {
+ dev_warn(dev->dev.parent ?: &dev->dev,
+ "%s: got too big old keycode %#x\n",
+ __func__, old_keycode);
+ } else if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
struct input_value vals[] = {
{ EV_KEY, old_keycode, 0 },
input_value_sync
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index db64adfbe1af..3e1ea912b41d 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -145,7 +145,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
return -ENODEV;
epirq = &interface->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(epirq))
+ return -ENODEV;
+
epout = &interface->endpoint[1].desc;
+ if (!usb_endpoint_is_int_out(epout))
+ return -ENODEV;
if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
goto fail;
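
The iforce probe fix validates the endpoint types taken from the interface descriptor before using them, which protects the driver from malformed or malicious devices. A minimal sketch; the bNumEndpoints test is an extra precaution, not part of the hunk above:

#include <linux/usb.h>

static int check_endpoints(struct usb_host_interface *intf)
{
	const struct usb_endpoint_descriptor *ep_in, *ep_out;

	if (intf->desc.bNumEndpoints < 2)
		return -ENODEV;

	ep_in  = &intf->endpoint[0].desc;
	ep_out = &intf->endpoint[1].desc;

	/* Reject devices whose descriptors do not match what the driver expects. */
	if (!usb_endpoint_is_int_in(ep_in) || !usb_endpoint_is_int_out(ep_out))
		return -ENODEV;

	return 0;
}
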
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 9b14cab768d5..9d3971818e05 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -528,7 +528,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused imx_kbd_suspend_noirq(struct device *dev)
+static int __maybe_unused imx_kbd_noirq_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_keypad *kbd = platform_get_drvdata(pdev);
@@ -556,7 +556,7 @@ static int __maybe_unused imx_kbd_suspend_noirq(struct device *dev)
return 0;
}
-static int __maybe_unused imx_kbd_resume_noirq(struct device *dev)
+static int __maybe_unused imx_kbd_noirq_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_keypad *kbd = platform_get_drvdata(pdev);
@@ -581,8 +581,7 @@ err_clk:
}
static const struct dev_pm_ops imx_kbd_pm_ops = {
- .suspend_noirq = imx_kbd_suspend_noirq,
- .resume_noirq = imx_kbd_resume_noirq,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_kbd_noirq_suspend, imx_kbd_noirq_resume)
};
static struct platform_driver imx_keypad_driver = {
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 8567ee47761e..ae3b04557074 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -100,7 +100,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
cpu_relax();
- if (!timeout)
+ if (timeout == -1)
return -EINVAL;
/*
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index bb863e062b03..eaf5ecc431c9 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -247,10 +247,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
onkey->input->phys = onkey->phys;
onkey->input->dev.parent = &pdev->dev;
- if (onkey->key_power)
- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index a3fe4a990cc9..c7b889d13edd 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -344,7 +344,8 @@ static int keyspan_setup(struct usb_device* dev)
int retval = 0;
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
__func__, retval);
@@ -352,7 +353,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x44, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
__func__, retval);
@@ -360,7 +362,8 @@ static int keyspan_setup(struct usb_device* dev)
}
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+ 0x22, 0x40, 0x0, 0x0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
if (retval) {
dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
__func__, retval);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index a306453d40d2..89d37d0d45ed 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -991,13 +991,31 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
-#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+/*
+ * These IOCTLs change their size and thus their numbers between
+ * 32 and 64 bits.
+ */
+#define UI_SET_PHYS_COMPAT \
+ _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+#define UI_BEGIN_FF_UPLOAD_COMPAT \
+ _IOWR(UINPUT_IOCTL_BASE, 200, struct uinput_ff_upload_compat)
+#define UI_END_FF_UPLOAD_COMPAT \
+ _IOW(UINPUT_IOCTL_BASE, 201, struct uinput_ff_upload_compat)
static long uinput_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- if (cmd == UI_SET_PHYS_COMPAT)
+ switch (cmd) {
+ case UI_SET_PHYS_COMPAT:
cmd = UI_SET_PHYS;
+ break;
+ case UI_BEGIN_FF_UPLOAD_COMPAT:
+ cmd = UI_BEGIN_FF_UPLOAD;
+ break;
+ case UI_END_FF_UPLOAD_COMPAT:
+ cmd = UI_END_FF_UPLOAD;
+ break;
+ }
return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
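The uinput hunk above hinges on how ioctl numbers are built: _IOW()/_IOWR() encode the argument size into the command value, so a structure containing a pointer gets a different number on 32-bit and 64-bit ABIs and must be translated in the compat path. The small userspace program below makes the size dependence visible; DEMO_IOCTL_BASE and the request number 108 are arbitrary stand-ins, not the real uinput definitions.

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>

#define DEMO_IOCTL_BASE 'U'	/* arbitrary base character for the demo */

int main(void)
{
	/* same base and number, different argument width */
	unsigned int cmd64 = _IOW(DEMO_IOCTL_BASE, 108, uint64_t);
	unsigned int cmd32 = _IOW(DEMO_IOCTL_BASE, 108, uint32_t);

	printf("64-bit pointer argument: 0x%08x\n", cmd64);
	printf("32-bit pointer argument: 0x%08x\n", cmd32);
	return 0;
}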
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 38edf8f5bf8a..15be3ee6cc50 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1187,6 +1187,8 @@ static const char * const middle_button_pnp_ids[] = {
"LEN2132", /* ThinkPad P52 */
"LEN2133", /* ThinkPad P72 w/ NFC */
"LEN2134", /* ThinkPad P72 */
+ "LEN0407",
+ "LEN0408",
NULL
};
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 88055755f82e..821b446a85dd 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -153,7 +153,8 @@ struct trackpoint_data
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
#else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+ bool set_properties)
{
return -ENOSYS;
}
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312fbd25..65038dcc7613 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -772,7 +772,7 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
error = rmi_register_function(fn);
if (error)
- goto err_put_fn;
+ return error;
if (pdt->function_number == 0x01)
data->f01_container = fn;
@@ -780,10 +780,6 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
list_add_tail(&fn->node, &data->function_list);
return RMI_SCAN_CONTINUE;
-
-err_put_fn:
- put_device(&fn->dev);
- return error;
}
int rmi_driver_suspend(struct rmi_device *rmi_dev)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f427a46f..275f957604f7 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -1198,7 +1198,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
rc = f11_write_control_regs(fn, &f11->sens_query,
- &f11->dev_controls, fn->fd.query_base_addr);
+ &f11->dev_controls, fn->fd.control_base_addr);
if (rc)
dev_warn(&fn->dev, "Failed to write control registers\n");
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 2e934aef3d2a..b669cdc8c8c6 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -364,7 +364,7 @@ static const struct vb2_ops rmi_f54_queue_ops = {
static const struct vb2_queue rmi_f54_queue = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ,
- .buf_struct_size = sizeof(struct vb2_buffer),
+ .buf_struct_size = sizeof(struct vb2_v4l2_buffer),
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
@@ -617,7 +617,7 @@ static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
- drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
return 0;
}
@@ -744,6 +744,7 @@ static void rmi_f54_remove(struct rmi_function *fn)
video_unregister_device(&f54->vdev);
v4l2_device_unregister(&f54->v4l2);
+ destroy_workqueue(f54->workqueue);
}
struct rmi_function_handler rmi_f54_handler = {
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index ecba666afadb..cca26e6f38b3 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -382,9 +382,9 @@ static int gscps2_probe(struct parisc_device *dev)
goto fail;
#endif
- printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n",
+ pr_info("serio: %s port at 0x%08lx irq %d @ %s\n",
ps2port->port->name,
- ps2port->addr,
+ hpa,
ps2port->padev->irq,
ps2port->port->phys);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 852858e5d8d0..92f541db98a0 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -887,8 +887,8 @@ static int __init hp_sdc_init(void)
"HP SDC NMI", &hp_sdc))
goto err2;
- printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n",
- (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
+ pr_info(PREFIX "HP SDC at 0x%08lx, IRQ %d (NMI IRQ %d)\n",
+ hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
hp_sdc_status_in8();
hp_sdc_data_in8();
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 34be09651ee8..a4e76084a2af 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -534,6 +534,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
+ {
+ /*
+ * Acer Aspire 5738z
+ * Touchpad stops working in mux mode when dis- + re-enabled
+ * with the touchpad enable/disable toggle hotkey
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 4613f0aefd08..5a7e5e073e52 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1822,14 +1822,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
/* Verify that a device really has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev,
"interface has %d endpoints, but must have minimum 1\n",
- intf->altsetting[0].desc.bNumEndpoints);
+ intf->cur_altsetting->desc.bNumEndpoints);
err = -EINVAL;
goto fail3;
}
- endpoint = &intf->altsetting[0].endpoint[0].desc;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
* input.
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 339a0e2d2f86..dd8dc335daca 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
/* Max size of a single report */
#define REPORT_MAX_SIZE 10
+#define MAX_COLLECTION_LEVELS 10
/* Bitmask whether pen is in range */
@@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
char maintype = 'x';
char globtype[12];
int indent = 0;
- char indentstr[10] = "";
-
+ char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
@@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case TAG_MAIN_COL_START:
maintype = 'S';
+ if (indent == MAX_COLLECTION_LEVELS) {
+ dev_err(ddev, "Collection level %d would exceed limit of %d\n",
+ indent + 1,
+ MAX_COLLECTION_LEVELS);
+ break;
+ }
+
if (data == 0) {
dev_dbg(ddev, "======>>>>>> Physical\n");
strcpy(globtype, "Physical");
@@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
break;
case TAG_MAIN_COL_END:
- dev_dbg(ddev, "<<<<<<======\n");
maintype = 'E';
+
+ if (indent == 0) {
+ dev_err(ddev, "Collection level already at zero\n");
+ break;
+ }
+
+ dev_dbg(ddev, "<<<<<<======\n");
+
indent--;
for (x = 0; x < indent; x++)
indentstr[x] = '-';
@@ -861,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Sanity check that a device has an endpoint */
- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}
- /*
- * The endpoint is always altsetting 0, we know this since we know
- * this device only has one interrupt endpoint
- */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
/* Some debug */
dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -959,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
usb_fill_int_urb(gtco->urbinfo,
udev,
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 4d9d64908b59..b7ea64ec1205 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -125,6 +125,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(endpoint))
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
@@ -163,8 +167,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
usb_fill_int_urb(kbtab->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
kbtab->data, 8,
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 47de5a81172f..2319144802c9 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -260,7 +260,7 @@ static int pegasus_probe(struct usb_interface *intf,
return -ENODEV;
/* Sanity check that the device has an endpoint */
- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
dev_err(&intf->dev, "Invalid number of endpoints\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index c2fb0236a47c..8d871fcb7912 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3206,6 +3206,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
+ disable_irq(data->irq);
+
return 0;
}
@@ -3218,6 +3220,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;
+ enable_irq(data->irq);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 44deca88c579..c1c29d7487bf 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1972,11 +1972,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
/* get sysinfo */
md->si = &cd->sysinfo;
- if (!md->si) {
- dev_err(dev, "%s: Fail get sysinfo pointer from core p=%p\n",
- __func__, md->si);
- goto error_get_sysinfo;
- }
rc = cyttsp4_setup_input_device(cd);
if (rc)
@@ -1986,8 +1981,6 @@ static int cyttsp4_mt_probe(struct cyttsp4 *cd)
error_init_input:
input_free_device(md->input);
-error_get_sysinfo:
- input_set_drvdata(md->input, NULL);
error_alloc_failed:
dev_err(dev, "%s failed.\n", __func__);
return rc;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 28466e358fee..22c8d2070faa 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -887,6 +887,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
{
const struct edt_i2c_chip_data *chip_data;
struct edt_ft5x06_ts_data *tsdata;
+ u8 buf[2] = { 0xfc, 0x00 };
struct input_dev *input;
unsigned long irq_flags;
int error;
@@ -956,6 +957,12 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
+ /*
+ * Dummy read access. EP0700MLP1 returns bogus data on the first
+ * register read access and ignores writes.
+ */
+ edt_ft5x06_ts_readwrite(tsdata->client, 2, buf, 2, buf);
+
edt_ft5x06_ts_set_regs(tsdata);
edt_ft5x06_ts_get_defaults(&client->dev, tsdata);
edt_ft5x06_ts_get_parameters(tsdata);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index c599b5a2373b..6a02e7301297 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -90,6 +90,15 @@ static const unsigned long goodix_irq_flags[] = {
static const struct dmi_system_id rotated_screen[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
+ .ident = "Teclast X89",
+ .matches = {
+ /* tPAD is too generic, also match on bios date */
+ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
+ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
+ },
+ },
+ {
.ident = "WinBook TW100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index a99fb5cac5a0..1f5b6b5b1018 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -441,7 +441,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
return 0;
}
-static bool raydium_i2c_boot_trigger(struct i2c_client *client)
+static int raydium_i2c_boot_trigger(struct i2c_client *client)
{
static const u8 cmd[7][6] = {
{ 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
@@ -469,7 +469,7 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
return 0;
}
-static bool raydium_i2c_fw_trigger(struct i2c_client *client)
+static int raydium_i2c_fw_trigger(struct i2c_client *client)
{
static const u8 cmd[5][11] = {
{ 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index f502c8488be8..867772878c0c 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -504,20 +504,33 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
static int __maybe_unused silead_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ bool second_try = false;
int error, status;
silead_ts_set_power(client, SILEAD_POWER_ON);
+ retry:
error = silead_ts_reset(client);
if (error)
return error;
+ if (second_try) {
+ error = silead_ts_load_fw(client);
+ if (error)
+ return error;
+ }
+
error = silead_ts_startup(client);
if (error)
return error;
status = silead_ts_get_status(client);
if (status != SILEAD_STATUS_OK) {
+ if (!second_try) {
+ second_try = true;
+ dev_dbg(dev, "Reloading firmware after unsuccessful resume\n");
+ goto retry;
+ }
dev_err(dev, "Resume error, status: 0x%02x\n", status);
return -ENODEV;
}
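The silead resume change above implements a try-once-then-recover loop: if the controller does not report SILEAD_STATUS_OK after the first reset and startup, the firmware is reloaded and the sequence is retried exactly once before failing. A compact standalone sketch of that control flow follows; start_controller() and reload_firmware() are fake stand-ins that simulate a first failure and are not driver functions.

#include <stdbool.h>
#include <stdio.h>

static int attempts;

static int start_controller(void)
{
	/* pretend the first start after resume fails, the second works */
	return ++attempts < 2 ? -1 : 0;
}

static int reload_firmware(void)
{
	puts("reloading firmware");
	return 0;
}

static int resume(void)
{
	bool second_try = false;

retry:
	if (second_try && reload_firmware())
		return -1;

	if (start_controller()) {
		if (!second_try) {
			second_try = true;
			goto retry;		/* one recovery attempt only */
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("resume %s\n", resume() ? "failed" : "succeeded");
	return 0;
}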
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index e943678ce54c..f1c574d6be17 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -203,6 +203,7 @@ static int st1232_ts_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
__set_bit(EV_SYN, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_ABS, input_dev->evbit);
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index d07dd29d4848..9a94d65bf483 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -246,6 +246,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device *hwmon;
+ struct thermal_zone_device *thermal;
int error;
u32 reg;
bool ts_attached;
@@ -365,7 +366,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
+ if (IS_ERR(thermal))
+ return PTR_ERR(thermal);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 4c0eecae065c..2180fa8b695b 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -523,7 +523,7 @@ static int sur40_probe(struct usb_interface *interface,
int error;
/* Check if we really have the right interface. */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c1233d0288a0..bb0448c91f67 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -176,10 +176,14 @@ static struct lock_class_key reserved_rbtree_key;
static inline int match_hid_uid(struct device *dev,
struct acpihid_map_entry *entry)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
const char *hid, *uid;
- hid = acpi_device_hid(ACPI_COMPANION(dev));
- uid = acpi_device_uid(ACPI_COMPANION(dev));
+ if (!adev)
+ return -ENODEV;
+
+ hid = acpi_device_hid(adev);
+ uid = acpi_device_uid(adev);
if (!hid || !(*hid))
return -ENODEV;
@@ -1321,18 +1325,21 @@ static void domain_flush_devices(struct protection_domain *domain)
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
gfp_t gfp)
{
+ unsigned long flags;
u64 *pte;
- if (domain->mode == PAGE_MODE_6_LEVEL)
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
- return false;
+ goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
- return false;
+ goto out;
*pte = PM_LEVEL_PDE(domain->mode,
virt_to_phys(domain->pt_root));
@@ -1340,7 +1347,10 @@ static bool increase_address_space(struct protection_domain *domain,
domain->mode += 1;
domain->updated = true;
- return true;
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return;
}
static u64 *alloc_pte(struct protection_domain *domain,
@@ -2103,6 +2113,8 @@ skip_ats_check:
*/
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
+
return ret;
}
@@ -2589,7 +2601,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ ret = iommu_map_page(domain, bus_addr, phys_addr,
+ PAGE_SIZE, prot,
+ GFP_ATOMIC | __GFP_NOWARN);
if (ret)
goto out_unmap;
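The comment above increase_address_space() states that each added page-table level widens the address space by 9 bits, up to 64 bits. The tiny program below just does that arithmetic (12 offset bits for 4 KiB pages plus 9 bits per level) to show why six levels are the ceiling; it is a back-of-the-envelope check, not driver code.

#include <stdio.h>

int main(void)
{
	for (int level = 1; level <= 6; level++) {
		int bits = 12 + 9 * level;	/* page offset + 9 bits/level */

		printf("level %d: up to %d address bits\n",
		       level, bits > 64 ? 64 : bits);
	}
	return 0;
}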
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 157e93421fb8..c113e46fdc3a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -318,7 +318,7 @@ static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
u64 start = iommu->exclusion_start & PAGE_MASK;
- u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
u64 entry;
if (!iommu->exclusion_start)
@@ -383,6 +383,9 @@ static void iommu_enable(struct amd_iommu *iommu)
static void iommu_disable(struct amd_iommu *iommu)
{
+ if (!iommu->mmio_base)
+ return;
+
/* Disable command buffer */
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -1534,7 +1537,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 0d91785ebdc3..da3fbf82d1cf 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -329,7 +329,7 @@
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
#define DTE_GCR3_INDEX_A 0
#define DTE_GCR3_INDEX_B 1
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 7bd98585d78d..48d382008788 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1103,7 +1103,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
arm_smmu_sync_ste_for_sid(smmu, sid);
- dst[0] = cpu_to_le64(val);
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(dst[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 1520e7f02c2f..89d191b6a0e0 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -493,7 +493,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
- (cur_len + s_length <= max_len)) {
+ (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 63110fbbb410..977070ce4fe9 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -39,6 +39,7 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/limits.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
@@ -138,12 +139,19 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
BUG_ON(dev->is_virtfn);
+ /*
+ * Ignore devices that have a domain number higher than what can
+ * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
+ */
+ if (pci_domain_nr(dev->bus) > U16_MAX)
+ return NULL;
+
/* Only generate path[] for device addition event */
if (event == BUS_NOTIFY_ADD_DEVICE)
for (tmp = dev; tmp; tmp = tmp->bus->self)
level++;
- size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+ size = sizeof(*info) + level * sizeof(info->path[0]);
if (size <= sizeof(dmar_pci_notify_info_buf)) {
info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
} else {
@@ -450,12 +458,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
/* Check for NUL termination within the designated length */
if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
- WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+ pr_warn(FW_BUG
"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return -EINVAL;
}
pr_info("ANDD device: %x name: %s\n", andd->device_number,
@@ -481,14 +490,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
return 0;
}
}
- WARN_TAINT(
- 1, TAINT_FIRMWARE_WORKAROUND,
+ pr_warn(FW_BUG
"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
+ rhsa->base_address,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return 0;
}
@@ -834,14 +843,14 @@ int __init dmar_table_init(void)
static void warn_invalid_dmar(u64 addr, const char *message)
{
- WARN_TAINT_ONCE(
- 1, TAINT_FIRMWARE_WORKAROUND,
+ pr_warn_once(FW_BUG
"Your BIOS is broken; DMAR reported at address %llx%s!\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
addr, message,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
static int __ref
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 86e349614e21..593a4bfcba42 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1636,6 +1636,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
u32 pmen;
unsigned long flags;
+ if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+ return;
+
raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
@@ -3342,9 +3345,12 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
+ if (!dmar_map_gfx)
+ iommu_identity_mapping |= IDENTMAP_GFX;
+
check_tylersburg_isoch();
if (iommu_identity_mapping) {
@@ -4079,10 +4085,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
/* we know that the this iommu should be at offset 0xa000 from vtbar */
drhd = dmar_find_matched_drhd_unit(pdev);
- if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
- TAINT_FIRMWARE_WORKAROUND,
- "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
@@ -4116,9 +4123,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
+ if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
@@ -4867,6 +4872,9 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
+ if (dmar_map_gfx)
+ intel_iommu_gfx_mapped = 1;
+
init_no_remapping_devices();
ret = init_dmars();
@@ -5185,8 +5193,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
u64 phys = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
- if (pte)
- phys = dma_pte_addr(pte);
+ if (pte && dma_pte_present(pte))
+ phys = dma_pte_addr(pte) +
+ (iova & (BIT_MASK(level_to_offset_bits(level) +
+ VTD_PAGE_SHIFT) - 1));
return phys;
}
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d68a552cfe8d..3085b47fac1d 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -207,7 +207,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
if (dma != virt_to_phys(table))
goto out_unmap;
}
- kmemleak_ignore(table);
+ if (lvl == 2)
+ kmemleak_ignore(table);
return table;
out_unmap:
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 87d3060f8609..dbcc13efaf3c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -439,6 +439,7 @@ err_put_group:
mutex_unlock(&group->mutex);
dev->iommu_group = NULL;
kobject_put(group->devices_kobj);
+ sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
kfree(device->name);
err_remove_link:
@@ -1581,9 +1582,9 @@ int iommu_request_dm_for_dev(struct device *dev)
int ret;
/* Device must already be in a group before calling this function */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ group = iommu_group_get(dev);
+ if (!group)
+ return -EINVAL;
mutex_lock(&group->mutex);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 9305964250ac..04cec050e42b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -91,7 +91,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
-#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
@@ -154,9 +153,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -194,8 +193,12 @@ static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_MATCH_ALL;
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -205,8 +208,12 @@ static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_SECTION(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -216,8 +223,12 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
{
u32 value;
- value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
- SMMU_TLB_FLUSH_VA_GROUP(iova);
+ if (smmu->soc->num_asids == 4)
+ value = (asid & 0x3) << 29;
+ else
+ value = (asid & 0x7f) << 24;
+
+ value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
@@ -529,6 +540,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -537,7 +549,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -579,7 +591,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -604,7 +616,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
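Part of the smmu_pde_to_dma() fix above is the cast to dma_addr_t before the shift: without it, the page-frame number is shifted as a 32-bit value and the high bits are lost. The plain C snippet below, using uint32_t/uint64_t as stand-ins for u32/dma_addr_t and an arbitrary sample PDE value, demonstrates the truncation.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pde = 0x00300000;		/* sample page-frame number */

	uint64_t wrong = pde << 12;		/* 32-bit shift wraps to 0 */
	uint64_t right = (uint64_t)pde << 12;	/* widened first: 0x300000000 */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);
	printf("right: 0x%llx\n", (unsigned long long)right);
	return 0;
}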
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 6e24facebb46..a571d9c6e42a 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -282,6 +282,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 83ca754250fb..d1efbb8dadc5 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -365,7 +365,7 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
- its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
+ its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
its_fixup_cmd(cmd);
@@ -1519,14 +1519,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
int i;
+ bitmap_release_region(its_dev->event_map.lpi_map,
+ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
+ get_count_order(nr_irqs));
+
for (i = 0; i < nr_irqs; i++) {
struct irq_data *data = irq_domain_get_irq_data(domain,
virq + i);
- u32 event = its_get_event_id(data);
-
- /* Mark interrupt index as unused */
- clear_bit(event, its_dev->event_map.lpi_map);
-
/* Nuke the entry in the domain */
irq_domain_reset_irq_data(data);
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 30ae43d8cc69..eac9a76fdccb 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1214,6 +1214,7 @@ static struct
struct redist_region *redist_regs;
u32 nr_redist_regions;
bool single_redist;
+ int enabled_rdists;
u32 maint_irq;
int maint_irq_mode;
phys_addr_t vcpu_base;
@@ -1308,8 +1309,10 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
* If GICC is enabled and has valid gicr base address, then it means
* GICR base is presented via GICC
*/
- if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+ acpi_data.enabled_rdists++;
return 0;
+ }
/*
* It's perfectly valid firmware can pass disabled GICC entry, driver
@@ -1339,8 +1342,10 @@ static int __init gic_acpi_count_gicr_regions(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_match_gicc, 0);
- if (count > 0)
+ if (count > 0) {
acpi_data.single_redist = true;
+ count = acpi_data.enabled_rdists;
+ }
return count;
}
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 2d203b422129..c56da0b13da5 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -145,6 +145,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_unmask = imx_gpcv2_irq_unmask,
.irq_set_wake = imx_gpcv2_irq_set_wake,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..b2e16dca76a6 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -117,6 +117,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
+
for (i = 0; i < num_chips; i++) {
/* Mask all irqs */
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
@@ -143,14 +151,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
IRQ_NOPROBE | IRQ_LEVEL);
}
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;
+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 05d87f60d929..406bfe618448 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -160,6 +160,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
void __iomem *base = d->chip_data;
u32 val;
+ if (!msg->address_lo && !msg->address_hi)
+ return;
+
base += get_mbigen_vec_reg(d->hwirq);
val = readl_relaxed(base);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 37dd4645bf18..b075409a06a9 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -5,6 +5,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/versatile-fpga.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
@@ -67,12 +68,16 @@ static void fpga_irq_unmask(struct irq_data *d)
static void fpga_irq_handle(struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
- u32 status = readl(f->base + IRQ_STATUS);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+ status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(desc);
- return;
+ goto out;
}
do {
@@ -81,6 +86,9 @@ static void fpga_irq_handle(struct irq_desc *desc)
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
+
+out:
+ chained_irq_exit(chip, desc);
}
/*
@@ -203,6 +211,9 @@ int __init fpga_irq_of_init(struct device_node *node,
if (of_property_read_u32(node, "valid-mask", &valid_mask))
valid_mask = 0;
+ writel(clear_mask, base + IRQ_ENABLE_CLEAR);
+ writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
@@ -212,9 +223,6 @@ int __init fpga_irq_of_init(struct device_node *node,
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
- writel(clear_mask, base + IRQ_ENABLE_CLEAR);
- writel(clear_mask, base + FIQ_ENABLE_CLEAR);
-
/*
* On Versatile AB/PB, some secondary interrupts have a direct
* pass-thru to the primary controller for IRQs 20 and 22-31 which need
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6a2df3297e77..691ad069444d 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -687,6 +687,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
if (!cdev->ap.applid)
return -ENODEV;
+ if (count < CAPIMSG_BASELEN)
+ return -EINVAL;
+
skb = alloc_skb(count, GFP_USER);
if (!skb)
return -ENOMEM;
@@ -697,7 +700,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
}
mlen = CAPIMSG_LEN(skb->data);
if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+ if (count < CAPI_DATA_B3_REQ_LEN ||
+ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
kfree_skb(skb);
return -EINVAL;
}
@@ -710,6 +714,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
mutex_unlock(&cdev->lock);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 5f306e2eece5..aee4880f972f 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -574,8 +574,7 @@ static int gigaset_initcshw(struct cardstate *cs)
{
struct usb_cardstate *ucs;
- cs->hw.usb = ucs =
- kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
+ cs->hw.usb = ucs = kzalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
if (!ucs) {
pr_err("out of memory\n");
return -ENOMEM;
@@ -587,9 +586,6 @@ static int gigaset_initcshw(struct cardstate *cs)
ucs->bchars[3] = 0;
ucs->bchars[4] = 0x11;
ucs->bchars[5] = 0x13;
- ucs->bulk_out_buffer = NULL;
- ucs->bulk_out_urb = NULL;
- ucs->read_urb = NULL;
tasklet_init(&cs->write_tasklet,
gigaset_modem_fill, (unsigned long) cs);
@@ -688,6 +684,11 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 2) {
+ dev_err(&interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
/* allocate memory for our device state and initialize it */
@@ -707,6 +708,12 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[0].desc;
+ if (!usb_endpoint_is_bulk_out(endpoint)) {
+ dev_err(&interface->dev, "missing bulk-out endpoint\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
ucs->bulk_out_size = buffer_size;
ucs->bulk_out_epnum = usb_endpoint_num(endpoint);
@@ -726,6 +733,12 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[1].desc;
+ if (!usb_endpoint_is_int_in(endpoint)) {
+ dev_err(&interface->dev, "missing int-in endpoint\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
ucs->busy = 0;
ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 480c2d7794eb..8feb8e9e29a6 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4370,7 +4370,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
- if (ent->device == 0xB410) {
+ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 114f3bcba1b0..726fba452f5f 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
printk(KERN_DEBUG
"%s: %s: alloc urb for fifo %i failed",
hw->name, __func__, fifo->fifonum);
+ continue;
}
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
fifo->iso[i].indx = i;
@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
static int
setup_hfcsusb(struct hfcsusb *hw)
{
+ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
u_char b;
+ int ret;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+ memcpy(&b, dmabuf, sizeof(u_char));
+ kfree(dmabuf);
+
/* check the chip id */
- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+ if (ret != 1) {
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
hw->name, __func__);
return 1;
@@ -1963,6 +1974,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
+ if (idx > 15)
+ return -EIO;
+
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 99e5f9751e8b..d204e530fcaa 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
- strcpy(di.name, dev_name(&dev->dev));
+ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
memcpy(di.channelmap, dev->channelmap,
sizeof(di.channelmap));
di.nrbchan = dev->nrbchan;
- strcpy(di.name, dev_name(&dev->dev));
+ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
if (copy_to_user((void __user *)arg, &di, sizeof(di)))
err = -EFAULT;
} else
@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
}
+ dn.name[sizeof(dn.name) - 1] = '\0';
dev = get_mdevice(dn.id);
if (dev)
err = device_rename(&dev->dev, dn.name);
@@ -712,10 +713,10 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct sock *sk = sock->sk;
int err = 0;
- if (!maddr || maddr->family != AF_ISDN)
+ if (addr_len < sizeof(struct sockaddr_mISDN))
return -EINVAL;
- if (addr_len < sizeof(struct sockaddr_mISDN))
+ if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
lock_sock(sk);
@@ -765,6 +766,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
if (!sk)
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 592f597d8951..8261afbbafb0 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1180,8 +1180,7 @@ static int
ctrl_teimanager(struct manager *mgr, void *arg)
{
/* currently we only have one option */
- int *val = (int *)arg;
- int ret = 0;
+ unsigned int *val = (unsigned int *)arg;
switch (val[0]) {
case IMCLEAR_L2:
@@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ return 0;
}
/* This function does create a L2 for fixed TEI in NT Mode */
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index b75333803a63..9f5e4d04efad 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
{
const struct firmware *fw = chip->fw;
- if (fw->size > LP5562_PROGRAM_LENGTH) {
+ /*
+ * the firmware is encoded in ascii hex character, with 2 chars
+ * per byte
+ */
+ if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
fw->size);
return;
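The lp5562 comment above notes that the firmware blob is ASCII hex, two characters per byte, which is why the size check now allows up to LP5562_PROGRAM_LENGTH * 2 input characters. Below is a hypothetical userspace decoder that shows the 2:1 relationship between characters and program bytes; the helper names and the sample pattern string are made up, not taken from the driver.

#include <stdio.h>
#include <string.h>

static int hex_nibble(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

/* Returns the number of decoded bytes, or -1 on bad input or overflow. */
static int decode_hex_fw(const char *text, size_t len,
			 unsigned char *out, size_t out_max)
{
	size_t i, n = 0;

	for (i = 0; i + 1 < len; i += 2) {
		int hi = hex_nibble(text[i]), lo = hex_nibble(text[i + 1]);

		if (hi < 0 || lo < 0 || n >= out_max)
			return -1;
		out[n++] = (unsigned char)((hi << 4) | lo);
	}
	return (int)n;
}

int main(void)
{
	const char fw[] = "40FF7E0040004200";	/* 16 chars -> 8 bytes */
	unsigned char prog[32];
	int n = decode_hex_fw(fw, strlen(fw), prog, sizeof(prog));

	printf("decoded %d bytes from %zu characters\n", n, strlen(fw));
	return 0;
}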
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 5377f22ff994..e2655953667c 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
if (!fw) {
dev_err(dev, "firmware request failed\n");
- goto out;
+ return;
}
/* handling firmware data is chip dependent */
@@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
mutex_unlock(&chip->lock);
-out:
/* firmware should be released for other channel use */
release_firmware(chip->fw);
+ chip->fw = NULL;
}
static int lp55xx_request_firmware(struct lp55xx_chip *chip)
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 09a7cffbc46f..896b38f6f9c0 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -488,6 +488,7 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
+ const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -503,8 +504,11 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- devid = (int)(uintptr_t)of_match_device(
- of_pca9532_leds_match, &client->dev)->data;
+ of_id = of_match_device(of_pca9532_leds_match,
+ &client->dev);
+ if (unlikely(!of_id))
+ return -EINVAL;
+ devid = (int)(uintptr_t) of_id->data;
} else {
devid = id->driver_data;
}
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index ad6223e88340..3d310dd60a0b 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -22,14 +22,6 @@
#define VERSION "1.0"
-#define DEBUG
-
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
-
/* If the cache is older than 800ms we'll refetch it */
#define MAX_AGE msecs_to_jiffies(800)
@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
buf[i+2] = data[3];
buf[i+3] = data[2];
}
-#ifdef DEBUG
- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
- for (i = 0; i < len; ++i)
- DBG(" %x", buf[i]);
- DBG("\n");
-#endif
+ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
if (size)
*size = len;
return (struct smu_sdbp_header *) buf;
@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
if (err < 0)
return err;
sat->last_read = jiffies;
+
#ifdef LOTSA_DEBUG
{
int i;
- DBG(KERN_DEBUG "wf_sat_get: data is");
- for (i = 0; i < 16; ++i)
- DBG(" %.2x", sat->cache[i]);
- DBG("\n");
+ printk(KERN_DEBUG "wf_sat_get: data is");
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, sat->cache, 16, false);
}
#endif
return 0;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 87ef465c6947..c1c43800c4aa 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -389,11 +389,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
if (!strncmp(name, mbox_name, strlen(name)))
- break;
+ return mbox_request_channel(cl, index);
index++;
}
- return mbox_request_channel(cl, index);
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index dd344ee9e62b..ebacd21714ef 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -322,10 +322,11 @@ static int bch_allocator_thread(void *arg)
* possibly issue discards to them, then we add the bucket to
* the free list:
*/
- while (!fifo_empty(&ca->free_inc)) {
+ while (1) {
long bucket;
- fifo_pop(&ca->free_inc, bucket);
+ if (!fifo_pop(&ca->free_inc, bucket))
+ break;
if (ca->discard) {
mutex_unlock(&ca->set->bucket_lock);
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 646fe85261c1..158eae17031c 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -823,12 +823,22 @@ unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
+ struct bkey preceding_key_on_stack = ZERO_KEY;
+ struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
- m = bch_btree_iter_init(b, &iter, b->ops->is_extents
- ? PRECEDING_KEY(&START_KEY(k))
- : PRECEDING_KEY(k));
+ /*
+ * If k has preceding key, preceding_key_p will be set to address
+ * of k's preceding key; otherwise preceding_key_p will be set
+ * to NULL inside preceding_key().
+ */
+ if (b->ops->is_extents)
+ preceding_key(&START_KEY(k), &preceding_key_p);
+ else
+ preceding_key(k, &preceding_key_p);
+
+ m = bch_btree_iter_init(b, &iter, preceding_key_p);
if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae964624efb2..f483041eed98 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -380,7 +380,8 @@ void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
/* Bkey utility code */
-#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
+ (unsigned int)(i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
{
@@ -417,20 +418,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
return __bch_cut_back(where, k);
}
-#define PRECEDING_KEY(_k) \
-({ \
- struct bkey *_ret = NULL; \
- \
- if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
- _ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
- \
- if (!_ret->low) \
- _ret->high--; \
- _ret->low--; \
- } \
- \
- _ret; \
-})
+/*
+ * Pointer '*preceding_key_p' points to a memory object to store preceding
+ * key of k. If the preceding key does not exist, set '*preceding_key_p' to
+ * NULL. So the caller of preceding_key() needs to take care of memory
+ * which '*preceding_key_p' pointed to before calling preceding_key().
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * and it points to an on-stack variable, so the memory release is handled
+ * by stackframe itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+ if (KEY_INODE(k) || KEY_OFFSET(k)) {
+ (**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+ if (!(*preceding_key_p)->low)
+ (*preceding_key_p)->high--;
+ (*preceding_key_p)->low--;
+ } else {
+ (*preceding_key_p) = NULL;
+ }
+}
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
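The block comment added above preceding_key() spells out its calling convention: the caller supplies the storage, and the helper either fills it and leaves *preceding_key_p pointing at it, or sets the pointer to NULL when the 0:0 key has no predecessor. The simplified sketch below mirrors that convention with an invented two-field key type instead of struct bkey.

#include <stdio.h>
#include <stddef.h>

struct key { unsigned long long inode, offset; };

static void preceding(const struct key *k, struct key **out)
{
	if (k->inode || k->offset) {
		**out = *k;			/* fill caller-provided storage */
		if ((*out)->offset == 0)
			(*out)->inode--;	/* borrow from the high word */
		(*out)->offset--;
	} else {
		*out = NULL;			/* 0:0 has no preceding key */
	}
}

int main(void)
{
	struct key storage;			/* caller owns the memory */
	struct key *res = &storage;
	struct key k = { .inode = 1, .offset = 5 };

	preceding(&k, &res);
	if (res)
		printf("preceding key: %llu:%llu\n", res->inode, res->offset);
	else
		printf("no preceding key\n");
	return 0;
}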
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 4e34afb6e36a..c8c5e3368b8b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -681,6 +681,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
+ if (nr == 0)
+ nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 08f20b7cd199..6ee5370eb916 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -309,6 +309,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
+bool is_discard_enabled(struct cache_set *s)
+{
+ struct cache *ca;
+ unsigned int i;
+
+ for_each_cache(ca, s, i)
+ if (ca->discard)
+ return true;
+
+ return false;
+}
+
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
@@ -322,9 +334,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
- cache_set_err_on(n != i->j.seq, s,
-"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
- n, i->j.seq - 1, start, end);
+ if (n != i->j.seq) {
+ if (n == start && is_discard_enabled(s))
+ pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ else {
+ pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
+ n, i->j.seq - 1, start, end);
+ ret = -EIO;
+ goto err;
+ }
+ }
for (k = i->j.start;
k < bset_bkey_last(&i->j);
@@ -513,11 +533,11 @@ static void journal_reclaim(struct cache_set *c)
ca->sb.nr_this_dev);
}
- bkey_init(k);
- SET_KEY_PTRS(k, n);
-
- if (n)
+ if (n) {
+ bkey_init(k);
+ SET_KEY_PTRS(k, n);
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+ }
out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
@@ -642,6 +662,9 @@ static void journal_write_unlocked(struct closure *cl)
ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
}
+ /* If KEY_PTRS(k) == 0, this jset would be silently lost */
+ BUG_ON(i == 0);
+
atomic_dec_bug(&fifo_back(&c->journal.pin));
bch_journal_next(&c->journal);
journal_reclaim(c);
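
The replay change above downgrades a gap in journal sequence numbers from a cache-set error to an informational message when the gap sits at the start of the replay range and at least one cache device has discard enabled; any other gap now fails the replay with -EIO. A compact userspace model of that decision, with made-up sequence numbers and a plain array standing in for the journal list (the advance of n past each entry happens later in the real loop, outside the quoted hunk):

/* Userspace model of the journal-replay gap check (illustrative only). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int replay_check(const unsigned long long *seqs, int n_entries,
			unsigned long long start, bool discard_enabled)
{
	unsigned long long n = start;
	int i;

	for (i = 0; i < n_entries; i++) {
		if (n != seqs[i]) {
			if (n == start && discard_enabled)
				printf("entries %llu-%llu may be discarded\n",
				       n, seqs[i] - 1);
			else {
				printf("entries %llu-%llu missing\n",
				       n, seqs[i] - 1);
				return -EIO;
			}
		}
		n = seqs[i] + 1;	/* next expected sequence number */
	}
	return 0;
}

int main(void)
{
	unsigned long long a[] = { 12, 13, 14 };	/* gap at the start */
	unsigned long long b[] = { 10, 11, 14 };	/* gap in the middle */

	printf("a, discard on : %d\n", replay_check(a, 3, 10, true));
	printf("a, discard off: %d\n", replay_check(a, 3, 10, false));
	printf("b, discard on : %d\n", replay_check(b, 3, 10, true));
	return 0;
}
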
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 894992ae9be0..526e9d5a4fb1 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -904,6 +904,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
+ calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
@@ -1357,6 +1358,7 @@ static void cache_set_free(struct closure *cl)
bch_btree_cache_free(c);
bch_journal_free(c);
+ mutex_lock(&bch_register_lock);
for_each_cache(ca, c, i)
if (ca) {
ca->set = NULL;
@@ -1379,7 +1381,6 @@ static void cache_set_free(struct closure *cl)
mempool_destroy(c->search);
kfree(c->devices);
- mutex_lock(&bch_register_lock);
list_del(&c->list);
mutex_unlock(&bch_register_lock);
@@ -1397,15 +1398,12 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
- if (!c)
- closure_return(cl);
-
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
kobject_del(&c->kobj);
- if (c->gc_thread)
+ if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
if (!IS_ERR_OR_NULL(c->root))
@@ -1561,7 +1559,7 @@ err:
return NULL;
}
-static void run_cache_set(struct cache_set *c)
+static int run_cache_set(struct cache_set *c)
{
const char *err = "cannot allocate memory";
struct cached_dev *dc, *t;
@@ -1653,7 +1651,9 @@ static void run_cache_set(struct cache_set *c)
if (j->version < BCACHE_JSET_VERSION_UUID)
__uuid_write(c);
- bch_journal_replay(c, &journal);
+ err = "bcache: replay journal failed";
+ if (bch_journal_replay(c, &journal))
+ goto err;
} else {
pr_notice("invalidating existing data");
@@ -1721,11 +1721,13 @@ static void run_cache_set(struct cache_set *c)
flash_devs_run(c);
set_bit(CACHE_SET_RUNNING, &c->flags);
- return;
+ return 0;
err:
closure_sync(&cl);
/* XXX: test this, it's broken */
bch_cache_set_error(c, "%s", err);
+
+ return -EIO;
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
@@ -1789,8 +1791,11 @@ found:
ca->set->cache[ca->sb.nr_this_dev] = ca;
c->cache_by_alloc[c->caches_loaded++] = ca;
- if (c->caches_loaded == c->sb.nr_in_set)
- run_cache_set(c);
+ if (c->caches_loaded == c->sb.nr_in_set) {
+ err = "failed to run cache set";
+ if (run_cache_set(c) < 0)
+ goto err;
+ }
return NULL;
err:
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 5a5c1f1bd8a5..463ce6757338 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -215,7 +215,9 @@ STORE(__cached_dev)
d_strtoul(writeback_rate_d_term);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
- d_strtoi_h(sequential_cutoff);
+ sysfs_strtoul_clamp(sequential_cutoff,
+ dc->sequential_cutoff,
+ 0, UINT_MAX);
d_strtoi_h(readahead);
if (attr == &sysfs_clear_stats)
@@ -645,8 +647,17 @@ STORE(__bch_cache_set)
c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT;
/* See count_io_errors() for why 88 */
- if (attr == &sysfs_io_error_halflife)
- c->error_decay = strtoul_or_return(buf) / 88;
+ if (attr == &sysfs_io_error_halflife) {
+ unsigned long v = 0;
+ ssize_t ret;
+
+ ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
+ if (!ret) {
+ c->error_decay = v / 88;
+ return size;
+ }
+ return ret;
+ }
sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
sysfs_strtoul(verify, c->verify);
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 0526fe92a683..e7a3c12aa66f 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -80,9 +80,16 @@ do { \
#define sysfs_strtoul_clamp(file, var, min, max) \
do { \
- if (attr == &sysfs_ ## file) \
- return strtoul_safe_clamp(buf, var, min, max) \
- ?: (ssize_t) size; \
+ if (attr == &sysfs_ ## file) { \
+ unsigned long v = 0; \
+ ssize_t ret; \
+ ret = strtoul_safe_clamp(buf, v, min, max); \
+ if (!ret) { \
+ var = v; \
+ return size; \
+ } \
+ return ret; \
+ } \
} while (0)
#define strtoul_or_return(cp) \
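
The rewritten sysfs_strtoul_clamp() parses into a local temporary and assigns to the target variable only when parsing succeeds, so a malformed sysfs write can no longer clobber a live setting with a half-parsed value. The same pattern in plain C, using the C library's strtoul instead of the kernel's strtoul_safe_clamp (the helper name and error handling below are illustrative, not the kernel API):

/* Parse-then-assign pattern: the target is untouched on bad input. */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int strtoul_clamped(const char *buf, unsigned long *var,
			   unsigned long min, unsigned long max)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(buf, &end, 10);
	if (errno || end == buf)
		return -EINVAL;		/* *var is left as-is */
	if (v < min)
		v = min;
	if (v > max)
		v = max;
	*var = v;
	return 0;
}

int main(void)
{
	unsigned long cutoff = 4096;

	printf("parse \"8192\": %d, cutoff=%lu\n",
	       strtoul_clamped("8192", &cutoff, 0, UINT_MAX), cutoff);
	printf("parse \"oops\": %d, cutoff=%lu\n",
	       strtoul_clamped("oops", &cutoff, 0, UINT_MAX), cutoff);
	return 0;
}
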
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f7ff408567ad..63bff4cc7098 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1699,7 +1699,7 @@ void bitmap_flush(struct mddev *mddev)
/*
* free memory that was allocated
*/
-static void bitmap_free(struct bitmap *bitmap)
+static void md_bitmap_free(struct bitmap *bitmap)
{
unsigned long k, pages;
struct bitmap_page *bp;
@@ -1749,7 +1749,7 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
}
/*
@@ -1834,7 +1834,7 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
return bitmap;
error:
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return ERR_PTR(err);
}
@@ -1936,7 +1936,7 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
*low = lo;
*high = hi;
err:
- bitmap_free(bitmap);
+ md_bitmap_free(bitmap);
return rv;
}
EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index 03af174485d3..fa2432a89bac 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -32,7 +32,7 @@ static struct kmem_cache *_cell_cache;
*/
struct dm_bio_prison *dm_bio_prison_create(void)
{
- struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+ struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
if (!prison)
return NULL;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 58b97226050f..1b7d77080d6b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2192,8 +2192,8 @@ static void wait_for_migrations(struct cache *cache)
static void stop_worker(struct cache *cache)
{
- cancel_delayed_work(&cache->waker);
- flush_workqueue(cache->wq);
+ cancel_delayed_work_sync(&cache->waker);
+ drain_workqueue(cache->wq);
}
static void requeue_deferred_cells(struct cache *cache)
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index cc70871a6d29..3f01c5229e69 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -222,7 +222,8 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- destroy_workqueue(dc->kdelayd_wq);
+ if (dc->kdelayd_wq)
+ destroy_workqueue(dc->kdelayd_wq);
dm_put_device(ti, dc->dev_read);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 3643cba71351..36a98f4db056 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -69,6 +69,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
arg_name = dm_shift_arg(as);
argc--;
+ if (!arg_name) {
+ ti->error = "Insufficient feature arguments";
+ return -EINVAL;
+ }
+
/*
* drop_writes
*/
@@ -258,20 +263,31 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
{
- unsigned bio_bytes = bio_cur_bytes(bio);
- char *data = bio_data(bio);
+ unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1;
+
+ struct bvec_iter iter;
+ struct bio_vec bvec;
+
+ if (!bio_has_data(bio))
+ return;
/*
- * Overwrite the Nth byte of the data returned.
+ * Overwrite the Nth byte of the bio's data, on whichever page
+ * it falls.
*/
- if (data && bio_bytes >= fc->corrupt_bio_byte) {
- data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
-
- DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
- "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
- bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
- (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
+ bio_for_each_segment(bvec, bio, iter) {
+ if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
+ char *segment = (page_address(bio_iter_page(bio, iter))
+ + bio_iter_offset(bio, iter));
+ segment[corrupt_bio_byte] = fc->corrupt_bio_value;
+ DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
+ bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);
+ break;
+ }
+ corrupt_bio_byte -= bio_iter_len(bio, iter);
}
}
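
The reworked corrupt_bio_data() no longer assumes the Nth byte lives in the bio's first segment; it walks the bio segment by segment until the remaining offset fits. The same walk over a plain array of buffers, keeping the 1-based corrupt_bio_byte convention (everything else here is a userspace stand-in, not the bio/bvec API):

/* Walk multi-segment data to overwrite the Nth byte overall (1-based). */
#include <stddef.h>
#include <stdio.h>

struct seg {
	unsigned char *data;
	size_t len;
};

static int corrupt_nth_byte(struct seg *segs, int nsegs,
			    unsigned int nth, unsigned char value)
{
	unsigned int off = nth - 1;	/* convert to a 0-based offset */
	int i;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].len > off) {
			segs[i].data[off] = value;
			return 0;
		}
		off -= segs[i].len;	/* byte lives in a later segment */
	}
	return -1;			/* data shorter than nth bytes */
}

int main(void)
{
	unsigned char a[4] = { 0 }, b[4] = { 0 };
	struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	corrupt_nth_byte(segs, 2, 6, 0xff);	/* 6th byte overall is b[1] */
	printf("b[1] = 0x%02x\n", b[1]);
	return 0;
}
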
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ee6045d6c0bb..201d90f5d1b3 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -50,7 +50,7 @@ struct dm_io_client *dm_io_client_create(void)
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
- client = kmalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index e0cfde3501e0..4609c5b481e2 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -828,7 +828,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
- kc = kmalloc(sizeof(*kc), GFP_KERNEL);
+ kc = kzalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 85c32b22a420..91c6f6d72eee 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -179,7 +179,7 @@ struct dm_region_hash *dm_region_hash_create(
;
nr_buckets >>= 1;
- rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+ rh = kzalloc(sizeof(*rh), GFP_KERNEL);
if (!rh) {
DMERR("unable to allocate region hash memory");
return ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956b577b..771ec28e87cb 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -17,7 +17,7 @@
#include "dm-bufio.h"
#define DM_MSG_PREFIX "persistent snapshot"
-#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32 /* 16KB */
+#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U /* 16KB */
#define DM_PREFETCH_CHUNKS 12
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 2da0b9b213c7..c04d9f22d160 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,7 +19,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
#include "dm.h"
@@ -48,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
- struct rw_semaphore lock;
+ struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -106,8 +105,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
- /* Maximum number of in-flight COW jobs. */
- struct semaphore cow_count;
+ unsigned in_progress;
+ wait_queue_head_t in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@@ -158,8 +157,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -456,9 +455,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- down_read(&s->lock);
+ mutex_lock(&s->lock);
active = s->active;
- up_read(&s->lock);
+ mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@@ -926,7 +925,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -941,7 +940,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@@ -1000,9 +999,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
goto shut;
}
@@ -1043,10 +1042,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1088,10 +1087,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1137,7 +1136,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
origin_mode = FMODE_WRITE;
}
- s = kmalloc(sizeof(*s), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
@@ -1190,7 +1189,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
- init_rwsem(&s->lock);
+ mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1206,7 +1205,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+ init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@@ -1357,9 +1356,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- down_write(&snap_dest->lock);
+ mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
- up_write(&snap_dest->lock);
+ mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1390,13 +1389,62 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ mutex_destroy(&s->lock);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
+ WARN_ON(s->in_progress);
+
kfree(s);
}
+static void account_start_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ s->in_progress++;
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+ spin_lock(&s->in_progress_wait.lock);
+ BUG_ON(!s->in_progress);
+ s->in_progress--;
+ if (likely(s->in_progress <= cow_threshold) &&
+ unlikely(waitqueue_active(&s->in_progress_wait)))
+ wake_up_locked(&s->in_progress_wait);
+ spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+ if (unlikely(s->in_progress > cow_threshold)) {
+ spin_lock(&s->in_progress_wait.lock);
+ if (likely(s->in_progress > cow_threshold)) {
+ /*
+ * NOTE: this throttle doesn't account for whether
+ * the caller is servicing an IO that will trigger a COW
+ * so excess throttling may result for chunks not required
+ * to be COW'd. But if cow_threshold was reached, extra
+ * throttling is unlikely to negatively impact performance.
+ */
+ DECLARE_WAITQUEUE(wait, current);
+ __add_wait_queue(&s->in_progress_wait, &wait);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock(&s->in_progress_wait.lock);
+ if (unlock_origins)
+ up_read(&_origins_lock);
+ io_schedule();
+ remove_wait_queue(&s->in_progress_wait, &wait);
+ return false;
+ }
+ spin_unlock(&s->in_progress_wait.lock);
+ }
+ return true;
+}
+
/*
* Flush a list of buffers.
*/
@@ -1412,7 +1460,7 @@ static void flush_bios(struct bio *bio)
}
}
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@@ -1425,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- r = do_origin(s->origin, bio);
+ r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@@ -1477,7 +1525,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@@ -1485,14 +1533,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@@ -1517,7 +1565,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1579,7 +1627,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
}
list_add(&pe->out_of_order_entry, lh);
}
- up(&s->cow_count);
+ account_end_copy(s);
}
/*
@@ -1603,7 +1651,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- down(&s->cow_count);
+ account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1623,7 +1671,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
- down(&s->cow_count);
+ account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@@ -1714,9 +1762,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return -EIO;
- /* FIXME: should only take write lock if we need
- * to copy an exception */
- down_write(&s->lock);
+ if (bio_data_dir(bio) == WRITE) {
+ while (unlikely(!wait_for_in_progress(s, false)))
+ ; /* wait_for_in_progress() has slept */
+ }
+
+ mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1739,9 +1790,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@@ -1776,7 +1827,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@@ -1786,7 +1837,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@@ -1796,7 +1847,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
out:
return r;
}
@@ -1832,7 +1883,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1863,12 +1914,12 @@ redirect_to_origin:
bio->bi_bdev = s->origin->bdev;
if (bio_data_dir(bio) == WRITE) {
- up_write(&s->lock);
- return do_origin(s->origin, bio);
+ mutex_unlock(&s->lock);
+ return do_origin(s->origin, bio, false);
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return r;
}
@@ -1899,7 +1950,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_read(&snap_src->lock);
+ mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1909,7 +1960,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- up_read(&snap_src->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1955,11 +2006,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_write(&snap_src->lock);
- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&snap_src->lock);
+ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- up_write(&snap_dest->lock);
- up_write(&snap_src->lock);
+ mutex_unlock(&snap_dest->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1974,9 +2025,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->active = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2016,7 +2067,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2041,7 +2092,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
break;
@@ -2107,7 +2158,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@@ -2134,9 +2185,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@@ -2179,7 +2230,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@@ -2200,15 +2251,24 @@ next_snapshot:
/*
* Called on a write from the origin driver.
*/
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
+again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
- if (o)
+ if (o) {
+ if (limit) {
+ struct dm_snapshot *s;
+ list_for_each_entry(s, &o->snapshots, list)
+ if (unlikely(!wait_for_in_progress(s, true)))
+ goto again;
+ }
+
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+ }
up_read(&_origins_lock);
return r;
@@ -2321,7 +2381,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
- return do_origin(o->dev, bio);
+ return do_origin(o->dev, bio, true);
}
static long origin_direct_access(struct dm_target *ti, sector_t sector,
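
The semaphore-based cow_count above is replaced by an in_progress counter and a wait queue, so writers block only while more than cow_threshold copies are in flight, and the origin path can drop _origins_lock before sleeping. A pthread-based sketch of the same accounting follows; the kernel helper returns after a single sleep so its callers can retake locks, whereas here that retry is folded into an ordinary condition-variable loop:

/* Throttle concurrent copies with a counter and a condition variable. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static unsigned in_progress;
static const unsigned cow_threshold = 2;

static void account_start_copy(void)
{
	pthread_mutex_lock(&lock);
	in_progress++;
	pthread_mutex_unlock(&lock);
}

static void account_end_copy(void)
{
	pthread_mutex_lock(&lock);
	in_progress--;
	if (in_progress <= cow_threshold)
		pthread_cond_broadcast(&wait_cv);
	pthread_mutex_unlock(&lock);
}

static void wait_for_in_progress(void)
{
	pthread_mutex_lock(&lock);
	while (in_progress > cow_threshold)
		pthread_cond_wait(&wait_cv, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	account_start_copy();
	account_start_copy();
	wait_for_in_progress();	/* 2 <= threshold: does not block */
	printf("in_progress=%u\n", in_progress);
	account_end_copy();
	account_end_copy();
	return 0;
}
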
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5ac239d0f787..29deda7aed04 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1263,7 +1263,7 @@ void dm_table_event(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_event);
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
@@ -1288,6 +1288,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
unsigned int l, n = 0, k = 0;
sector_t *node;
+ if (unlikely(sector >= dm_table_get_size(t)))
+ return &t->targets[t->num_targets];
+
for (l = 0; l < t->depth; l++) {
n = get_child(n, k);
node = get_node(t, l, n);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 345f4d81ba07..dcb753dbf86e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2965,7 +2965,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return (struct pool *)pmd;
}
- pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
*error = "Error allocating memory for pool";
err_p = ERR_PTR(-ENOMEM);
@@ -3295,6 +3295,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
as.argc = argc;
as.argv = argv;
+ /* make sure metadata and data are different devices */
+ if (!strcmp(argv[0], argv[1])) {
+ ti->error = "Error setting metadata or data device";
+ r = -EINVAL;
+ goto out_unlock;
+ }
+
/*
* Set default pool features.
*/
@@ -4177,6 +4184,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
tc->sort_bio_list = RB_ROOT;
if (argc == 3) {
+ if (!strcmp(argv[0], argv[2])) {
+ ti->error = "Error setting origin device";
+ r = -EINVAL;
+ goto bad_origin_dev;
+ }
+
r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
if (r) {
ti->error = "Error opening origin device";
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 78f36012eaca..8f9957d31a3e 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -563,6 +563,7 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_destroy(f->rs_pool);
mempool_destroy(f->prealloc_pool);
mempool_destroy(f->extra_pool);
+ mempool_destroy(f->output_pool);
kmem_cache_destroy(f->cache);
if (f->data_bufio)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a7b3b3..727f9e571955 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -218,8 +218,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
BUG();
}
- DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
- block);
+ DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+ type_str, block);
if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
DMERR("%s: reached maximum errors", v->data_dev->name);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2ffe7db75acb..dd154027adc9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1457,7 +1457,6 @@ void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
}
void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1468,6 +1467,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
+ md->queue->backing_dev_info.congested_data = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
@@ -1555,6 +1555,12 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
dm_init_md_queue(md);
+ /*
+ * Default to the bio-based ->make_request_fn until the DM table is
+ * loaded and md->type is established. If a request-based table is
+ * loaded, blk-mq will override this accordingly.
+ */
+ blk_queue_make_request(md->queue, dm_make_request);
md->disk = alloc_disk_node(1, numa_node_id);
if (!md->disk)
@@ -1853,7 +1859,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
- blk_queue_make_request(md->queue, dm_make_request);
/*
* DM handles splitting bios as needed. Free the bio_split bioset
* since it won't be used (saves 1 process per bio-based DM device).
@@ -1946,9 +1951,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(q->queue_lock);
+ blk_set_queue_dying(q);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a7a0e3acdb2f..da8708b65356 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1662,8 +1662,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
if (!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RECOVERY_BITMAP))
rdev->saved_raid_disk = -1;
- } else
- set_bit(In_sync, &rdev->flags);
+ } else {
+ /*
+ * If the array is FROZEN, then the device can't
+ * be in_sync with the rest of the array.
+ */
+ if (!test_bit(MD_RECOVERY_FROZEN,
+ &mddev->recovery))
+ set_bit(In_sync, &rdev->flags);
+ }
rdev->raid_disk = role;
break;
}
@@ -2694,8 +2701,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
- rdev->saved_raid_disk >= 0) {
+ if (!rdev->mddev->pers)
+ err = -EINVAL;
+ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -7294,9 +7303,9 @@ static void status_unused(struct seq_file *seq)
static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
- unsigned long dt, db;
- sector_t rt;
- int scale;
+ unsigned long dt, db = 0;
+ sector_t rt, curr_mark_cnt, resync_mark_cnt;
+ int scale, recovery_active;
unsigned int per_milli;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -7366,22 +7375,30 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
* db: blocks written from mark until now
* rt: remaining time
*
- * rt is a sector_t, so could be 32bit or 64bit.
- * So we divide before multiply in case it is 32bit and close
- * to the limit.
- * We scale the divisor (db) by 32 to avoid losing precision
- * near the end of resync when the number of remaining sectors
- * is close to 'db'.
- * We then divide rt by 32 after multiplying by db to compensate.
- * The '+1' avoids division by zero if db is very small.
+ * rt is a sector_t, which is always 64bit now. We are keeping
+ * the original algorithm, but it is not really necessary.
+ *
+ * Original algorithm:
+ * So we divide before multiply in case it is 32bit and close
+ * to the limit.
+ * We scale the divisor (db) by 32 to avoid losing precision
+ * near the end of resync when the number of remaining sectors
+ * is close to 'db'.
+ * We then divide rt by 32 after multiplying by db to compensate.
+ * The '+1' avoids division by zero if db is very small.
*/
dt = ((jiffies - mddev->resync_mark) / HZ);
if (!dt) dt++;
- db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
- - mddev->resync_mark_cnt;
+
+ curr_mark_cnt = mddev->curr_mark_cnt;
+ recovery_active = atomic_read(&mddev->recovery_active);
+ resync_mark_cnt = mddev->resync_mark_cnt;
+
+ if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
+ db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
rt = max_sectors - resync; /* number of remaining sectors */
- sector_div(rt, db/32+1);
+ rt = div64_u64(rt, db/32+1);
rt *= dt;
rt >>= 5;
@@ -8563,7 +8580,8 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+ mddev->degraded != mddev->raid_disks) {
/* success...*/
/* activate any spares */
if (mddev->pers->spare_active(mddev)) {
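
The status_resync() change guards the db calculation against underflow and switches the remaining-time estimate to div64_u64(). The arithmetic itself can be checked in isolation; the figures below are invented, and jiffies/HZ are replaced by a plain elapsed-seconds value:

/* Resync ETA arithmetic: rt = (remaining / (db/32 + 1)) * dt >> 5. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t curr_mark_cnt = 1000000;   /* sectors counted so far */
	uint64_t resync_mark_cnt = 200000;  /* count when the rate mark was taken */
	uint64_t recovery_active = 5000;    /* submitted but not yet finished */
	uint64_t max_sectors = 4000000, resync = 1000000;
	uint64_t dt = 30;		    /* seconds since the mark */
	uint64_t db = 0, rt;

	/* Underflow guard added by the patch: db stays 0 if counters raced. */
	if (curr_mark_cnt >= recovery_active + resync_mark_cnt)
		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	rt = max_sectors - resync;	    /* remaining sectors */
	rt /= db / 32 + 1;		    /* scale divisor to keep precision */
	rt *= dt;
	rt >>= 5;			    /* compensate for the /32 above */

	printf("estimated %" PRIu64 " seconds remaining\n", rt);
	return 0;
}
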
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 21ea537bd55e..eff04fa23dfa 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned threshold = 2 * merge_threshold(left) + 1;
+ /*
+ * Ensure the number of entries in each child will be greater
+ * than or equal to (max_entries / 3 + 1), so no matter which
+ * child is used for removal, the number will still be no
+ * less than (max_entries / 3).
+ */
+ unsigned int threshold = 2 * (merge_threshold(left) + 1);
if (nr_left + nr_right < threshold) {
/*
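
The corrected threshold in __rebalance2() is 2 * (merge_threshold(left) + 1) rather than 2 * merge_threshold(left) + 1, so whenever the two nodes are left unmerged and rebalanced, each keeps at least max_entries/3 + 1 entries. A quick numeric check (126 is only an example capacity, not the btree's actual node size):

/* Check the rebalance threshold keeps both children above one third full. */
#include <stdio.h>

int main(void)
{
	unsigned max_entries = 126;			/* example node capacity */
	unsigned merge_threshold = max_entries / 3;	/* 42 */
	unsigned threshold = 2 * (merge_threshold + 1);	/* 86 */
	unsigned total = threshold;		/* smallest total that is not merged */

	/* redistribute as evenly as possible */
	unsigned nr_left = total / 2, nr_right = total - nr_left;

	printf("threshold=%u left=%u right=%u (floor=%u)\n",
	       threshold, nr_left, nr_right, merge_threshold);
	return 0;
}
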
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index e4ececd3df00..386215245dfe 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -623,39 +623,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
new_parent = shadow_current(s);
+ pn = dm_block_data(new_parent);
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+
+ /* create & init the left block */
r = new_block(s->info, &left);
if (r < 0)
return r;
+ ln = dm_block_data(left);
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+ /* create & init the right block */
r = new_block(s->info, &right);
if (r < 0) {
unlock_block(s->info, left);
return r;
}
- pn = dm_block_data(new_parent);
- ln = dm_block_data(left);
rn = dm_block_data(right);
-
- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
- ln->header.flags = pn->header.flags;
- ln->header.nr_entries = cpu_to_le32(nr_left);
- ln->header.max_entries = pn->header.max_entries;
- ln->header.value_size = pn->header.value_size;
-
rn->header.flags = pn->header.flags;
rn->header.nr_entries = cpu_to_le32(nr_right);
rn->header.max_entries = pn->header.max_entries;
rn->header.value_size = pn->header.value_size;
-
- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
- sizeof(__le64) : s->info->value_type.size;
- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
nr_right * size);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4502c4..22729fd92a1b 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -382,6 +382,33 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
return -ENOSPC;
}
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
+{
+ int r;
+ uint32_t count;
+
+ do {
+ r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
+ if (r)
+ break;
+
+ /* double check this block wasn't used in the old transaction */
+ if (*b >= old_ll->nr_blocks)
+ count = 0;
+ else {
+ r = sm_ll_lookup(old_ll, *b, &count);
+ if (r)
+ break;
+
+ if (count)
+ begin = *b + 1;
+ }
+ } while (count);
+
+ return r;
+}
+
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
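
sm_ll_find_common_free_block() keeps searching until it finds a block that is free in the current transaction's metadata and was also unused (or past the end) in the previous transaction, which is what lets both space maps reuse the result safely. With two plain bitmaps standing in for the old and new ll_disk structures, the loop reduces to:

/* Find a block free in the new map and unused in the old map. */
#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS 8

static int find_free(const bool *used, unsigned begin, unsigned end)
{
	for (; begin < end; begin++)
		if (!used[begin])
			return (int)begin;
	return -1;			/* stands in for -ENOSPC */
}

static int find_common_free(const bool *old_used, unsigned old_nr,
			    const bool *new_used, unsigned new_nr,
			    unsigned begin)
{
	int b;

	for (;;) {
		b = find_free(new_used, begin, new_nr);
		if (b < 0)
			return b;
		/* double check this block wasn't used in the old transaction */
		if ((unsigned)b >= old_nr || !old_used[b])
			return b;
		begin = (unsigned)b + 1;
	}
}

int main(void)
{
	bool old_used[NR_BLOCKS] = { 1, 1, 1, 0, 1, 0, 0, 0 };
	bool new_used[NR_BLOCKS] = { 1, 0, 1, 1, 0, 0, 1, 0 };

	printf("first common free block: %d\n",
	       find_common_free(old_used, NR_BLOCKS, new_used, NR_BLOCKS, 0));
	return 0;
}
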
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index b3078d5eda0c..8de63ce39bdd 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -109,6 +109,8 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result);
+int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
int sm_ll_insert(struct ll_disk *ll, dm_block_t b, uint32_t ref_count, enum allocation_event *ev);
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index 32adf6b4a9c7..bf4c5e2ccb6f 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -167,8 +167,10 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- /* FIXME: we should loop round a couple of times */
- r = sm_ll_find_free_block(&smd->old_ll, smd->begin, smd->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 20557e2c60c6..967d8f2a731f 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
}
if (smm->recursion_count == 1)
- apply_bops(smm);
+ r = apply_bops(smm);
smm->recursion_count--;
@@ -447,7 +447,10 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
enum allocation_event ev;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
+ /*
+ * Any block we allocate has to be free in both the old and current ll.
+ */
+ r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
if (r)
return r;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a2699d..cf745211799f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -82,7 +82,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
char b[BDEVNAME_SIZE];
char b2[BDEVNAME_SIZE];
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned short blksize = 512;
+ unsigned blksize = 512;
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 53048bf0b2b8..8a50da4f148f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2633,7 +2633,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
write_targets++;
}
}
- if (bio->bi_end_io) {
+ if (rdev && bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -2960,6 +2960,13 @@ static int raid1_run(struct mddev *mddev)
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
mddev->degraded++;
+ /*
+ * RAID1 needs at least one active disk
+ */
+ if (conf->raid_disks - mddev->degraded < 1) {
+ ret = -EINVAL;
+ goto abort;
+ }
if (conf->raid_disks - mddev->degraded == 1)
mddev->recovery_cp = MaxSector;
@@ -2994,8 +3001,12 @@ static int raid1_run(struct mddev *mddev)
ret = md_integrity_register(mddev);
if (ret) {
md_unregister_thread(&mddev->thread);
- raid1_free(mddev, conf);
+ goto abort;
}
+ return 0;
+
+abort:
+ raid1_free(mddev, conf);
return ret;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ec74dfe94f4..1143860f0028 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2416,7 +2416,9 @@ static void raid5_end_read_request(struct bio * bi)
&& !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
retry = 1;
if (retry)
- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ if (sh->qd_idx >= 0 && sh->pd_idx == i)
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
set_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
} else
@@ -3878,7 +3880,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
/* now write out any block on a failed drive,
* or P or Q if they were recomputed
*/
- BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
+ dev = NULL;
if (s->failed == 2) {
dev = &sh->dev[s->failed_num[1]];
s->locked++;
@@ -3903,6 +3905,14 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
+ if (WARN_ONCE(dev && !test_bit(R5_UPTODATE, &dev->flags),
+ "%s: disk%td not up to date\n",
+ mdname(conf->mddev),
+ dev - (struct r5dev *) &sh->dev)) {
+ clear_bit(R5_LOCKED, &dev->flags);
+ clear_bit(R5_Wantwrite, &dev->flags);
+ s->locked--;
+ }
clear_bit(STRIPE_DEGRADED, &sh->state);
set_bit(STRIPE_INSYNC, &sh->state);
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 75a3f4b57fd4..a1cc1c1e5318 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -314,8 +314,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
if (npads) {
dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
GFP_KERNEL);
- if (!dvbdev->pads)
+ if (!dvbdev->pads) {
+ kfree(dvbdev->entity);
return -ENOMEM;
+ }
}
switch (type) {
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 31f16105184c..59a4563c0466 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
u16 u16tmp;
u32 tuner_frequency_khz, target_mclk;
s32 s32tmp;
+ static const struct reg_sequence reset_buf[] = {
+ {0x07, 0x80}, {0x07, 0x00}
+ };
dev_dbg(&client->dev,
"delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
}
/* reset */
- ret = regmap_write(dev->regmap, 0x07, 0x80);
- if (ret)
- goto err;
-
- ret = regmap_write(dev->regmap, 0x07, 0x00);
+ ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
if (ret)
goto err;
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 6da12b9e55eb..02c734b8718b 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -80,8 +80,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
-#define _R 4
-#define _P 32
+#define _R_VAL 4
+#define _P_VAL 32
#define _ri 4000000
// setup register 0
@@ -96,14 +96,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
else
reg1[1] = 0x0c;
- if (_P == 64)
+ if (_P_VAL == 64)
reg1[1] |= 0x40;
if (c->frequency >= 1525000)
reg1[1] |= 0x80;
// register 2
- reg2[1] = (_R >> 8) & 0x03;
- reg2[2] = _R;
+ reg2[1] = (_R_VAL >> 8) & 0x03;
+ reg2[2] = _R_VAL;
if (c->frequency < 1455000)
reg2[1] |= 0x1c;
else if (c->frequency < 1630000)
@@ -115,18 +115,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
* The N divisor ratio (note: c->frequency is in kHz, but we
* need it in Hz)
*/
- prediv = (c->frequency * _R) / (_ri / 1000);
- div = prediv / _P;
+ prediv = (c->frequency * _R_VAL) / (_ri / 1000);
+ div = prediv / _P_VAL;
reg1[1] |= (div >> 9) & 0x03;
reg1[2] = div >> 1;
reg1[3] = (div << 7);
- priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
+ priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
// Finally, calculate and store the value for A
- reg1[3] |= (prediv - (div*_P)) & 0x7f;
+ reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
-#undef _R
-#undef _P
+#undef _R_VAL
+#undef _P_VAL
#undef _ri
if (fe->ops.i2c_gate_ctrl)
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 92773b2e6225..bfe0afc209b8 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511-v4l2.c
index 5f1c8ee8a50e..b87c9e7ff146 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -17,6 +17,11 @@
* SOFTWARE.
*/
+/*
+ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
+ * Devices ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511).
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 72e71b762827..a6145877bd00 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -974,6 +974,8 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
mt9m111->rect.width = MT9M111_MAX_WIDTH;
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
+ mt9m111->width = mt9m111->rect.width;
+ mt9m111->height = mt9m111->rect.height;
mt9m111->fmt = &mt9m111_colour_fmts[0];
mt9m111->lastpage = -1;
mutex_init(&mt9m111->power_lock);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 58eb62f1ba21..a018a76662df 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -423,10 +423,12 @@ static int mt9v032_enum_mbus_code(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = mt9v032->format.code;
return 0;
}
@@ -434,7 +436,11 @@ static int mt9v032_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->index >= 3 || fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ struct mt9v032 *mt9v032 = to_mt9v032(subdev);
+
+ if (fse->index >= 3)
+ return -EINVAL;
+ if (mt9v032->format.code != fse->code)
return -EINVAL;
fse->min_width = MT9V032_WINDOW_WIDTH_DEF / (1 << fse->index);
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 1f999e9c0118..18546f950d79 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -419,10 +419,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -1117,8 +1121,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
if (ov2659_formats[index].code == mf->code)
break;
- if (index < 0)
- return -EINVAL;
+ if (index < 0) {
+ index = 0;
+ mf->code = ov2659_formats[index].code;
+ }
mf->colorspace = V4L2_COLORSPACE_SRGB;
mf->code = ov2659_formats[index].code;
@@ -1131,7 +1137,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
#else
- return -ENOTTY;
+ ret = -ENOTTY;
#endif
} else {
s64 val;
@@ -1202,11 +1208,15 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 56cfb5ca9c95..0a72228734ae 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -155,10 +155,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
#define REG_DBLV 0x6b /* PLL control an debugging */
-#define DBLV_BYPASS 0x00 /* Bypass PLL */
-#define DBLV_X4 0x01 /* clock x4 */
-#define DBLV_X6 0x10 /* clock x6 */
-#define DBLV_X8 0x11 /* clock x8 */
+#define DBLV_BYPASS 0x0a /* Bypass PLL */
+#define DBLV_X4 0x4a /* clock x4 */
+#define DBLV_X6 0x8a /* clock x6 */
+#define DBLV_X8 0xca /* clock x8 */
#define REG_REG76 0x76 /* OV's name */
#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
@@ -833,7 +833,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- return ov7670_write(sd, REG_DBLV, DBLV_X4);
+ return 0;
}
static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@@ -1578,11 +1578,7 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
- /*
- * It should be allowed for ov7670 too when it is migrated to
- * the new frame rate formula.
- */
- if (config->pll_bypass && id->driver_data != MODEL_OV7670)
+ if (config->pll_bypass)
info->pll_bypass = true;
if (config->pclk_hb_disable)
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 502c72238a4a..db962e2108ad 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -708,6 +708,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
for (m = 6; m >= 0; m--)
if (gain >= (1 << m) * 16)
break;
+
+ /* Sanity check: don't adjust the gain with a negative value */
+ if (m < 0)
+ return -EINVAL;
+
rgain = (gain - ((1 << m) * 16)) / (1 << m);
rgain |= (((1 << m) - 1) << 4);
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index 8f85910eda5d..3c959e48855c 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -203,7 +203,6 @@ struct ov6650 {
unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_parm */
u32 code;
- enum v4l2_colorspace colorspace;
};
@@ -216,6 +215,17 @@ static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_Y8_1X8,
};
+static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
+ .width = W_CIF,
+ .height = H_CIF,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -512,12 +522,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- mf->colorspace = priv->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ /* initialize response with default media bus frame format */
+ *mf = ov6650_def_fmt;
+
+ /* update media bus format code and frame size */
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -612,7 +630,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
@@ -625,11 +642,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
priv->pclk_max = 8000000;
}
- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
- priv->colorspace = V4L2_COLORSPACE_SRGB;
- else if (code != 0)
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
if (half_scale) {
dev_dbg(&client->dev, "max resolution: QCIF\n");
coma_set |= COMA_QCIF;
@@ -638,7 +650,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
if (sense) {
if (sense->master_clock == 8000000) {
@@ -678,14 +689,14 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
if (!ret)
ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
-
if (!ret) {
- mf->colorspace = priv->colorspace;
- mf->width = priv->rect.width >> half_scale;
- mf->height = priv->rect.height >> half_scale;
+ priv->half_scale = half_scale;
+
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
+ if (!ret)
+ priv->code = code;
+
return ret;
}
@@ -704,8 +715,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
&mf->height, 2, H_CIF, 1, 0);
- mf->field = V4L2_FIELD_NONE;
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -714,19 +723,38 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
case MEDIA_BUS_FMT_SBGGR8_1X8:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov6650_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* store media bus format code and frame size in pad config */
+ cfg->try_fmt.width = mf->width;
+ cfg->try_fmt.height = mf->height;
+ cfg->try_fmt.code = mf->code;
+
+ /* return default mbus frame format updated with pad config */
+ *mf = ov6650_def_fmt;
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ /* apply new media bus format code and frame size */
+ int ret = ov6650_s_fmt(sd, mf);
+
+ if (ret)
+ return ret;
+
+ /* return default format updated with active size and code */
+ *mf = ov6650_def_fmt;
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -840,9 +868,18 @@ static int ov6650_video_probe(struct i2c_client *client)
u8 pidh, pidl, midh, midl;
int ret;
+ priv->clk = v4l2_clk_get(&client->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
+ return ret;
+ }
+
ret = ov6650_s_power(&priv->subdev, 1);
if (ret < 0)
- return ret;
+ goto eclkput;
+
+ msleep(20);
/*
* check and show product ID and manufacturer ID
@@ -877,6 +914,11 @@ static int ov6650_video_probe(struct i2c_client *client)
done:
ov6650_s_power(&priv->subdev, 0);
+ if (!ret)
+ return 0;
+eclkput:
+ v4l2_clk_put(priv->clk);
+
return ret;
}
@@ -1031,20 +1073,10 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.height = H_CIF;
priv->half_scale = false;
priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
- priv->clk = v4l2_clk_get(&client->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto eclkget;
- }
ret = ov6650_video_probe(client);
- if (ret) {
- v4l2_clk_put(priv->clk);
-eclkget:
+ if (ret)
v4l2_ctrl_handler_free(&priv->hdl);
- }
return ret;
}
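
For reference, the ov6650 hunks above split TRY and ACTIVE format handling and, in s_fmt, only commit the cached half_scale and code after the corresponding register writes succeed. A minimal sketch of that commit-on-success pattern, with illustrative names and assumed register helpers (not the driver's real API):

/* assumed hardware helpers, declared only for this sketch */
int sensor_write_scaling_regs(int half_scale);
int sensor_write_code_regs(unsigned int code);

struct sensor_state {
	unsigned int code;	/* active media bus code */
	int half_scale;		/* active scaling */
};

static int sensor_apply_format(struct sensor_state *s,
			       unsigned int code, int half_scale)
{
	int ret;

	ret = sensor_write_scaling_regs(half_scale);
	if (ret)
		return ret;		/* cached state left untouched */
	s->half_scale = half_scale;

	ret = sensor_write_code_regs(code);
	if (ret)
		return ret;
	s->code = code;			/* commit only after all writes */

	return 0;
}
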
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index e83f8111a5fb..b27362ae4a5e 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -824,7 +824,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
return 0;
case V4L2_CID_HUE:
tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
- break;
+ return 0;
case V4L2_CID_TEST_PATTERN:
decoder->enable = ctrl->val ? false : true;
tvp5150_selmux(sd);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 6f46c59415fe..73a2dba475d0 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -474,6 +474,7 @@ static long media_device_enum_links32(struct media_device *mdev,
{
struct media_links_enum links;
compat_uptr_t pads_ptr, links_ptr;
+ int ret;
memset(&links, 0, sizeof(links));
@@ -485,7 +486,14 @@ static long media_device_enum_links32(struct media_device *mdev,
links.pads = compat_ptr(pads_ptr);
links.links = compat_ptr(links_ptr);
- return media_device_enum_links(mdev, &links);
+ ret = media_device_enum_links(mdev, &links);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(ulinks->reserved, links.reserved,
+ sizeof(ulinks->reserved)))
+ return -EFAULT;
+ return 0;
}
#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
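
The media-device.c compat hunk propagates the native handler's error and only then copies the reserved field back to user space. A hedged sketch of that ioctl shape, using hypothetical demo_* types and an assumed native handler:

#include <linux/uaccess.h>

struct demo_result   { unsigned int reserved[4]; };
struct demo_result32 { unsigned int reserved[4]; };

int demo_do_enum(struct demo_result *res);	/* assumed native handler */

static long demo_compat_enum(struct demo_result32 __user *ures)
{
	struct demo_result res;
	int ret;

	ret = demo_do_enum(&res);
	if (ret)
		return ret;		/* nothing copied back on failure */

	if (copy_to_user(ures->reserved, res.reserved,
			 sizeof(ures->reserved)))
		return -EFAULT;
	return 0;
}
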
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index df837408efd5..0171dc5b8809 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -490,7 +490,7 @@ static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
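
The cx18 hunk (and the matching ivtv one further down) fixes a pointer bug: "pos += rc" advanced the local pointer variable instead of the file offset it points to. A stand-alone plain-C illustration, independent of the driver:

#include <stdio.h>

static void advance(long *pos, long rc)
{
	*pos += rc;	/* correct: updates the caller's offset */
	/* pos += rc;  would only move the local pointer */
}

int main(void)
{
	long off = 0;

	advance(&off, 512);
	printf("offset is now %ld\n", off);	/* prints 512 */
	return 0;
}
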
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 818f3c2fc98d..1d86e57f4d9f 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1471,8 +1471,9 @@ static int dvb_register(struct cx23885_tsport *port)
if (fe0->dvb.frontend != NULL) {
struct i2c_adapter *tun_i2c;
- fe0->dvb.frontend->sec_priv = kmalloc(sizeof(dib7000p_ops), GFP_KERNEL);
- memcpy(fe0->dvb.frontend->sec_priv, &dib7000p_ops, sizeof(dib7000p_ops));
+ fe0->dvb.frontend->sec_priv = kmemdup(&dib7000p_ops, sizeof(dib7000p_ops), GFP_KERNEL);
+ if (!fe0->dvb.frontend->sec_priv)
+ return -ENOMEM;
tun_i2c = dib7000p_ops.get_i2c_master(fe0->dvb.frontend, DIBX000_I2C_INTERFACE_TUNER, 1);
if (!dvb_attach(dib0070_attach, fe0->dvb.frontend, tun_i2c, &dib7070p_dib0070_config))
return -ENODEV;
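
The cx23885 hunk replaces an unchecked kmalloc()+memcpy() with kmemdup() and bails out on allocation failure instead of dereferencing NULL. A minimal sketch of that conversion with hypothetical demo_* types:

#include <linux/slab.h>

struct demo_ops { int (*tune)(void *priv); };
struct demo_fe  { void *sec_priv; };

static int demo_attach_ops(struct demo_fe *fe,
			   const struct demo_ops *ops_tmpl)
{
	fe->sec_priv = kmemdup(ops_tmpl, sizeof(*ops_tmpl), GFP_KERNEL);
	if (!fe->sec_priv)
		return -ENOMEM;
	return 0;
}
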
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..e2b19c3eaa87 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -420,7 +420,7 @@ static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t co
IVTV_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index f7299d3d8244..bcb51e87c72f 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -935,7 +935,7 @@ static void ivtv_yuv_init(struct ivtv *itv)
}
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
- yi->blanking_ptr = kzalloc(720 * 16, GFP_KERNEL|__GFP_NOWARN);
+ yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN);
if (yi->blanking_ptr) {
yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
} else {
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index ba887e8e1b17..a85c5199ccd3 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1469,7 +1469,7 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long page, pos;
mutex_lock(&meye.lock);
- if (size > gbuffers * gbufsize) {
+ if (size > gbuffers * gbufsize || offset > gbuffers * gbufsize - size) {
mutex_unlock(&meye.lock);
return -EINVAL;
}
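
The meye mmap hunk also bounds the requested offset so that offset plus size cannot run past the buffer pool. Checking "offset > total - size" after verifying "size <= total" avoids the wrap-around that a naive "offset + size > total" test can hit. Plain-C form of that check:

#include <stdbool.h>
#include <stddef.h>

static bool range_ok(size_t offset, size_t size, size_t total)
{
	if (size > total)
		return false;
	return offset <= total - size;	/* no overflow possible here */
}
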
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index dca0592c5f47..6f93568f5620 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,7 +355,11 @@ static struct i2c_client saa7134_client_template = {
/* ----------------------------------------------------------- */
-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+/*
+ * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
+ * demod i2c gate closed due to an address clash between this EEPROM
+ * and the demod one.
+ */
static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
{
u8 subaddr = 0x7, dmdregval;
@@ -372,14 +376,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
if ((ret == 2) && (dmdregval & 0x2)) {
- pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ pr_debug("%s: DVB-T demod i2c gate was left open\n",
dev->name);
data[0] = subaddr;
data[1] = (dmdregval & ~0x2);
if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
- pr_err("%s: EEPROM i2c gate open failure\n",
- dev->name);
+ pr_err("%s: EEPROM i2c gate close failure\n",
+ dev->name);
}
}
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index c889ec9f8a5a..be85a2c4318e 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
- hexium->i2c_adapter = (struct i2c_adapter) {
- .name = "hexium gemini",
- };
+ strscpy(hexium->i2c_adapter.name, "hexium gemini",
+ sizeof(hexium->i2c_adapter.name));
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
DEB_S("cannot register i2c-device. skipping.\n");
@@ -305,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
if (ret < 0) {
pr_err("cannot register capture v4l2 device. skipping.\n");
+ saa7146_vv_release(dev);
+ i2c_del_adapter(&hexium->i2c_adapter);
+ kfree(hexium);
return ret;
}
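
The hexium_gemini hunks copy the adapter name with strscpy() instead of overwriting the whole i2c_adapter with a compound literal, and unwind the already-acquired resources when the final registration fails. A hedged sketch of that unwind ladder; the demo_* helpers are assumed:

#include <linux/i2c.h>
#include <linux/string.h>

struct demo_dev { struct i2c_adapter i2c_adapter; };

int demo_setup_vv(struct demo_dev *dev);	/* assumed helpers */
void demo_release_vv(struct demo_dev *dev);
int demo_register_video(struct demo_dev *dev);

static int demo_attach(struct demo_dev *dev)
{
	int ret;

	strscpy(dev->i2c_adapter.name, "demo adapter",
		sizeof(dev->i2c_adapter.name));

	ret = demo_setup_vv(dev);
	if (ret)
		return ret;

	ret = i2c_add_adapter(&dev->i2c_adapter);
	if (ret)
		goto err_vv;

	ret = demo_register_video(dev);
	if (ret)
		goto err_i2c;

	return 0;

err_i2c:
	i2c_del_adapter(&dev->i2c_adapter);
err_vv:
	demo_release_vv(dev);
	return ret;
}
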
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index c306a92e8909..dc07ca37ebd0 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -232,9 +232,8 @@ static int hexium_probe(struct saa7146_dev *dev)
saa7146_write(dev, DD1_STREAM_B, 0x00000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
- hexium->i2c_adapter = (struct i2c_adapter) {
- .name = "hexium orion",
- };
+ strscpy(hexium->i2c_adapter.name, "hexium orion",
+ sizeof(hexium->i2c_adapter.name));
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
DEB_S("cannot register i2c-device. skipping.\n");
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 1ddf80f85c24..27ff6e0d9845 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1386,13 +1386,13 @@ static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
input->vb = NULL;
spin_unlock_irqrestore(&input->slock, flags);
- v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
-
if (!vb) { /* Gone because of disabling */
dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
return;
}
+ v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+
/*
* Check for space.
* Mind the overhead of startcode emulation prevention.
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 05489a401c5c..bd500f12d0f7 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1847,6 +1847,10 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std then nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index ccfe13b7d3f8..ecf9fb08f36b 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -1297,8 +1297,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
break;
}
- subdev_entity->asd = devm_kzalloc(dev,
- sizeof(*subdev_entity->asd), GFP_KERNEL);
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
if (subdev_entity->asd == NULL) {
of_node_put(rem);
ret = -ENOMEM;
@@ -1432,6 +1435,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
+ kfree(subdev_entity->asd);
goto cleanup_subdev;
}
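
The atmel-isc hunks encode an ownership rule: the async subdev descriptor must not be devm-managed, because the notifier core frees it after a successful registration, so the caller frees it only on the failure path. A minimal sketch of that rule with illustrative names:

#include <linux/slab.h>

struct demo_asd { int match_type; };
struct demo_entity { struct demo_asd *asd; };

int demo_register_async_notifier(struct demo_entity *ent);	/* assumed */

static int demo_add_asd(struct demo_entity *ent)
{
	int ret;

	ent->asd = kzalloc(sizeof(*ent->asd), GFP_KERNEL);
	if (!ent->asd)
		return -ENOMEM;

	ret = demo_register_async_notifier(ent);
	if (ret) {
		kfree(ent->asd);	/* still ours on failure */
		ent->asd = NULL;
		return ret;
	}
	return 0;	/* on success the framework owns and frees asd */
}
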
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b6625047250d..7b4c93619c3d 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1581,6 +1581,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
return -ETIMEDOUT;
}
+ ctx->sequence_offset = ~0U;
ctx->initialized = 1;
/* Update kfifo out pointer from coda bitstream read pointer */
@@ -1829,6 +1830,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
/* Clear decode success flag */
coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+ /* Clear error return value */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
+
trace_coda_dec_pic_run(ctx, meta);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
@@ -1963,12 +1967,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
else if (ctx->display_idx < 0)
ctx->hold = true;
} else if (decoded_idx == -2) {
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames)
+ ctx->sequence_offset++;
/* no frame was decoded, we still return remaining buffers */
} else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
v4l2_err(&dev->v4l2_dev,
"decoded frame index out of range: %d\n", decoded_idx);
} else {
- val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
+ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
+ if (ctx->sequence_offset == -1)
+ ctx->sequence_offset = val;
val -= ctx->sequence_offset;
spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
if (!list_empty(&ctx->buffer_meta_list)) {
@@ -2098,7 +2107,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
if (ctx == NULL) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
- mutex_unlock(&dev->coda_mutex);
return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 99faea2e84c6..b51b875c5a61 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -890,9 +890,7 @@ static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
static int isif_config_ycbcr(void)
{
struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
- struct vpss_pg_frame_size frame_size;
u32 modeset = 0, ccdcfg = 0;
- struct vpss_sync_pol sync;
dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
@@ -980,13 +978,6 @@ static int isif_config_ycbcr(void)
/* two fields are interleaved in memory */
regw(0x00000249, SDOFST);
- /* Setup test pattern if enabled */
- if (isif_cfg.bayer.config_params.test_pat_gen) {
- sync.ccdpg_hdpol = params->hd_pol;
- sync.ccdpg_vdpol = params->vd_pol;
- dm365_vpss_set_sync_pol(sync);
- dm365_vpss_set_pg_frame_size(frame_size);
- }
return 0;
}
@@ -1106,7 +1097,8 @@ fail_nobase_res:
while (i >= 0) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- release_mem_region(res->start, resource_size(res));
+ if (res)
+ release_mem_region(res->start, resource_size(res));
i--;
}
vpfe_unregister_ccdc_device(&isif_hw_dev);
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index abce9c4a1a8e..59518c08528b 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -130,7 +130,7 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
struct v4l2_output *output)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int temp_index = output->index;
+ unsigned int temp_index = output->index;
if (temp_index >= cfg->num_outputs)
return -EINVAL;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index a9bc0175e4d3..c839003953a7 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -518,7 +518,7 @@ vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
else if (v_scale == 4)
layer_info->v_zoom = ZOOM_X4;
if (v_exp)
- layer_info->h_exp = V_EXP_6_OVER_5;
+ layer_info->v_exp = V_EXP_6_OVER_5;
} else {
/* no scaling, only cropping. Set display area to crop area */
cfg->ysize = expected_ysize;
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index fce86f17dffc..c2c68988e38a 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -523,6 +523,11 @@ static int __init vpss_init(void)
return -EBUSY;
oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+ release_mem_region(VPSS_CLK_CTRL, 4);
+ return -ENOMEM;
+ }
+
writel(VPSS_CLK_CTRL_VENCCLKEN |
VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 7f92144a1de3..f9456f26ff4f 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev)
return -ENODEV;
is->pmu_regs = of_iomap(node, 0);
+ of_node_put(node);
if (!is->pmu_regs)
return -ENOMEM;
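
The exynos4-is hunks drop device-tree node references once they are no longer needed (of_node_put() after of_iomap() and after port parsing). A minimal sketch of that get/put balance; the "pmu" child name is only illustrative:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *demo_map_pmu(struct device_node *parent)
{
	struct device_node *node;
	void __iomem *regs;

	node = of_get_child_by_name(parent, "pmu");	/* takes a reference */
	if (!node)
		return NULL;

	regs = of_iomap(node, 0);
	of_node_put(node);	/* the mapping does not pin the node */

	return regs;
}
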
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index e00fa03ddc3e..0c0eec671d49 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -316,7 +316,7 @@ static int isp_video_release(struct file *file)
ivc->streaming = 0;
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
if (v4l2_fh_is_singular_file(file)) {
fimc_pipeline_call(&ivc->ve, close);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 1a1154a9dfa4..ef6ccb5b8952 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -494,6 +494,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
continue;
ret = fimc_md_parse_port_node(fmd, port, index);
+ of_node_put(port);
if (ret < 0) {
of_node_put(node);
goto rpm_put;
@@ -527,6 +528,7 @@ static int __of_get_csis_id(struct device_node *np)
if (!np)
return -EINVAL;
of_property_read_u32(np, "reg", &reg);
+ of_node_put(np);
return reg - FIMC_INPUT_MIPI_CSI2_0;
}
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index af59bf4dca2d..a74bfb9afc8d 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -209,7 +209,6 @@ struct mcam_vb_buffer {
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
- int dma_desc_nent; /* Number of mapped descriptors */
};
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
@@ -616,9 +615,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
struct mcam_vb_buffer *buf;
+ struct sg_table *sg_table;
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
+ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
/*
* Very Bad Not Good Things happen if you don't clear
* C1_DESC_ENA before making any descriptor changes.
@@ -626,7 +627,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
mcam_reg_write(cam, REG_DESC_LEN_Y,
- buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ sg_table->nents * sizeof(struct mcam_dma_desc));
mcam_reg_write(cam, REG_DESC_LEN_U, 0);
mcam_reg_write(cam, REG_DESC_LEN_V, 0);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index e68d271b10af..8354ad20865a 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -288,7 +288,7 @@ static void emmaprp_device_run(void *priv)
{
struct emmaprp_ctx *ctx = priv;
struct emmaprp_q_data *s_q_data, *d_q_data;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct emmaprp_dev *pcdev = ctx->dev;
unsigned int s_width, s_height;
unsigned int d_width, d_height;
@@ -308,8 +308,8 @@ static void emmaprp_device_run(void *priv)
d_height = d_q_data->height;
d_size = d_width * d_height;
- p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
if (!p_in || !p_out) {
v4l2_err(&pcdev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index a31b95cb3b09..77ec70f5fef6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1526,23 +1526,20 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
unsigned long size;
struct videobuf_buffer *vb;
- vb = q->bufs[b->index];
-
if (!vout->streaming)
return -EINVAL;
- if (file->f_flags & O_NONBLOCK)
- /* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
- else
- /* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
addr = (unsigned long) vout->buf_phy_addr[vb->i];
size = (unsigned long) vb->size;
dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
size, DMA_TO_DEVICE);
- return ret;
+ return 0;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index a21b12c5c085..ce651d3ca1b8 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -726,6 +726,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
s_stream, mode);
pipe->do_propagation = true;
}
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
}
return 0;
@@ -840,6 +844,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
&subdev->entity);
failure = -ETIMEDOUT;
}
+
+ /* Stop at the first external sub-device. */
+ if (subdev->dev != isp->dev)
+ break;
}
return failure;
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 882310eb45cc..fe16fbd95221 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -2608,6 +2608,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
int ret;
/* Register the subdev and video node. */
+ ccdc->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index ca095238510d..b64e218eaea6 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -1030,6 +1030,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
int ret;
/* Register the subdev and video nodes. */
+ ccp2->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index f75a1be29d84..27a2913363b6 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -1206,6 +1206,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
int ret;
/* Register the subdev and video nodes. */
+ csi2->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
index ac30a0f83780..e981eb2330f1 100644
--- a/drivers/media/platform/omap3isp/isppreview.c
+++ b/drivers/media/platform/omap3isp/isppreview.c
@@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
int ret;
/* Register the subdev and video nodes. */
+ prev->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &prev->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
index 0b6a87508584..2035e3c6a9de 100644
--- a/drivers/media/platform/omap3isp/ispresizer.c
+++ b/drivers/media/platform/omap3isp/ispresizer.c
@@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
int ret;
/* Register the subdev and video nodes. */
+ res->subdev.dev = vdev->mdev->dev;
ret = v4l2_device_register_subdev(vdev, &res->subdev);
if (ret < 0)
goto error;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 1b9217d3b1b6..4a4ae637655b 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -1010,6 +1010,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
int omap3isp_stat_register_entities(struct ispstat *stat,
struct v4l2_device *vdev)
{
+ stat->subdev.dev = vdev->mdev->dev;
+
return v4l2_device_register_subdev(vdev, &stat->subdev);
}
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 390d708c807a..3fab9f776afa 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2334,7 +2334,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
- if (&pdev->dev.of_node && !pcdev->pdata) {
+ if (pdev->dev.of_node && !pcdev->pdata) {
err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
} else {
pcdev->platform_flags = pcdev->pdata->flags;
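
The pxa_camera hunk fixes an always-true condition: "&pdev->dev.of_node" is the address of the field and can never be NULL, while the intent was to test the pointer stored in the field. Plain-C illustration of the difference:

#include <stdio.h>

struct dev { void *of_node; };

int main(void)
{
	struct dev d = { .of_node = NULL };

	if (&d.of_node)			/* always true: address of a field */
		printf("&d.of_node is non-NULL\n");
	if (!d.of_node)			/* the check that was intended */
		printf("d.of_node itself is NULL\n");
	return 0;
}
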
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 62c0dec30b59..5f6ccf492111 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -498,7 +498,7 @@ static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
- struct vb2_buffer *src, *dst;
+ struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
u32 cmd = 0;
@@ -513,10 +513,10 @@ static void device_run(void *prv)
spin_lock_irqsave(&dev->ctrl_lock, flags);
g2d_set_src_size(dev, &ctx->in);
- g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
+ g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
g2d_set_dst_size(dev, &ctx->out);
- g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
+ g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
g2d_set_rop4(dev, ctx->rop);
g2d_set_flip(dev, ctx->flip);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 1da2c94e1dca..a63f4eec366e 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -789,14 +789,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, x, components;
jpeg_buffer.size = 2; /* Ls */
jpeg_buffer.data =
- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
jpeg_buffer.curr = 0;
word = 0;
@@ -826,14 +826,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, n, j;
for (j = 0; j < ctx->out_q.dht.n; ++j) {
jpeg_buffer.size = ctx->out_q.dht.len[j];
- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dht.marker[j];
jpeg_buffer.curr = 0;
@@ -885,13 +885,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
int c, x, components;
jpeg_buffer.size = ctx->out_q.sof_len;
jpeg_buffer.data =
- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
+ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
jpeg_buffer.curr = 0;
skip(&jpeg_buffer, 5); /* P, Y, X */
@@ -916,14 +916,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, j;
for (j = 0; j < ctx->out_q.dqt.n; ++j) {
jpeg_buffer.size = ctx->out_q.dqt.len[j];
- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
+ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dqt.marker[j];
jpeg_buffer.curr = 0;
@@ -1264,13 +1264,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
return 0;
}
-static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
+static int enum_fmt(struct s5p_jpeg_ctx *ctx,
+ struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
+ unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
for (i = 0; i < n; ++i) {
- if (sjpeg_formats[i].flags & type) {
+ if (sjpeg_formats[i].flags & type &&
+ sjpeg_formats[i].flags & fmt_ver_flag) {
/* index-th format of type type found ? */
if (num == f->index)
break;
@@ -1297,11 +1300,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_CAPTURE);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_CAPTURE);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
@@ -1310,11 +1313,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_OUTPUT);
- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
- SJPEG_FMT_FLAG_DEC_OUTPUT);
+ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
+ SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
@@ -1960,7 +1963,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
- 0, 3, 0xffff, 0);
+ 0, 0xffff, 1, 0);
if (ctx->jpeg->variant->version == SJPEG_S5P)
mask = ~0x06; /* 422, 420 */
}
@@ -2027,15 +2030,15 @@ static void s5p_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long src_addr, dst_addr, flags;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
- src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
- dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
s5p_jpeg_reset(jpeg->regs);
s5p_jpeg_poweron(jpeg->regs);
@@ -2108,7 +2111,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size, padding_bytes = 0;
@@ -2127,7 +2130,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}
- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
@@ -2145,7 +2148,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
ctx->mode == S5P_JPEG_DECODE)
jpeg_addr += ctx->out_q.sos;
@@ -2268,7 +2271,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size;
@@ -2282,7 +2285,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
fmt = ctx->cap_q.fmt;
}
- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size;
@@ -2300,7 +2303,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
- struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
@@ -2308,7 +2311,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
}
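
The s5p-jpeg control hunk fixes the argument order of v4l2_ctrl_new_std(), whose range arguments are (min, max, step, default), so the restart interval spans 0..0xffff with step 1. A hedged sketch assuming a control-ops table named demo_ctrl_ops:

#include <media/v4l2-ctrls.h>

extern const struct v4l2_ctrl_ops demo_ctrl_ops;	/* assumed */

struct demo_ctx { struct v4l2_ctrl_handler ctrl_handler; };

static int demo_controls_create(struct demo_ctx *ctx)
{
	v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);

	/* after the CID: min, max, step, default */
	v4l2_ctrl_new_std(&ctx->ctrl_handler, &demo_ctrl_ops,
			  V4L2_CID_JPEG_RESTART_INTERVAL,
			  0, 0xffff, 1, 0);

	return ctx->ctrl_handler.error;
}
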
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 15a562af13c7..a4f593220ef0 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -276,13 +276,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
static void sh_veu_device_run(void *priv)
{
struct sh_veu_dev *veu = priv;
- struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
if (src_buf && dst_buf)
- sh_veu_process(veu, src_buf, dst_buf);
+ sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
}
/* ========== video ioctls ========== */
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index b7892f3efd98..5c4c3f0c57be 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -14,8 +14,8 @@
#define MAX_SRC_WIDTH 2048
/* Reset & boot poll config */
-#define POLL_RST_MAX 50
-#define POLL_RST_DELAY_MS 20
+#define POLL_RST_MAX 500
+#define POLL_RST_DELAY_MS 2
enum bdisp_target_plan {
BDISP_RGB,
@@ -382,7 +382,7 @@ int bdisp_hw_reset(struct bdisp_dev *bdisp)
for (i = 0; i < POLL_RST_MAX; i++) {
if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
break;
- msleep(POLL_RST_DELAY_MS);
+ udelay(POLL_RST_DELAY_MS * 1000);
}
if (i == POLL_RST_MAX)
dev_err(bdisp->dev, "Reset timeout\n");
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 45f82b5ddd77..d88c9ba401b5 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -651,8 +651,7 @@ static int bdisp_release(struct file *file)
dev_dbg(bdisp->dev, "%s\n", __func__);
- if (mutex_lock_interruptible(&bdisp->lock))
- return -ERESTARTSYS;
+ mutex_lock(&bdisp->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 44323cb5d287..563b9636ab63 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -548,16 +548,16 @@ static void enable_irqs(struct cal_ctx *ctx)
static void disable_irqs(struct cal_ctx *ctx)
{
+ u32 val;
+
/* Disable IRQ_WDMA_END 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(2),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(2), val);
/* Disable IRQ_WDMA_START 0/1 */
- reg_write_field(ctx->dev,
- CAL_HL_IRQENABLE_CLR(3),
- CAL_HL_IRQ_CLEAR,
- CAL_HL_IRQ_MASK(ctx->csi2_port));
+ val = 0;
+ set_field(&val, CAL_HL_IRQ_CLEAR, CAL_HL_IRQ_MASK(ctx->csi2_port));
+ reg_write(ctx->dev, CAL_HL_IRQENABLE_CLR(3), val);
/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
}
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 0189f7f7cb03..dbb4829acc43 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -330,20 +330,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -1293,6 +1298,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_DATA_INTERLACED) {
@@ -1433,9 +1439,9 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
int i, depth, depth_bytes;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
@@ -1992,7 +1998,7 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
s_q_data->width = 1920;
s_q_data->height = 1080;
s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index d300e5e7eadc..2ca9c928ed2f 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -777,7 +777,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
@@ -930,8 +934,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
- mutex_lock(&dev->mutex);
}
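
The vivid kthread hunks replace a blocking mutex_lock() in the worker loop with trylock-and-yield, so the thread can still observe kthread_should_stop() while the stop path holds the lock. A hedged sketch of that loop shape; demo_do_one_frame() is an assumed helper:

#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>

struct demo_dev { struct mutex mutex; };

void demo_do_one_frame(struct demo_dev *dev);	/* assumed helper */

static int demo_worker(void *data)
{
	struct demo_dev *dev = data;

	for (;;) {
		if (kthread_should_stop())
			break;

		if (!mutex_trylock(&dev->mutex)) {
			schedule_timeout_uninterruptible(1);
			continue;	/* re-check the stop condition */
		}

		demo_do_one_frame(dev);
		mutex_unlock(&dev->mutex);
	}
	return 0;
}
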
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 7c8d75852816..ed5d8fb854b4 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -147,7 +147,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
index bdc380b14e0c..a95b7c56569e 100644
--- a/drivers/media/platform/vivid/vivid-osd.c
+++ b/drivers/media/platform/vivid/vivid-osd.c
@@ -167,7 +167,7 @@ static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *
var->nonstd = 0;
var->vmode &= ~FB_VMODE_MASK;
- var->vmode = FB_VMODE_NONINTERLACED;
+ var->vmode |= FB_VMODE_NONINTERLACED;
/* Dummy values */
var->hsync_len = 24;
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index ebd7b9c4dd83..4f49c9a47d49 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -149,7 +149,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@@ -309,10 +313,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
- mutex_lock(&dev->mutex);
}
const struct vb2_ops vivid_sdr_cap_qops = {
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 25d4fd4f4c0b..82621260fc34 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -236,9 +236,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_cap)
- return 0;
-
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
@@ -984,7 +981,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
if (dev->bitmap_cap && (compose->width != s->r.width ||
compose->height != s->r.height)) {
- kfree(dev->bitmap_cap);
+ vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
*compose = s->r;
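
The vivid selection hunk pairs the free with the allocator: the capture bitmap is a vmalloc-style allocation, so it must go through vfree(), not kfree(). A minimal sketch of that pairing rule with an illustrative struct:

#include <linux/vmalloc.h>

struct demo_dev { void *bitmap_cap; };

static void demo_replace_bitmap(struct demo_dev *dev, size_t size)
{
	vfree(dev->bitmap_cap);		/* pairs with vzalloc(), not kfree() */
	dev->bitmap_cap = vzalloc(size);
}
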
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index f9a810e3f521..5f052189a6c4 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -841,6 +841,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
if (edid->start_block + edid->blocks > dev->edid_blocks)
edid->blocks = dev->edid_blocks - edid->start_block;
memcpy(edid->edid, dev->edid, edid->blocks * 128);
- cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
+ if (adap)
+ cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
return 0;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index dd609eea4753..8fed2fbe91a9 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -158,9 +158,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_out)
- return 0;
-
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index bfb3a6d051ba..10958bac0ad9 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -283,6 +283,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
return 0;
}
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+ struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
/* File system interface */
static const struct v4l2_file_operations usb_raremono_fops = {
.owner = THIS_MODULE,
@@ -307,12 +315,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
struct raremono_device *radio;
int retval = 0;
- radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
- if (radio)
- radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
- if (!radio || !radio->buffer)
+ radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+ if (!radio)
+ return -ENOMEM;
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+ if (!radio->buffer) {
+ kfree(radio);
return -ENOMEM;
+ }
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
@@ -336,7 +346,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
if (retval != 3 ||
(get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
- return -ENODEV;
+ retval = -ENODEV;
+ goto free_mem;
}
dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -345,7 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&intf->dev, "couldn't register v4l2_device\n");
- return retval;
+ goto free_mem;
}
mutex_init(&radio->lock);
@@ -357,6 +368,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->v4l2_dev.release = raremono_device_release;
usb_set_intfdata(intf, &radio->v4l2_dev);
@@ -372,6 +384,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
}
dev_err(&intf->dev, "could not register video device\n");
v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+ kfree(radio->buffer);
+ kfree(radio);
return retval;
}
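
The raremono hunks move from devm allocations to plain kzalloc/kmalloc plus a v4l2_device release callback, so the memory survives USB disconnect until the last file handle is closed. A hedged sketch of that release hook with an illustrative struct:

#include <linux/slab.h>
#include <media/v4l2-device.h>

struct demo_radio {
	struct v4l2_device v4l2_dev;
	void *buffer;
};

static void demo_release(struct v4l2_device *v4l2_dev)
{
	struct demo_radio *radio =
		container_of(v4l2_dev, struct demo_radio, v4l2_dev);

	kfree(radio->buffer);
	kfree(radio);
}

/* in probe, after v4l2_device_register():
 *	radio->v4l2_dev.release = demo_release;
 */
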
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index a93f681aa9d6..6426b07510a7 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1149,8 +1149,7 @@ static int wl1273_fm_fops_release(struct file *file)
if (radio->rds_users > 0) {
radio->rds_users--;
if (radio->rds_users == 0) {
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
+ mutex_lock(&core->lock);
radio->irq_flags &= ~WL1273_RDS_EVENT;
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index f218886c504d..8f3086773db4 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -458,8 +458,10 @@ static int si470x_i2c_remove(struct i2c_client *client)
free_irq(client->irq, radio);
video_unregister_device(&radio->videodev);
- kfree(radio);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
+ kfree(radio);
return 0;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 4b132c29f290..1d045a8c29e2 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -742,7 +742,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* start radio */
retval = si470x_start_usb(radio);
if (retval < 0)
- goto err_all;
+ goto err_buf;
/* set initial frequency */
si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
@@ -757,6 +757,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
return 0;
err_all:
+ usb_kill_urb(radio->int_in_urb);
+err_buf:
kfree(radio->buffer);
err_ctrl:
v4l2_ctrl_handler_free(&radio->hdl);
@@ -830,6 +832,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
mutex_lock(&radio->lock);
v4l2_device_disconnect(&radio->v4l2_dev);
video_unregister_device(&radio->videodev);
+ usb_kill_urb(radio->int_in_urb);
usb_set_intfdata(intf, NULL);
mutex_unlock(&radio->lock);
v4l2_device_put(&radio->v4l2_dev);
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 642b89c66bcb..db987dda356e 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -494,7 +494,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
return -EIO;
}
/* Send response data to caller */
- if (response != NULL && response_len != NULL && evt_hdr->dlen) {
+ if (response != NULL && response_len != NULL && evt_hdr->dlen &&
+ evt_hdr->dlen <= payload_len) {
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
memcpy(response, skb->data, evt_hdr->dlen);
@@ -590,6 +591,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
return;
fm_evt_hdr = (void *)skb->data;
+ if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
+ return;
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
@@ -1275,8 +1278,9 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
switch (action->type) {
case ACTION_SEND_COMMAND: /* Send */
- if (fmc_send_cmd(fmdev, 0, 0, action->data,
- action->size, NULL, NULL))
+ ret = fmc_send_cmd(fmdev, 0, 0, action->data,
+ action->size, NULL, NULL);
+ if (ret)
goto rel_fw;
cmd_cnt++;
@@ -1315,7 +1319,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
u16 payload;
- __be16 asic_id, asic_ver;
+ __be16 asic_id = 0, asic_ver = 0;
int resp_len, ret;
u8 fw_name[50];
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fb42f0fd0c1f..add26eac1677 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -553,6 +553,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
/* Register with V4L2 subsystem as RADIO device */
if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
+ v4l2_device_unregister(&fmdev->v4l2_dev);
fmerr("Could not register video device\n");
return -ENOMEM;
}
@@ -566,6 +567,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
if (ret < 0) {
fmerr("(fmdev): Can't init ctrl handler\n");
v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
+ video_unregister_device(fmdev->radio_dev);
+ v4l2_device_unregister(&fmdev->v4l2_dev);
return -EBUSY;
}
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index 5f634545ddd8..246795c31553 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -430,6 +430,10 @@ static int iguanair_probe(struct usb_interface *intf,
int ret, pipein, pipeout;
struct usb_host_interface *idesc;
+ idesc = intf->cur_altsetting;
+ if (idesc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
rc = rc_allocate_device();
if (!ir || !rc) {
@@ -444,18 +448,13 @@ static int iguanair_probe(struct usb_interface *intf,
ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
- if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
+ if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
+ !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
+ !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
ret = -ENOMEM;
goto out;
}
- idesc = intf->altsetting;
-
- if (idesc->desc.bNumEndpoints < 2) {
- ret = -ENODEV;
- goto out;
- }
-
ir->rc = rc;
ir->dev = &intf->dev;
ir->udev = udev;
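
The iguanair hunk validates the interface's endpoints before any allocation or use, and reads them from cur_altsetting rather than altsetting[0]. A minimal sketch of that probe-time check:

#include <linux/usb.h>

static int demo_check_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 2)
		return -ENODEV;

	if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc) ||
	    !usb_endpoint_is_int_out(&alt->endpoint[1].desc))
		return -ENODEV;

	return 0;
}
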
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f072bf28fccd..0b386fd518cc 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1644,8 +1644,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
- buf[7] == 0x86) {
+ if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index bf53553d2624..78f0bf8ee084 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -630,10 +630,16 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
+ /*
+ * Store the pointer to the au0828_dev so it can be accessed in
+ * au0828_usb_disconnect
+ */
+ usb_set_intfdata(interface, dev);
+
/* Analog TV */
retval = au0828_analog_register(dev, interface);
if (retval) {
- pr_err("%s() au0282_dev_register failed to register on V4L2\n",
+ pr_err("%s() au0828_analog_register failed to register on V4L2\n",
__func__);
goto done;
}
@@ -641,18 +647,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Digital TV */
retval = au0828_dvb_register(dev);
if (retval)
- pr_err("%s() au0282_dev_register failed\n",
+ pr_err("%s() au0828_dvb_register failed\n",
__func__);
/* Remote controller */
au0828_rc_register(dev);
- /*
- * Store the pointer to the au0828_dev so it can be accessed in
- * au0828_usb_disconnect
- */
- usb_set_intfdata(interface, dev);
-
pr_info("Registered device AU0828 [%s]\n",
dev->board.name == NULL ? "Unset" : dev->board.name);
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 85dd9a8e83ff..48eeb5a6a209 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -764,6 +764,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
dprintk(1, "au0828_analog_stream_enable called\n");
+ if (test_bit(DEV_DISCONNECTED, &d->dev_state))
+ return -ENODEV;
+
iface = usb_ifnum_to_if(d->usbdev, 0);
if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
dprintk(1, "Changing intf#0 to alt 5\n");
@@ -852,9 +855,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
return rc;
}
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
+
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- v4l2_device_call_all(&dev->v4l2_dev, 0, video,
- s_stream, 1);
dev->vid_timeout_running = 1;
mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
} else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
@@ -874,10 +877,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
- if (dev->streaming_users-- == 1)
+ if (dev->streaming_users-- == 1) {
au0828_uninit_isoc(dev);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ }
- v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
dev->vid_timeout_running = 0;
del_timer_sync(&dev->vid_timeout);
@@ -906,8 +910,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
dprintk(1, "au0828_stop_vbi_streaming called %d\n",
dev->streaming_users);
- if (dev->streaming_users-- == 1)
+ if (dev->streaming_users-- == 1) {
au0828_uninit_isoc(dev);
+ v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
+ }
spin_lock_irqsave(&dev->slock, flags);
if (dev->isoc_ctl.vbi_buf != NULL) {
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 52bc42da8a4c..a93fc1839e13 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -504,7 +504,16 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
/* use the alternate setting with the larges buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
+ if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index e9100a235831..30e27844e0e9 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -690,6 +690,10 @@ static int submit_urbs(struct camera_data *cam)
if (!urb) {
for (j = 0; j < i; j++)
usb_free_urb(cam->sbuf[j].urb);
+ for (j = 0; j < NUM_SBUF; j++) {
+ kfree(cam->sbuf[j].data);
+ cam->sbuf[j].data = NULL;
+ }
return -ENOMEM;
}
@@ -909,7 +913,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
cpia2_unregister_camera(cam);
v4l2_device_disconnect(&cam->v4l2_dev);
mutex_unlock(&cam->v4l2_lock);
- v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -921,6 +924,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
DBG("Releasing interface\n");
usb_driver_release_interface(&cpia2_driver, intf);
+ v4l2_device_put(&cam->v4l2_dev);
+
LOG("CPiA2 camera disconnected.\n");
}
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index d793c630f1dd..05e7edb213de 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -1248,8 +1248,7 @@ static int __init cpia2_init(void)
LOG("%s v%s\n",
ABOUT, CPIA_VERSION);
check_parameters();
- cpia2_usb_init();
- return 0;
+ return cpia2_usb_init();
}
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 6414188ffdfa..cd973e780da9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1389,7 +1389,7 @@ int cx231xx_g_register(struct file *file, void *priv,
ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
(u16)reg->reg, value, 4);
reg->val = value[0] | value[1] << 8 |
- value[2] << 16 | value[3] << 24;
+ value[2] << 16 | (u32)value[3] << 24;
reg->size = 4;
break;
case 1: /* AFE - read byte */
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 7853261906b1..519e01ba5750 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -567,7 +567,7 @@ static int af9005_boot_packet(struct usb_device *udev, int type, u8 *reply,
u8 *buf, int size)
{
u16 checksum;
- int act_len, i, ret;
+ int act_len = 0, i, ret;
memset(buf, 0, size);
buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff);
@@ -990,8 +990,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index b20f03d86e00..2b7a1b569db0 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -437,7 +437,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
u8 ircode[4];
int i;
- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+ return 0;
*event = 0;
*state = REMOTE_NO_KEY_PRESSED;
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 563f690cd978..4a5ea74c91d4 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -812,7 +812,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
- if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+ if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1)
return -ENODEV;
purb = usb_alloc_urb(0, GFP_KERNEL);
@@ -832,7 +832,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
* Some devices like the Hauppauge NovaTD model 52009 use an interrupt
* endpoint, while others use a bulk one.
*/
- e = &intf->altsetting[0].endpoint[rc_ep].desc;
+ e = &intf->cur_altsetting->endpoint[rc_ep].desc;
if (usb_endpoint_dir_in(e)) {
if (usb_endpoint_xfer_bulk(e)) {
pipe = usb_rcvbulkpipe(d->udev, rc_ep);
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 2868766893c8..c7c8fea0f1fa 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -2438,9 +2438,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
8, 0x0486,
};
+ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+ return -ENODEV;
if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
return -ENODEV;
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (!i2c)
+ return -ENODEV;
if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
return -ENODEV;
dib0700_set_i2c_speed(adap->dev, 1500);
@@ -2516,10 +2520,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
0, 0x00ef,
8, 0x0406,
};
+ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
+ return -ENODEV;
i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
return -ENODEV;
i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
+ if (!i2c)
+ return -ENODEV;
if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
return -ENODEV;
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 475a3c0cdee7..20d33f0544ed 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -233,18 +233,22 @@ static struct rc_map_table rc_map_digitv_table[] = {
static int digitv_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
- int i;
+ int ret, i;
u8 key[5];
u8 b[4] = { 0 };
*event = 0;
*state = REMOTE_NO_KEY_PRESSED;
- digitv_ctrl_msg(d,USB_READ_REMOTE,0,NULL,0,&key[1],4);
+ ret = digitv_ctrl_msg(d, USB_READ_REMOTE, 0, NULL, 0, &key[1], 4);
+ if (ret)
+ return ret;
/* Tell the device we've read the remote. Not sure how necessary
this is, but the Nebula SDK does it. */
- digitv_ctrl_msg(d,USB_WRITE_REMOTE,0,b,4,NULL,0);
+ ret = digitv_ctrl_msg(d, USB_WRITE_REMOTE, 0, b, 4, NULL, 0);
+ if (ret)
+ return ret;
/* if something is inside the buffer, simulate key press */
if (key[1] != 0)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
index 84308569e7dc..b3413404f91a 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
@@ -287,12 +287,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
void dvb_usb_device_exit(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = "generic DVB-USB module";
+ const char *default_name = "generic DVB-USB module";
+ char name[40];
usb_set_intfdata(intf, NULL);
if (d != NULL && d->desc != NULL) {
- name = d->desc->name;
+ strscpy(name, d->desc->name, sizeof(name));
dvb_usb_exit(d);
+ } else {
+ strscpy(name, default_name, sizeof(name));
}
info("%s successfully deinitialized and disconnected.", name);
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
index 95f9097498cb..2fa8d71385ec 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
@@ -11,7 +11,7 @@
int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf,
u16 rlen, int delay_ms)
{
- int actlen,ret = -ENOMEM;
+ int actlen = 0, ret = -ENOMEM;
if (!d || wbuf == NULL || wlen == 0)
return -EINVAL;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 4706628a3ed5..10bccce22858 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -612,10 +612,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
static int technisat_usb2_get_ir(struct dvb_usb_device *d)
{
struct technisat_usb2_state *state = d->priv;
- u8 *buf = state->buf;
- u8 *b;
- int ret;
struct ir_raw_event ev;
+ u8 *buf = state->buf;
+ int i, ret;
buf[0] = GET_IR_DATA_VENDOR_REQUEST;
buf[1] = 0x08;
@@ -651,26 +650,25 @@ unlock:
return 0; /* no key pressed */
/* decoding */
- b = buf+1;
#if 0
deb_rc("RC: %d ", ret);
- debug_dump(b, ret, deb_rc);
+ debug_dump(buf + 1, ret, deb_rc);
#endif
ev.pulse = 0;
- while (1) {
- ev.pulse = !ev.pulse;
- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
- ir_raw_event_store(d->rc_dev, &ev);
-
- b++;
- if (*b == 0xff) {
+ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+ if (buf[i] == 0xff) {
ev.pulse = 0;
ev.duration = 888888*2;
ir_raw_event_store(d->rc_dev, &ev);
break;
}
+
+ ev.pulse = !ev.pulse;
+ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+ FIRMWARE_CLOCK_TICK) / 1000;
+ ir_raw_event_store(d->rc_dev, &ev);
}
ir_raw_event_handle(d->rc_dev);
diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
index 60bf5f0644d1..a5efcd4f7b4f 100644
--- a/drivers/media/usb/go7007/go7007-fw.c
+++ b/drivers/media/usb/go7007/go7007-fw.c
@@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
return cnt;
}
-static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
- int *framelen)
+static noinline_for_stack int do_special(struct go7007 *go, u16 type,
+ __le16 *code, int space, int *framelen)
{
switch (type) {
case SPECIAL_FRM_HEAD:
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index af2395a76d8b..2cba2e1acdc6 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -2043,7 +2043,7 @@ int gspca_dev_probe2(struct usb_interface *intf,
pr_err("couldn't kzalloc gspca struct\n");
return -ENOMEM;
}
- gspca_dev->usb_buf = kmalloc(USB_BUF_SZ, GFP_KERNEL);
+ gspca_dev->usb_buf = kzalloc(USB_BUF_SZ, GFP_KERNEL);
if (!gspca_dev->usb_buf) {
pr_err("out of memory\n");
ret = -ENOMEM;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 78542fff403f..5a37d32e8fd0 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -127,6 +127,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, 2);
}
}
diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
index 599f755e75b8..7ebeee98dc1b 100644
--- a/drivers/media/usb/gspca/nw80x.c
+++ b/drivers/media/usb/gspca/nw80x.c
@@ -1584,6 +1584,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
return;
}
if (len == 1)
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 965372a5ff2f..25871bcc03a9 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2087,6 +2087,11 @@ static int reg_r(struct sd *sd, u16 index)
} else {
PERR("reg_r %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
return ret;
@@ -2115,6 +2120,11 @@ static int reg_r8(struct sd *sd,
} else {
PERR("reg_r8 %02x failed %d\n", index, ret);
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, 8);
}
return ret;
@@ -3472,6 +3482,11 @@ static void ov511_mode_init_regs(struct sd *sd)
return;
}
+ if (alt->desc.bNumEndpoints < 1) {
+ sd->gspca_dev.usb_err = -ENODEV;
+ return;
+ }
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
@@ -3597,6 +3612,11 @@ static void ov518_mode_init_regs(struct sd *sd)
return;
}
+ if (alt->desc.bNumEndpoints < 1) {
+ sd->gspca_dev.usb_err = -ENODEV;
+ return;
+ }
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 9266a5c9abc5..ba289b453077 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -645,6 +645,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
if (ret < 0) {
pr_err("read failed %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
return gspca_dev->usb_buf[0];
}
diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
index 47085cf2d723..f2dca0606935 100644
--- a/drivers/media/usb/gspca/ov534_9.c
+++ b/drivers/media/usb/gspca/ov534_9.c
@@ -1157,6 +1157,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ return 0;
}
return gspca_dev->usb_buf[0];
}
diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
index 5102cea50471..6adbb0eca71f 100644
--- a/drivers/media/usb/gspca/se401.c
+++ b/drivers/media/usb/gspca/se401.c
@@ -115,6 +115,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
pr_err("read req failed req %#04x error %d\n",
req, err);
gspca_dev->usb_err = err;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
}
}
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 10269dad9d20..11c794aea045 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -138,6 +138,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
}
},
{
+ .ident = "MSI MS-1039",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
+ }
+ },
+ {
.ident = "MSI MS-1632",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
@@ -923,6 +930,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
if (unlikely(result < 0 || result != length)) {
pr_err("Read register %02x failed %d\n", reg, result);
gspca_dev->usb_err = result;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 6696b2ec34e9..83e98b85ab6a 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -466,6 +466,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
dev_err(gspca_dev->v4l2_dev.dev,
"Error reading register %02x: %d\n", value, res);
gspca_dev->usb_err = res;
+ /*
+ * Make sure the result is zeroed to avoid uninitialized
+ * values.
+ */
+ gspca_dev->usb_buf[0] = 0;
}
}
diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
index d49d76ec1421..9ec63f75b8ea 100644
--- a/drivers/media/usb/gspca/sonixj.c
+++ b/drivers/media/usb/gspca/sonixj.c
@@ -1174,6 +1174,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
index f38fd8949609..ee93bd443df5 100644
--- a/drivers/media/usb/gspca/spca1528.c
+++ b/drivers/media/usb/gspca/spca1528.c
@@ -84,6 +84,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
index e274cf19a3ea..b236e9dcd468 100644
--- a/drivers/media/usb/gspca/sq930x.c
+++ b/drivers/media/usb/gspca/sq930x.c
@@ -438,6 +438,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r %04x failed %d\n", value, ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 6ac93d8db427..7d255529ed4c 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -293,6 +293,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev)
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size);
if (err < 0)
@@ -317,11 +320,21 @@ out:
static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
{
+ struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
struct sd *sd = (struct sd *) gspca_dev;
+ intfc = gspca_dev->dev->actconfig->intf_cache[0];
+
+ if (intfc->num_altsetting < 2)
+ return -ENODEV;
+
+ alt = &intfc->altsetting[1];
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize =
cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
@@ -334,6 +347,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
struct usb_host_interface *alt;
struct sd *sd = (struct sd *) gspca_dev;
+ /*
+ * Existence of altsetting and endpoint was verified in
+ * stv06xx_isoc_init()
+ */
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
index 8d785edcccf2..cc88c059b8d7 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
@@ -198,6 +198,10 @@ static int pb0100_start(struct sd *sd)
alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
if (!alt)
return -ENODEV;
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
/* If we don't have enough bandwidth use a lower framerate */
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 46c9f2229a18..cc3e1478c5a0 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -268,6 +268,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
index b4efb2fb36fa..5032b9d7d9bb 100644
--- a/drivers/media/usb/gspca/vc032x.c
+++ b/drivers/media/usb/gspca/vc032x.c
@@ -2919,6 +2919,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
if (ret < 0) {
pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
}
}
static void reg_r(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index 896f1b2b9179..948aaae4d47e 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -147,6 +147,11 @@ static int w9968cf_read_sb(struct sd *sd)
} else {
pr_err("Read SB reg [01] failed\n");
sd->gspca_dev.usb_err = ret;
+ /*
+ * Make sure the buffer is zeroed to avoid uninitialized
+ * values.
+ */
+ memset(sd->gspca_dev.usb_buf, 0, 2);
}
udelay(W9968CF_I2C_BUS_DELAY);
diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
index d5ed9d36ce25..2a555b0f0058 100644
--- a/drivers/media/usb/gspca/xirlink_cit.c
+++ b/drivers/media/usb/gspca/xirlink_cit.c
@@ -1455,6 +1455,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
}
@@ -2638,6 +2641,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
+ struct usb_interface_cache *intfc;
struct usb_host_interface *alt;
int max_packet_size;
@@ -2653,8 +2657,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
break;
}
+ intfc = gspca_dev->dev->actconfig->intf_cache[0];
+
+ if (intfc->num_altsetting < 2)
+ return -ENODEV;
+
+ alt = &intfc->altsetting[1];
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* Start isoc bandwidth "negotiation" at max isoc bandwidth */
- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
return 0;
@@ -2677,6 +2690,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
break;
}
+ /*
+ * Existence of altsetting and endpoint was verified in sd_isoc_init()
+ */
alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
if (packet_size <= min_packet_size)
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index a20b60ac66ca..99171b912a2d 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -143,6 +143,7 @@ static int device_authorization(struct hdpvr_device *dev)
dev->fw_ver = dev->usbc_buf[1];
+ dev->usbc_buf[46] = '\0';
v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
dev->fw_ver, &dev->usbc_buf[2]);
@@ -278,6 +279,7 @@ static int hdpvr_probe(struct usb_interface *interface,
#endif
size_t buffer_size;
int i;
+ int dev_num;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
@@ -382,8 +384,17 @@ static int hdpvr_probe(struct usb_interface *interface,
}
#endif
+ dev_num = atomic_inc_return(&dev_nr);
+ if (dev_num >= HDPVR_MAX) {
+ v4l2_err(&dev->v4l2_dev,
+ "max device number reached, device register failed\n");
+ atomic_dec(&dev_nr);
+ retval = -ENODEV;
+ goto reg_fail;
+ }
+
retval = hdpvr_register_videodev(dev, &interface->dev,
- video_nr[atomic_inc_return(&dev_nr)]);
+ video_nr[dev_num]);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
goto reg_fail;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1eb4f7ba2967..ff489645e070 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -670,6 +670,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
{
+ if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
+ return 0;
return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index a82a00dd7329..80869990ffbb 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -54,6 +54,7 @@
#define PVR2_CVAL_INPUT_COMPOSITE 2
#define PVR2_CVAL_INPUT_SVIDEO 3
#define PVR2_CVAL_INPUT_RADIO 4
+#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
enum pvr2_config {
pvr2_config_empty, /* No configuration */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 2cc4d2b6f810..d18ced28797d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -919,8 +919,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -994,7 +998,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 18b41b9dc2e4..73889d6cfd50 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -402,6 +402,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
struct smsusb_device_t *dev;
void *mdev;
int i, rc;
+ int align = 0;
/* create device object */
dev = kzalloc(sizeof(struct smsusb_device_t), GFP_KERNEL);
@@ -413,6 +414,24 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
dev->udev = interface_to_usbdev(intf);
dev->state = SMSUSB_DISCONNECTED;
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *desc =
+ &intf->cur_altsetting->endpoint[i].desc;
+
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ dev->in_ep = desc->bEndpointAddress;
+ align = usb_endpoint_maxp(desc) - sizeof(struct sms_msg_hdr);
+ } else {
+ dev->out_ep = desc->bEndpointAddress;
+ }
+ }
+
+ pr_debug("in_ep = %02x, out_ep = %02x\n", dev->in_ep, dev->out_ep);
+ if (!dev->in_ep || !dev->out_ep || align < 0) { /* Missing endpoints? */
+ smsusb_term_device(intf);
+ return -ENODEV;
+ }
+
params.device_type = sms_get_board(board_id)->type;
switch (params.device_type) {
@@ -427,24 +446,12 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
/* fall-thru */
default:
dev->buffer_size = USB2_BUFFER_SIZE;
- dev->response_alignment =
- le16_to_cpu(dev->udev->ep_in[1]->desc.wMaxPacketSize) -
- sizeof(struct sms_msg_hdr);
+ dev->response_alignment = align;
params.flags |= SMS_DEVICE_FAMILY2;
break;
}
- for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
- if (intf->cur_altsetting->endpoint[i].desc. bEndpointAddress & USB_DIR_IN)
- dev->in_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress;
- else
- dev->out_ep = intf->cur_altsetting->endpoint[i].desc.bEndpointAddress;
- }
-
- pr_debug("in_ep = %02x, out_ep = %02x\n",
- dev->in_ep, dev->out_ep);
-
params.device = &dev->udev->dev;
params.buffer_size = dev->buffer_size;
params.num_buffers = MAX_BUFFERS;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 1c48f2f1e14a..f9844f87467b 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -166,7 +166,11 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
*value = *buf;
kfree(buf);
- return ret;
+
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
}
static int stk_start_stream(struct stk_camera *dev)
@@ -647,8 +651,7 @@ static int v4l_stk_release(struct file *fp)
dev->owner = NULL;
}
- if (is_present(dev))
- usb_autopm_put_interface(dev->interface);
+ usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->lock);
return v4l2_fh_release(fp);
}
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index ee88ae83230c..185c8079d0f9 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -111,6 +111,7 @@ static void tm6000_urb_received(struct urb *urb)
printk(KERN_ERR "tm6000: error %s\n", __func__);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
+ dev->dvb->bulk_urb = NULL;
}
}
}
@@ -141,6 +142,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
if (dvb->bulk_urb->transfer_buffer == NULL) {
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
return -ENOMEM;
}
@@ -168,6 +170,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
kfree(dvb->bulk_urb->transfer_buffer);
usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
return ret;
}
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e7671a3a1e4..d7397c0d7f86 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -278,7 +278,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
dprintk("%s\n", __func__);
- b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+ b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
if (!b)
return -ENOMEM;
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index e56a49a5e8b1..d8ce7d75ff18 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, NULL, 0, 0);
+ value, index, NULL, 0, USB_CTRL_GET_TIMEOUT);
if (ret < 0)
return ret;
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index bfdf72355332..230f50aa3000 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -332,6 +332,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto unlock;
+ }
if (usbvision->user) {
err_code = -EBUSY;
} else {
@@ -395,6 +399,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "close");
@@ -409,9 +414,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@@ -1095,6 +1101,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto out;
+ }
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@@ -1127,6 +1138,7 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "");
@@ -1139,9 +1151,10 @@ static int usbvision_radio_close(struct file *file)
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@@ -1568,6 +1581,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
+ int u;
PDEBUG(DBG_PROBE, "");
@@ -1584,13 +1598,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
+ u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->user) {
+ if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index c630a9f8e356..9803135f2e59 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -868,7 +868,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
unsigned int size;
unsigned int i;
- extra_size = ALIGN(extra_size, sizeof(*entity->pads));
+ extra_size = roundup(extra_size, sizeof(*entity->pads));
num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+ num_inputs;
@@ -1411,6 +1411,11 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
break;
if (forward == prev)
continue;
+ if (forward->chain.next || forward->chain.prev) {
+ uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+ "entity %d already in chain.\n", forward->id);
+ return -EINVAL;
+ }
switch (UVC_ENTITY_TYPE(forward)) {
case UVC_VC_EXTENSION_UNIT:
@@ -1492,6 +1497,13 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
return -1;
}
+ if (term->chain.next || term->chain.prev) {
+ uvc_trace(UVC_TRACE_DESCR, "Found reference to "
+ "entity %d already in chain.\n",
+ term->id);
+ return -EINVAL;
+ }
+
if (uvc_trace_param & UVC_TRACE_PROBE)
printk(" %d", term->id);
@@ -2021,6 +2033,21 @@ static int uvc_probe(struct usb_interface *intf,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
+ /* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strscpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ dev->mdev.driver_version = LINUX_VERSION_CODE;
+ media_device_init(&dev->mdev);
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2041,20 +2068,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
- /* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->mdev.dev = &intf->dev;
- strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
- if (udev->serial)
- strlcpy(dev->mdev.serial, udev->serial,
- sizeof(dev->mdev.serial));
- strcpy(dev->mdev.bus_info, udev->devpath);
- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- dev->mdev.driver_version = LINUX_VERSION_CODE;
- media_device_init(&dev->mdev);
-
- dev->vdev.mdev = &dev->mdev;
-#endif
+ /* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index e3735bfcc02f..c5513f55e64e 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -711,7 +711,8 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);
strlcpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
- strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
+ if (cam->udev->product)
+ strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index c56d649fa7da..6c0c5de2527b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1007,6 +1007,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_FLASH_STROBE_STOP:
case V4L2_CID_AUTO_FOCUS_START:
case V4L2_CID_AUTO_FOCUS_STOP:
+ case V4L2_CID_DO_WHITE_BALANCE:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
@@ -2103,16 +2104,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
&def, &flags);
- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
if (is_menu)
WARN_ON(step);
else
WARN_ON(cfg->menu_skip_mask);
- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
qmenu = v4l2_ctrl_get_menu(cfg->id);
- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
- qmenu_int == NULL) {
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 74f657b050be..99dfd6362c5a 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1978,7 +1978,22 @@ static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
struct v4l2_streamparm *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_s_parm(file, fh, p);
+ if (ret)
+ return ret;
+
+ /* Note: extendedmode is never used in drivers */
+ if (V4L2_TYPE_IS_OUTPUT(p->type)) {
+ memset(p->parm.output.reserved, 0,
+ sizeof(p->parm.output.reserved));
+ p->parm.output.extendedmode = 0;
+ p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
+ } else {
+ memset(p->parm.capture.reserved, 0,
+ sizeof(p->parm.capture.reserved));
+ p->parm.capture.extendedmode = 0;
+ p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
+ }
+ return ops->vidioc_s_parm(file, fh, p);
}
static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index b6189a4958c5..025822bc6941 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -352,8 +352,11 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
BUG_ON(dma->sglen);
if (dma->pages) {
- for (i = 0; i < dma->nr_pages; i++)
+ for (i = 0; i < dma->nr_pages; i++) {
+ if (dma->direction == DMA_FROM_DEVICE)
+ set_page_dirty_lock(dma->pages[i]);
put_page(dma->pages[i]);
+ }
kfree(dma->pages);
dma->pages = NULL;
}
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 1d49a8dd4a37..7c040a3b45be 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -72,7 +72,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
u32 value;
/* compute the number of MC clock cycles per tick */
- tick = mc->tick * clk_get_rate(mc->clk);
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
do_div(tick, NSEC_PER_SEC);
value = readl(mc->regs + MC_EMEM_ARB_CFG);
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 4d673a626db4..1041eb7a6167 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -629,13 +629,18 @@ static int __init memstick_init(void)
return -ENOMEM;
rc = bus_register(&memstick_bus_type);
- if (!rc)
- rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_destroy_workqueue;
- if (!rc)
- return 0;
+ rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_bus_unregister;
+
+ return 0;
+error_bus_unregister:
bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
destroy_workqueue(workqueue);
return rc;
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 48db922075e2..08fa6400d255 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -947,7 +947,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
if (!cnt) {
rc = -ENODEV;
pci_dev_busy = 1;
- goto err_out;
+ goto err_out_int;
}
jm = kzalloc(sizeof(struct jmb38x_ms)
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 14cf6dfc3b14..4d837bcad5db 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -100,19 +100,19 @@ struct buflist {
* Function prototypes. Called from OS entry point mptctl_ioctl.
* arg contents specific to function.
*/
-static int mptctl_fw_download(unsigned long arg);
-static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_gettargetinfo(unsigned long arg);
-static int mptctl_readtest(unsigned long arg);
-static int mptctl_mpt_command(unsigned long arg);
-static int mptctl_eventquery(unsigned long arg);
-static int mptctl_eventenable(unsigned long arg);
-static int mptctl_eventreport(unsigned long arg);
-static int mptctl_replace_fw(unsigned long arg);
-
-static int mptctl_do_reset(unsigned long arg);
-static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
-static int mptctl_hp_targetinfo(unsigned long arg);
+static int mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_getiocinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_readtest(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_mpt_command(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventquery(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventenable(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_eventreport(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_replace_fw(MPT_ADAPTER *iocp, unsigned long arg);
+
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg);
+static int mptctl_hp_hostinfo(MPT_ADAPTER *iocp, unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(MPT_ADAPTER *iocp, unsigned long arg);
static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
static void mptctl_remove(struct pci_dev *);
@@ -123,8 +123,8 @@ static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
/*
* Private function calls.
*/
-static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
-static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static int mptctl_do_mpt_command(MPT_ADAPTER *iocp, struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen);
static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
@@ -656,19 +656,19 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* by TM and FW reloads.
*/
if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
- return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd));
} else if (cmd == MPTTARGETINFO) {
- return mptctl_gettargetinfo(arg);
+ return mptctl_gettargetinfo(iocp, arg);
} else if (cmd == MPTTEST) {
- return mptctl_readtest(arg);
+ return mptctl_readtest(iocp, arg);
} else if (cmd == MPTEVENTQUERY) {
- return mptctl_eventquery(arg);
+ return mptctl_eventquery(iocp, arg);
} else if (cmd == MPTEVENTENABLE) {
- return mptctl_eventenable(arg);
+ return mptctl_eventenable(iocp, arg);
} else if (cmd == MPTEVENTREPORT) {
- return mptctl_eventreport(arg);
+ return mptctl_eventreport(iocp, arg);
} else if (cmd == MPTFWREPLACE) {
- return mptctl_replace_fw(arg);
+ return mptctl_replace_fw(iocp, arg);
}
/* All of these commands require an interrupt or
@@ -678,15 +678,15 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
if (cmd == MPTFWDOWNLOAD)
- ret = mptctl_fw_download(arg);
+ ret = mptctl_fw_download(iocp, arg);
else if (cmd == MPTCOMMAND)
- ret = mptctl_mpt_command(arg);
+ ret = mptctl_mpt_command(iocp, arg);
else if (cmd == MPTHARDRESET)
- ret = mptctl_do_reset(arg);
+ ret = mptctl_do_reset(iocp, arg);
else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
- ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd));
else if (cmd == HP_GETTARGETINFO)
- ret = mptctl_hp_targetinfo(arg);
+ ret = mptctl_hp_targetinfo(iocp, arg);
else
ret = -EINVAL;
@@ -705,11 +705,10 @@ mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-static int mptctl_do_reset(unsigned long arg)
+static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
struct mpt_ioctl_diag_reset krinfo;
- MPT_ADAPTER *iocp;
if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
@@ -718,12 +717,6 @@ static int mptctl_do_reset(unsigned long arg)
return -EFAULT;
}
- if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
- __FILE__, __LINE__, krinfo.hdr.iocnum);
- return -ENODEV; /* (-6) No such device or address */
- }
-
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
iocp->name));
@@ -754,7 +747,7 @@ static int mptctl_do_reset(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_fw_download(unsigned long arg)
+mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg)
{
struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
struct mpt_fw_xfer kfwdl;
@@ -766,7 +759,7 @@ mptctl_fw_download(unsigned long arg)
return -EFAULT;
}
- return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+ return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -784,11 +777,10 @@ mptctl_fw_download(unsigned long arg)
* -ENOMSG if FW upload returned bad status
*/
static int
-mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen)
{
FWDownload_t *dlmsg;
MPT_FRAME_HDR *mf;
- MPT_ADAPTER *iocp;
FWDownloadTCSGE_t *ptsge;
MptSge_t *sgl, *sgIn;
char *sgOut;
@@ -808,17 +800,10 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
pFWDownloadReply_t ReplyMsg = NULL;
unsigned long timeleft;
- if (mpt_verify_adapter(ioc, &iocp) < 0) {
- printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
- ioc);
- return -ENODEV; /* (-6) No such device or address */
- } else {
-
- /* Valid device. Get a message frame and construct the FW download message.
- */
- if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
- return -EAGAIN;
- }
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
"mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
@@ -826,8 +811,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
iocp->name, ufwbuf));
dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
iocp->name, (int)fwlen));
- dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
- iocp->name, ioc));
dlmsg = (FWDownload_t*) mf;
ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
@@ -1238,13 +1221,11 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
* -ENODEV if no such device/adapter
*/
static int
-mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_iocinfo *karg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
- int iocnum;
unsigned int port;
int cim_rev;
struct scsi_device *sdev;
@@ -1272,14 +1253,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
return PTR_ERR(karg);
}
- if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- kfree(karg);
- return -ENODEV;
- }
-
/* Verify the data transfer size is correct. */
if (karg->hdr.maxDataSize != data_size) {
printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
@@ -1385,15 +1358,13 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_gettargetinfo (unsigned long arg)
+mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
struct mpt_ioctl_targetinfo karg;
- MPT_ADAPTER *ioc;
VirtDevice *vdevice;
char *pmem;
int *pdata;
- int iocnum;
int numDevices = 0;
int lun;
int maxWordsLeft;
@@ -1408,13 +1379,6 @@ mptctl_gettargetinfo (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
ioc->name));
/* Get the port number and set the maximum number of bytes
@@ -1510,12 +1474,10 @@ mptctl_gettargetinfo (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_readtest (unsigned long arg)
+mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_test __user *uarg = (void __user *) arg;
struct mpt_ioctl_test karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
@@ -1524,13 +1486,6 @@ mptctl_readtest (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
ioc->name));
/* Fill in the data and return the structure to the calling
@@ -1571,12 +1526,10 @@ mptctl_readtest (unsigned long arg)
* -ENODEV if no such device/adapter
*/
static int
-mptctl_eventquery (unsigned long arg)
+mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventquery karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
@@ -1585,13 +1538,6 @@ mptctl_eventquery (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
ioc->name));
karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
@@ -1610,12 +1556,10 @@ mptctl_eventquery (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventenable (unsigned long arg)
+mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventenable karg;
- MPT_ADAPTER *ioc;
- int iocnum;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
@@ -1624,13 +1568,6 @@ mptctl_eventenable (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
ioc->name));
if (ioc->events == NULL) {
@@ -1658,12 +1595,10 @@ mptctl_eventenable (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_eventreport (unsigned long arg)
+mptctl_eventreport (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
struct mpt_ioctl_eventreport karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int numBytes, maxEvents, max;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
@@ -1673,12 +1608,6 @@ mptctl_eventreport (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
ioc->name));
@@ -1712,12 +1641,10 @@ mptctl_eventreport (unsigned long arg)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
-mptctl_replace_fw (unsigned long arg)
+mptctl_replace_fw (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
struct mpt_ioctl_replace_fw karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int newFwSize;
if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
@@ -1727,13 +1654,6 @@ mptctl_replace_fw (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
ioc->name));
/* If caching FW, Free the old FW image
@@ -1780,12 +1700,10 @@ mptctl_replace_fw (unsigned long arg)
* -ENOMEM if memory allocation error
*/
static int
-mptctl_mpt_command (unsigned long arg)
+mptctl_mpt_command (MPT_ADAPTER *ioc, unsigned long arg)
{
struct mpt_ioctl_command __user *uarg = (void __user *) arg;
struct mpt_ioctl_command karg;
- MPT_ADAPTER *ioc;
- int iocnum;
int rc;
@@ -1796,14 +1714,7 @@ mptctl_mpt_command (unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
- rc = mptctl_do_mpt_command (karg, &uarg->MF);
+ rc = mptctl_do_mpt_command (ioc, karg, &uarg->MF);
return rc;
}
@@ -1821,9 +1732,8 @@ mptctl_mpt_command (unsigned long arg)
* -EPERM if SCSI I/O and target is untagged
*/
static int
-mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr)
{
- MPT_ADAPTER *ioc;
MPT_FRAME_HDR *mf = NULL;
MPIHeader_t *hdr;
char *psge;
@@ -1832,7 +1742,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
dma_addr_t dma_addr_in;
dma_addr_t dma_addr_out;
int sgSize = 0; /* Num SG elements */
- int iocnum, flagsLength;
+ int flagsLength;
int sz, rc = 0;
int msgContext;
u16 req_idx;
@@ -1847,13 +1757,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
bufIn.kptr = bufOut.kptr = NULL;
bufIn.len = bufOut.len = 0;
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
-
spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
if (ioc->ioc_reset_in_progress) {
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
@@ -2418,17 +2321,15 @@ done_free_mem:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
{
hp_host_info_t __user *uarg = (void __user *) arg;
- MPT_ADAPTER *ioc;
struct pci_dev *pdev;
char *pbuf=NULL;
dma_addr_t buf_dma;
hp_host_info_t karg;
CONFIGPARMS cfg;
ConfigPageHeader_t hdr;
- int iocnum;
int rc, cim_rev;
ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
MPT_FRAME_HDR *mf = NULL;
@@ -2452,12 +2353,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
ioc->name));
@@ -2670,15 +2565,13 @@ retry_wait:
* -ENOMEM if memory allocation error
*/
static int
-mptctl_hp_targetinfo(unsigned long arg)
+mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
hp_target_info_t __user *uarg = (void __user *) arg;
SCSIDevicePage0_t *pg0_alloc;
SCSIDevicePage3_t *pg3_alloc;
- MPT_ADAPTER *ioc;
MPT_SCSI_HOST *hd = NULL;
hp_target_info_t karg;
- int iocnum;
int data_sz;
dma_addr_t page_dma;
CONFIGPARMS cfg;
@@ -2692,12 +2585,6 @@ mptctl_hp_targetinfo(unsigned long arg)
return -EFAULT;
}
- if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
- (ioc == NULL)) {
- printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
- __FILE__, __LINE__, iocnum);
- return -ENODEV;
- }
if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
@@ -2865,7 +2752,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
kfw.fwlen = kfw32.fwlen;
kfw.bufp = compat_ptr(kfw32.bufp);
- ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+ ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen);
mutex_unlock(&iocp->ioctl_cmds.mutex);
@@ -2919,7 +2806,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
/* Pass new structure to do_mpt_command
*/
- ret = mptctl_do_mpt_command (karg, &uarg->MF);
+ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF);
mutex_unlock(&iocp->ioctl_cmds.mutex);
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 41767f7239bb..1f0c2b594654 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
- if (ret != 0)
- goto err_pm;
+ if (ret != 0) {
+ pm_runtime_put_sync(arizona->dev);
+ goto err_ref;
+ }
break;
case ARIZONA_32KZ_MCLK2:
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
-err_pm:
- pm_runtime_put_sync(arizona->dev);
err_ref:
if (ret != 0)
arizona->clk32k_ref--;
@@ -1038,7 +1038,7 @@ int arizona_dev_init(struct arizona *arizona)
unsigned int reg, val, mask;
int (*apply_patch)(struct arizona *) = NULL;
const struct mfd_cell *subdevs = NULL;
- int n_subdevs, ret, i;
+ int n_subdevs = 0, ret, i;
dev_set_drvdata(arizona->dev, arizona);
mutex_init(&arizona->clk_lock);
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 8f873866ea60..86aab322da33 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -142,7 +142,7 @@ static const struct mfd_cell da9062_devs[] = {
.name = "da9062-watchdog",
.num_resources = ARRAY_SIZE(da9062_wdt_resources),
.resources = da9062_wdt_resources,
- .of_compatible = "dlg,da9062-wdt",
+ .of_compatible = "dlg,da9062-watchdog",
},
{
.name = "da9062-thermal",
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 704e189ca162..672831d5ee32 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -93,6 +93,11 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
+enum dln2_endpoint {
+ DLN2_EP_OUT = 0,
+ DLN2_EP_IN = 1,
+};
+
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -729,6 +734,8 @@ static int dln2_probe(struct usb_interface *interface,
const struct usb_device_id *usb_id)
{
struct usb_host_interface *hostif = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *epin;
+ struct usb_endpoint_descriptor *epout;
struct device *dev = &interface->dev;
struct dln2_dev *dln2;
int ret;
@@ -738,12 +745,19 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
+ epout = &hostif->endpoint[DLN2_EP_OUT].desc;
+ if (!usb_endpoint_is_bulk_out(epout))
+ return -ENODEV;
+ epin = &hostif->endpoint[DLN2_EP_IN].desc;
+ if (!usb_endpoint_is_bulk_in(epin))
+ return -ENODEV;
+
dln2 = kzalloc(sizeof(*dln2), GFP_KERNEL);
if (!dln2)
return -ENOMEM;
- dln2->ep_out = hostif->endpoint[0].desc.bEndpointAddress;
- dln2->ep_in = hostif->endpoint[1].desc.bEndpointAddress;
+ dln2->ep_out = epout->bEndpointAddress;
+ dln2->ep_in = epin->bEndpointAddress;
dln2->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dln2->interface = interface;
usb_set_intfdata(interface, dln2);
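The dln2 probe change above refuses to use the endpoint descriptors until their direction and transfer type have been verified. A small standalone sketch of the same check, assuming the standard USB descriptor layout (bit 7 of bEndpointAddress is the direction, the low two bits of bmAttributes select bulk); the macro names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct ep_desc {
        uint8_t bEndpointAddress;       /* bit 7: 1 = IN, 0 = OUT */
        uint8_t bmAttributes;           /* bits 1:0: 2 = bulk */
};

#define USB_DIR_IN      0x80
#define USB_XFER_MASK   0x03
#define USB_XFER_BULK   0x02

static int is_bulk_out(const struct ep_desc *ep)
{
        return !(ep->bEndpointAddress & USB_DIR_IN) &&
               (ep->bmAttributes & USB_XFER_MASK) == USB_XFER_BULK;
}

static int is_bulk_in(const struct ep_desc *ep)
{
        return (ep->bEndpointAddress & USB_DIR_IN) &&
               (ep->bmAttributes & USB_XFER_MASK) == USB_XFER_BULK;
}

int main(void)
{
        struct ep_desc out = { 0x01, 0x02 }, in = { 0x81, 0x02 };

        printf("out ok: %d, in ok: %d\n", is_bulk_out(&out), is_bulk_in(&in));
        return 0;
}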
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 11347a3e6d40..c311b869be38 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -111,6 +111,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&hi655x_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9ff243970e93..5b41111e62fd 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
info->mem = &pdev->resource[0];
info->irq = pdev->irq;
+ pdev->d3cold_delay = 0;
+
/* Probably it is enough to set this for iDMA capable devices only */
pci_set_master(pdev);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 19ac8bc8e7ea..71cecd7aeea0 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -273,6 +273,9 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
{
u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+ /* Set the device in reset state */
+ writel(0, lpss->priv + LPSS_PRIV_RESETS);
+
intel_lpss_deassert_reset(lpss);
intel_lpss_set_remap_addr(lpss);
@@ -530,6 +533,7 @@ module_init(intel_lpss_init);
static void __exit intel_lpss_exit(void)
{
+ ida_destroy(&intel_lpss_devid_ida);
debugfs_remove(intel_lpss_debugfs);
}
module_exit(intel_lpss_exit);
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 2d6e2c392786..4a2fc59d5901 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -155,12 +155,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
pd->ono = irq_of_parse_and_map(dev->of_node, 1);
- /*
- * ToDo: the 'wakeup' member in the platform data is more of a linux
- * specfic information. Hence, there is no binding for that yet and
- * not parsed here.
- */
-
return pd;
}
@@ -248,7 +242,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
*/
/* MAX8997 has a power button input. */
- device_init_wakeup(max8997->dev, pdata->wakeup);
+ device_init_wakeup(max8997->dev, true);
return ret;
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 6c16f170529f..75d52034f89d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
if (ret)
goto out;
- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
+ MC13XXX_ADC0_CHRGRAWDIV;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
if (channel > 7)
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index c57e407020f1..5c8ed2150c8b 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -179,6 +179,7 @@ static int mfd_add_device(struct device *parent, int id,
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
pdev->dev.of_node = np;
+ pdev->dev.fwnode = &np->fwnode;
break;
}
}
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 9d167c9af2c6..e153276ed954 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -131,12 +131,12 @@ static inline u32 usbtll_read(void __iomem *base, u32 reg)
return readl_relaxed(base + reg);
}
-static inline void usbtll_writeb(void __iomem *base, u8 reg, u8 val)
+static inline void usbtll_writeb(void __iomem *base, u32 reg, u8 val)
{
writeb_relaxed(val, base + reg);
}
-static inline u8 usbtll_readb(void __iomem *base, u8 reg)
+static inline u8 usbtll_readb(void __iomem *base, u32 reg)
{
return readb_relaxed(base + reg);
}
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index ee94080e1cbb..dd20c3e32352 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -32,6 +32,7 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
case RN5T618_WATCHDOGCNT:
case RN5T618_DCIRQ:
case RN5T618_ILIMDATAH ... RN5T618_AIN0DATAL:
+ case RN5T618_ADCCNT3:
case RN5T618_IR_ADC1 ... RN5T618_IR_ADC3:
case RN5T618_IR_GPR:
case RN5T618_IR_GPF:
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index ff296a4bf3d2..dc6a9432a4b6 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -369,6 +369,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
void rts522a_init_params(struct rtsx_pcr *pcr)
{
rts5227_init_params(pcr);
+ pcr->ops = &rts522a_pcr_ops;
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
}
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 60286adbd6a1..e56f0844b98d 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -295,11 +295,24 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
+{
+ return device_may_wakeup(dev);
+}
+
static int __maybe_unused tscadc_suspend(struct device *dev)
{
struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
regmap_write(tscadc->regmap, REG_SE, 0x00);
+ if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) {
+ u32 ctrl;
+
+ regmap_read(tscadc->regmap, REG_CTRL, &ctrl);
+ ctrl &= ~(CNTRLREG_POWERDOWN);
+ ctrl |= CNTRLREG_TSCSSENB;
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
+ }
pm_runtime_put_sync(dev);
return 0;
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 4aeba9b6942a..ec37cfe32ca3 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -27,6 +27,7 @@ static const struct of_device_id tps65912_spi_of_match_table[] = {
{ .compatible = "ti,tps65912", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, tps65912_spi_of_match_table);
static int tps65912_spi_probe(struct spi_device *spi)
{
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index dd19f17a1b63..2b8c479dbfa6 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -322,8 +322,19 @@ int twl6040_power(struct twl6040 *twl6040, int on)
}
}
+ /*
+ * Register access can produce errors after power-up unless we
+ * wait at least 8ms based on measurements on duovero.
+ */
+ usleep_range(10000, 12000);
+
/* Sync with the HW */
- regcache_sync(twl6040->regmap);
+ ret = regcache_sync(twl6040->regmap);
+ if (ret) {
+ dev_err(twl6040->dev, "Failed to sync with the HW: %i\n",
+ ret);
+ goto out;
+ }
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index f53e217e963f..b7ee8043a133 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2126,8 +2126,8 @@ exit_done:
return status;
}
-static int altera_get_note(u8 *p, s32 program_size,
- s32 *offset, char *key, char *value, int length)
+static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
+ char *key, char *value, int keylen, int vallen)
/*
* Gets key and value of NOTE fields in the JBC file.
* Can be called in two modes: if offset pointer is NULL,
@@ -2176,8 +2176,7 @@ static int altera_get_note(u8 *p, s32 program_size,
key_ptr = &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])];
- if ((strncasecmp(key, key_ptr, strlen(key_ptr)) == 0) &&
- (key != NULL)) {
+ if (key && !strncasecmp(key, key_ptr, strlen(key_ptr))) {
status = 0;
value_ptr = &p[note_strings +
@@ -2185,7 +2184,7 @@ static int altera_get_note(u8 *p, s32 program_size,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
- strlcpy(value, value_ptr, length);
+ strlcpy(value, value_ptr, vallen);
}
}
@@ -2204,13 +2203,13 @@ static int altera_get_note(u8 *p, s32 program_size,
strlcpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
- length);
+ keylen);
if (value != NULL)
strlcpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
- length);
+ vallen);
*offset = i + 1;
}
@@ -2464,7 +2463,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
__func__, (format_version == 2) ? "Jam STAPL" :
"pre-standardized Jam 1.1");
while (altera_get_note((u8 *)fw->data, fw->size,
- &offset, key, value, 256) == 0)
+ &offset, key, value, 32, 256) == 0)
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
__func__, key, value);
}
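altera_get_note() now receives separate key and value buffer lengths, so the 32-byte key buffer at the call site is no longer written with the 256-byte value length. A tiny sketch of the underlying rule, bound each copy by its own destination size (hypothetical helper, not the driver code):

#include <stddef.h>
#include <stdio.h>

static void copy_note(char *key, size_t keylen,
                      char *value, size_t vallen,
                      const char *src_key, const char *src_value)
{
        /* snprintf truncates safely to the size of each destination. */
        snprintf(key, keylen, "%s", src_key);
        snprintf(value, vallen, "%s", src_value);
}

int main(void)
{
        char key[32], value[256];

        copy_note(key, sizeof(key), value, sizeof(value),
                  "CREATOR", "Quartus Prime Jam STAPL composer");
        printf("NOTE \"%s\" = \"%s\"\n", key, value);
        return 0;
}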
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd6ed91..d08509cd978a 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1026,8 +1026,6 @@ err1:
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
- pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
-
if (!afu)
return;
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 9597e9523cac..fff13176f9b8 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -454,7 +454,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
+ if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index a37b9b6a315a..2eef811764ad 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -777,7 +777,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable;
- at24->nvmem_config.root_only = true;
+ at24->nvmem_config.root_only = !(chip.flags & AT24_FLAG_IRUGO);
at24->nvmem_config.owner = THIS_MODULE;
at24->nvmem_config.compat = true;
at24->nvmem_config.base_dev = &client->dev;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index eb29113e0bac..b11737f7bdca 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -419,10 +419,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
- device_del(&cdev->cdev);
put_device(dev);
cdev->dev = NULL;
- return device_add(&cdev->cdev);
+ return 0;
}
}
return -ENODEV;
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index c0012ca4229e..74923ffb0df1 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -782,6 +782,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if ((m->addr == 0x0) || (m->size == 0))
return -EINVAL;
+ if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
+ return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
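The added check above rejects sizes that would wrap around when rounded up to a page boundary. A standalone sketch of the same guard, assuming a 4 KiB page size purely for illustration:

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))

static int map_size_ok(unsigned long addr, unsigned long size)
{
        unsigned long offs = addr & ~PAGE_MASK;     /* offset within the page */

        /* Equivalent to checking that offs + size, rounded up, still fits. */
        if (size > ULONG_MAX - PAGE_SIZE - offs)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d %d\n",
               map_size_ok(0x1008, 8192),
               map_size_ok(0x1008, ULONG_MAX - 100));
        return 0;
}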
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 466a9b711480..95584bffa4ea 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -298,7 +298,7 @@ static int genwqe_sgl_size(int num_pages)
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size)
{
- int rc;
+ int ret = -ENOMEM;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
@@ -317,7 +317,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return -ENOMEM;
+ return ret;
}
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
@@ -325,7 +325,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return -ENOMEM;
+ return ret;
}
/* Only use buffering on incomplete pages */
@@ -338,7 +338,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
user_addr, sgl->fpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out;
}
}
@@ -351,7 +351,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out2;
}
}
@@ -373,7 +373,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0;
sgl->sgl_size = 0;
- return -ENOMEM;
+
+ return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
@@ -582,6 +583,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* determine space needed for page_list. */
data = (unsigned long)uaddr;
offs = offset_in_page(data);
+ if (size > ULONG_MAX - PAGE_SIZE - offs) {
+ m->size = 0; /* mark unused and not added */
+ return -EINVAL;
+ }
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
m->page_list = kcalloc(m->nr_pages,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 99635dd9dbac..fc8cb855c6e6 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -979,6 +979,12 @@ static void kgdbts_run_tests(void)
int nmi_sleep = 0;
int i;
+ verbose = 0;
+ if (strstr(config, "V1"))
+ verbose = 1;
+ if (strstr(config, "V2"))
+ verbose = 2;
+
ptr = strchr(config, 'F');
if (ptr)
fork_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1062,13 +1068,6 @@ static int kgdbts_option_setup(char *opt)
return -ENOSPC;
}
strcpy(config, opt);
-
- verbose = 0;
- if (strstr(config, "V1"))
- verbose = 1;
- if (strstr(config, "V2"))
- verbose = 2;
-
return 0;
}
@@ -1080,9 +1079,6 @@ static int configure_kgdbts(void)
if (!strlen(config) || isspace(config[0]))
goto noconfig;
- err = kgdbts_option_setup(config);
- if (err)
- goto noconfig;
final_ack = 0;
run_plant_and_detach_test(1);
@@ -1132,7 +1128,7 @@ static void kgdbts_put_char(u8 chr)
static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
{
- int len = strlen(kmessage);
+ size_t len = strlen(kmessage);
if (len >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1152,7 +1148,7 @@ static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
- if (config[len - 1] == '\n')
+ if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
/* Go and configure with the new params. */
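The kgdbts change above switches len to size_t and guards the trailing-newline chop so an empty string is never indexed at len - 1. A minimal sketch of that guard:

#include <stdio.h>
#include <string.h>

static void chop_newline(char *s)
{
        size_t len = strlen(s);

        if (len && s[len - 1] == '\n')
                s[len - 1] = '\0';
}

int main(void)
{
        char a[] = "V1F5\n", b[] = "";

        chop_newline(a);
        chop_newline(b);        /* safe: no out-of-bounds read on "" */
        printf("[%s] [%s]\n", a, b);
        return 0;
}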
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index fdf954c2107f..6abc97b245e4 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -40,7 +40,9 @@ void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
/* lkdtm_rodata.c */
void lkdtm_rodata_do_nothing(void);
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index b2989f2d3126..035e51bea450 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -214,7 +214,9 @@ struct crashtype crashtypes[] = {
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index 45f1c0f96612..1a9dcdaa95f0 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -160,6 +160,11 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
@@ -191,6 +196,19 @@ void lkdtm_ACCESS_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
+void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+}
+
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0c98ed44df05..582b24d2c479 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -765,15 +765,16 @@ static struct device_type mei_cl_device_type = {
/**
* mei_cl_bus_set_name - set device name for me client device
+ * <controller>-<client device>
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
- cldev->name,
- mei_me_cl_uuid(cldev->me_cl),
- mei_me_cl_ver(cldev->me_cl));
+ dev_set_name(&cldev->dev, "%s-%pUl",
+ dev_name(cldev->bus->dev),
+ mei_me_cl_uuid(cldev->me_cl));
}
/**
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index b9f0710ffa6b..4007adc666f3 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -249,6 +249,9 @@ static int __init mic_probe(struct platform_device *pdev)
mdrv->dev = &pdev->dev;
snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name);
+ /* FIXME: use dma_set_mask_and_coherent() and check result */
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
mdev->mmio.pa = MIC_X100_MMIO_BASE;
mdev->mmio.len = MIC_X100_MMIO_LEN;
mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
@@ -294,18 +297,6 @@ static void mic_platform_shutdown(struct platform_device *pdev)
mic_remove(pdev);
}
-static u64 mic_dma_mask = DMA_BIT_MASK(64);
-
-static struct platform_device mic_platform_dev = {
- .name = mic_driver_name,
- .id = 0,
- .num_resources = 0,
- .dev = {
- .dma_mask = &mic_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
- },
-};
-
static struct platform_driver __refdata mic_platform_driver = {
.probe = mic_probe,
.remove = mic_remove,
@@ -315,6 +306,8 @@ static struct platform_driver __refdata mic_platform_driver = {
},
};
+static struct platform_device *mic_platform_dev;
+
static int __init mic_init(void)
{
int ret;
@@ -328,9 +321,12 @@ static int __init mic_init(void)
request_module("mic_x100_dma");
mic_init_card_debugfs();
- ret = platform_device_register(&mic_platform_dev);
+
+ mic_platform_dev = platform_device_register_simple(mic_driver_name,
+ 0, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(mic_platform_dev);
if (ret) {
- pr_err("platform_device_register ret %d\n", ret);
+ pr_err("platform_device_register_full ret %d\n", ret);
goto cleanup_debugfs;
}
ret = platform_driver_register(&mic_platform_driver);
@@ -341,7 +337,7 @@ static int __init mic_init(void)
return ret;
device_unregister:
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
cleanup_debugfs:
mic_exit_card_debugfs();
done:
@@ -351,7 +347,7 @@ done:
static void __exit mic_exit(void)
{
platform_driver_unregister(&mic_platform_driver);
- platform_device_unregister(&mic_platform_dev);
+ platform_device_unregister(mic_platform_dev);
mic_exit_card_debugfs();
}
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index cac3bcc308a7..7bb929f05d85 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
dma_fail:
if (!x100)
dma_pool_free(ep->remote_dev->signal_pool, status,
- status->src_dma_addr);
+ src - offsetof(struct scif_status, val));
alloc_fail:
return err;
}
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 6956f7e7d439..ca5f0102daef 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -70,7 +70,7 @@ xpc_get_rsvd_page_pa(int nasid)
unsigned long rp_pa = nasid; /* seed with nasid */
size_t len = 0;
size_t buf_len = 0;
- void *buf = buf;
+ void *buf = NULL;
void *buf_base = NULL;
enum xp_retval (*get_partition_rsvd_page_pa)
(void *, u64 *, unsigned long *, size_t *) =
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index f866a4baecb5..b9da2c6cc981 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -28,6 +28,9 @@
#include "vmci_driver.h"
#include "vmci_event.h"
+/* Use a wide upper bound for the maximum contexts. */
+#define VMCI_MAX_CONTEXTS 2000
+
/*
* List of current VMCI contexts. Contexts can be added by
* vmci_ctx_create() and removed via vmci_ctx_destroy().
@@ -124,19 +127,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
/* Initialize host-specific VMCI context. */
init_waitqueue_head(&context->host_context.wait_queue);
- context->queue_pair_array = vmci_handle_arr_create(0);
+ context->queue_pair_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
if (!context->queue_pair_array) {
error = -ENOMEM;
goto err_free_ctx;
}
- context->doorbell_array = vmci_handle_arr_create(0);
+ context->doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->doorbell_array) {
error = -ENOMEM;
goto err_free_qp_array;
}
- context->pending_doorbell_array = vmci_handle_arr_create(0);
+ context->pending_doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
error = -ENOMEM;
goto err_free_db_array;
@@ -211,7 +217,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags)
* We create an array to hold the subscribers we find when
* scanning through all contexts.
*/
- subscriber_array = vmci_handle_arr_create(0);
+ subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
if (subscriber_array == NULL)
return VMCI_ERROR_NO_MEM;
@@ -630,20 +636,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
spin_lock(&context->lock);
- list_for_each_entry(n, &context->notifier_list, node) {
- if (vmci_handle_is_equal(n->handle, notifier->handle)) {
- exists = true;
- break;
+ if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
+ list_for_each_entry(n, &context->notifier_list, node) {
+ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+ exists = true;
+ break;
+ }
}
- }
- if (exists) {
- kfree(notifier);
- result = VMCI_ERROR_ALREADY_EXISTS;
+ if (exists) {
+ kfree(notifier);
+ result = VMCI_ERROR_ALREADY_EXISTS;
+ } else {
+ list_add_tail_rcu(&notifier->node,
+ &context->notifier_list);
+ context->n_notifiers++;
+ result = VMCI_SUCCESS;
+ }
} else {
- list_add_tail_rcu(&notifier->node, &context->notifier_list);
- context->n_notifiers++;
- result = VMCI_SUCCESS;
+ kfree(notifier);
+ result = VMCI_ERROR_NO_MEM;
}
spin_unlock(&context->lock);
@@ -728,8 +740,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
u32 *buf_size, void **pbuf)
{
struct dbell_cpt_state *dbells;
- size_t n_doorbells;
- int i;
+ u32 i, n_doorbells;
n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
if (n_doorbells > 0) {
@@ -867,7 +878,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id,
spin_lock(&context->lock);
*db_handle_array = context->pending_doorbell_array;
- context->pending_doorbell_array = vmci_handle_arr_create(0);
+ context->pending_doorbell_array =
+ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
if (!context->pending_doorbell_array) {
context->pending_doorbell_array = *db_handle_array;
*db_handle_array = NULL;
@@ -949,12 +961,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
return VMCI_ERROR_NOT_FOUND;
spin_lock(&context->lock);
- if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
- vmci_handle_arr_append_entry(&context->doorbell_array, handle);
- result = VMCI_SUCCESS;
- } else {
+ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
+ result = vmci_handle_arr_append_entry(&context->doorbell_array,
+ handle);
+ else
result = VMCI_ERROR_DUPLICATE_ENTRY;
- }
spin_unlock(&context->lock);
vmci_ctx_put(context);
@@ -1090,15 +1101,16 @@ int vmci_ctx_notify_dbell(u32 src_cid,
if (!vmci_handle_arr_has_entry(
dst_context->pending_doorbell_array,
handle)) {
- vmci_handle_arr_append_entry(
+ result = vmci_handle_arr_append_entry(
&dst_context->pending_doorbell_array,
handle);
-
- ctx_signal_notify(dst_context);
- wake_up(&dst_context->host_context.wait_queue);
-
+ if (result == VMCI_SUCCESS) {
+ ctx_signal_notify(dst_context);
+ wake_up(&dst_context->host_context.wait_queue);
+ }
+ } else {
+ result = VMCI_SUCCESS;
}
- result = VMCI_SUCCESS;
}
spin_unlock(&dst_context->lock);
}
@@ -1125,13 +1137,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
if (context == NULL || vmci_handle_is_invalid(handle))
return VMCI_ERROR_INVALID_ARGS;
- if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
- vmci_handle_arr_append_entry(&context->queue_pair_array,
- handle);
- result = VMCI_SUCCESS;
- } else {
+ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
+ result = vmci_handle_arr_append_entry(
+ &context->queue_pair_array, handle);
+ else
result = VMCI_ERROR_DUPLICATE_ENTRY;
- }
return result;
}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738ae005..f005206d9033 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
if (entry->run_delayed) {
- schedule_work(&entry->work);
+ if (!schedule_work(&entry->work))
+ vmci_resource_put(resource);
} else {
entry->notify_cb(entry->client_data);
vmci_resource_put(resource);
@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
atomic_read(&dbell->active) == 1) {
if (dbell->run_delayed) {
vmci_resource_get(&dbell->resource);
- schedule_work(&dbell->work);
+ if (!schedule_work(&dbell->work))
+ vmci_resource_put(&dbell->resource);
} else {
dbell->notify_cb(dbell->client_data);
}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
index 344973a0fb0a..917e18a8af95 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -16,24 +16,29 @@
#include <linux/slab.h>
#include "vmci_handle_array.h"
-static size_t handle_arr_calc_size(size_t capacity)
+static size_t handle_arr_calc_size(u32 capacity)
{
- return sizeof(struct vmci_handle_arr) +
+ return VMCI_HANDLE_ARRAY_HEADER_SIZE +
capacity * sizeof(struct vmci_handle);
}
-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
{
struct vmci_handle_arr *array;
+ if (max_capacity == 0 || capacity > max_capacity)
+ return NULL;
+
if (capacity == 0)
- capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+ capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
+ max_capacity);
array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
if (!array)
return NULL;
array->capacity = capacity;
+ array->max_capacity = max_capacity;
array->size = 0;
return array;
@@ -44,27 +49,34 @@ void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
kfree(array);
}
-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
- struct vmci_handle handle)
+int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle)
{
struct vmci_handle_arr *array = *array_ptr;
if (unlikely(array->size >= array->capacity)) {
/* reallocate. */
struct vmci_handle_arr *new_array;
- size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
- size_t new_size = handle_arr_calc_size(new_capacity);
+ u32 capacity_bump = min(array->max_capacity - array->capacity,
+ array->capacity);
+ size_t new_size = handle_arr_calc_size(array->capacity +
+ capacity_bump);
+
+ if (array->size >= array->max_capacity)
+ return VMCI_ERROR_NO_MEM;
new_array = krealloc(array, new_size, GFP_ATOMIC);
if (!new_array)
- return;
+ return VMCI_ERROR_NO_MEM;
- new_array->capacity = new_capacity;
+ new_array->capacity += capacity_bump;
*array_ptr = array = new_array;
}
array->entries[array->size] = handle;
array->size++;
+
+ return VMCI_SUCCESS;
}
/*
@@ -74,7 +86,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
struct vmci_handle handle = VMCI_INVALID_HANDLE;
- size_t i;
+ u32 i;
for (i = 0; i < array->size; i++) {
if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
@@ -109,7 +121,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
* Handle at given index, VMCI_INVALID_HANDLE if invalid index.
*/
struct vmci_handle
-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
{
if (unlikely(index >= array->size))
return VMCI_INVALID_HANDLE;
@@ -120,7 +132,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle)
{
- size_t i;
+ u32 i;
for (i = 0; i < array->size; i++)
if (vmci_handle_is_equal(array->entries[i], entry_handle))
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
index b5f3a7f98cf1..0fc58597820e 100644
--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -17,32 +17,41 @@
#define _VMCI_HANDLE_ARRAY_H_
#include <linux/vmw_vmci_defs.h>
+#include <linux/limits.h>
#include <linux/types.h>
-#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
-#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
-
struct vmci_handle_arr {
- size_t capacity;
- size_t size;
+ u32 capacity;
+ u32 max_capacity;
+ u32 size;
+ u32 pad;
struct vmci_handle entries[];
};
-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
+ offsetof(struct vmci_handle_arr, entries)
+/* Select a default capacity that results in a 64 byte sized array */
+#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
+/* Make sure that the max array size can be expressed by a u32 */
+#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
+ ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
+ sizeof(struct vmci_handle))
+
+struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
- struct vmci_handle handle);
+int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+ struct vmci_handle handle);
struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
struct vmci_handle
entry_handle);
struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
struct vmci_handle
-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index);
bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
struct vmci_handle entry_handle);
struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
-static inline size_t vmci_handle_arr_get_size(
+static inline u32 vmci_handle_arr_get_size(
const struct vmci_handle_arr *array)
{
return array->size;
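The VMCI handle-array rework above caps growth at max_capacity and makes append report failure instead of growing without bound. A rough user-space sketch of the same policy (hypothetical structure, not the VMCI types):

#include <stdio.h>
#include <stdlib.h>

struct arr {
        unsigned int capacity, max_capacity, size;
        int entries[];
};

static struct arr *arr_create(unsigned int cap, unsigned int max_cap)
{
        struct arr *a;

        if (!max_cap || cap > max_cap)
                return NULL;
        if (!cap)
                cap = max_cap < 6 ? max_cap : 6;
        a = malloc(sizeof(*a) + cap * sizeof(int));
        if (!a)
                return NULL;
        a->capacity = cap;
        a->max_capacity = max_cap;
        a->size = 0;
        return a;
}

static int arr_append(struct arr **ap, int v)
{
        struct arr *a = *ap;

        if (a->size >= a->capacity) {
                unsigned int bump = a->max_capacity - a->capacity;
                struct arr *n;

                if (a->size >= a->max_capacity)
                        return -1;              /* hard cap reached */
                if (bump > a->capacity)
                        bump = a->capacity;     /* grow by at most 2x */
                n = realloc(a, sizeof(*a) +
                            (a->capacity + bump) * sizeof(int));
                if (!n)
                        return -1;
                n->capacity += bump;
                *ap = a = n;
        }
        a->entries[a->size++] = v;
        return 0;
}

int main(void)
{
        struct arr *a = arr_create(0, 8);
        int i, rc = 0;

        for (i = 0; i < 10 && !rc; i++)
                rc = arr_append(&a, i);
        printf("stored %u of 10, capacity %u\n", a->size, a->capacity);
        free(a);
        return 0;
}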
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index efb8a7965dd4..154f4204d58c 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
-static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
-{
- gpiod_set_value(pwrseq->reset_gpio, 1);
- udelay(1);
- gpiod_set_value(pwrseq->reset_gpio, 0);
- udelay(200);
-}
-
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
- __mmc_pwrseq_emmc_reset(pwrseq);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+ udelay(200);
}
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
@@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
{
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
struct mmc_pwrseq_emmc, reset_nb);
+ gpiod_set_value(pwrseq->reset_gpio, 1);
+ udelay(1);
+ gpiod_set_value(pwrseq->reset_gpio, 0);
+ udelay(200);
- __mmc_pwrseq_emmc_reset(pwrseq);
return NOTIFY_DONE;
}
@@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
if (IS_ERR(pwrseq->reset_gpio))
return PTR_ERR(pwrseq->reset_gpio);
- /*
- * register reset handler to ensure emmc reset also from
- * emergency_reboot(), priority 255 is the highest priority
- * so it will be executed before any system reboot handler.
- */
- pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
- pwrseq->reset_nb.priority = 255;
- register_restart_handler(&pwrseq->reset_nb);
+ if (!gpiod_cansleep(pwrseq->reset_gpio)) {
+ /*
+ * register reset handler to ensure emmc reset also from
+ * emergency_reboot(), priority 255 is the highest priority
+ * so it will be executed before any system reboot handler.
+ */
+ pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
+ pwrseq->reset_nb.priority = 255;
+ register_restart_handler(&pwrseq->reset_nb);
+ } else {
+ dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
+ }
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
pwrseq->pwrseq.dev = dev;
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 009e1cf3435c..ba81898b6e57 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -216,6 +216,14 @@ static int mmc_decode_scr(struct mmc_card *card)
if (scr->sda_spec3)
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
+
+ /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
+ if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
+ !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
+ pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1245,6 +1253,12 @@ int mmc_attach_sd(struct mmc_host *host)
goto err;
}
+ /*
+ * Some SD cards claims an out of spec VDD voltage range. Let's treat
+ * these bits as being in-valid and especially also bit7.
+ */
+ ocr &= ~0x7FFF;
+
rocr = mmc_select_voltage(host, ocr);
/*
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index affa9fc92c1b..32f28aacd614 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1115,7 +1115,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+static void init_mmcsd_host(struct mmc_davinci_host *host)
{
mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ba5fdd696ddb..9c50984b90f4 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1855,8 +1855,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if (err != -ETIMEDOUT) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 67f6bd24a9d0..24795454d106 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
}
status = spi_sync_locked(spi, &host->m);
+ if (status < 0) {
+ dev_dbg(&spi->dev, "read error %d\n", status);
+ return status;
+ }
if (host->dma_dev) {
dma_sync_single_for_cpu(host->dma_dev,
@@ -1150,17 +1154,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host)
* SPI protocol. Another is that when chipselect is released while
* the card returns BUSY status, the clock must issue several cycles
* with chipselect high before the card will stop driving its output.
+ *
+ * SPI_CS_HIGH means "asserted" here. In some cases like when using
+ * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
+ * inverted by gpiolib, so if we want to ascertain to drive it high
+ * we should toggle the default with an XOR as we do here.
*/
- host->spi->mode |= SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Just warn; most cards work without it. */
dev_warn(&host->spi->dev,
"can't change chip-select polarity\n");
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
} else {
mmc_spi_readbytes(host, 18);
- host->spi->mode &= ~SPI_CS_HIGH;
+ host->spi->mode ^= SPI_CS_HIGH;
if (spi_setup(host->spi) != 0) {
/* Wot, we can't get the same setup we had before? */
dev_err(&host->spi->dev,
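The mmc_spi hunk above toggles SPI_CS_HIGH with XOR so that, after the second toggle, whatever polarity was originally established is restored. A short sketch of why the XOR pair is preferable to set-then-clear here (flag value chosen only for illustration):

#include <stdio.h>

#define SPI_CS_HIGH     0x04

int main(void)
{
        unsigned int mode = 0x03 | SPI_CS_HIGH; /* bit already set by the core */
        unsigned int saved = mode;

        mode ^= SPI_CS_HIGH;    /* temporarily invert the chip-select polarity */
        /* ... talk to the card with the inverted chip select ... */
        mode ^= SPI_CS_HIGH;    /* second toggle restores the original state;
                                 * clearing the bit instead would lose it */

        printf("restored correctly: %s\n", mode == saved ? "yes" : "no");
        return 0;
}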
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 7360a7be1654..2bd0f80d3777 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -867,6 +867,7 @@ static void msdc_start_command(struct msdc_host *host,
WARN_ON(host->cmd);
host->cmd = cmd;
+ mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
if (!msdc_cmd_is_ready(host, mrq, cmd))
return;
@@ -878,7 +879,6 @@ static void msdc_start_command(struct msdc_host *host,
cmd->error = 0;
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
- mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
writel(cmd->arg, host->base + SDC_ARG);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index a4bf14e21b5e..21dfce21aa63 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
reg &= ~(1 << 5);
OMAP_MMC_WRITE(host, SDIO, reg);
/* Set maximum timeout */
- OMAP_MMC_WRITE(host, CTO, 0xff);
+ OMAP_MMC_WRITE(host, CTO, 0xfd);
}
static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 69928a8f0768..63cfcd1c19e6 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1690,6 +1690,36 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
if (mmc_pdata(host)->init_card)
mmc_pdata(host)->init_card(card);
+ else if (card->type == MMC_TYPE_SDIO ||
+ card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+ * to provide a mechanism to provide this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 94fe5780cd00..7022e94c8760 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -127,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
{}
};
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 242c5dc7a81e..85272a551570 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -93,7 +93,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
sdhci_get_of_property(pdev);
- mmc_of_parse(host->mmc);
+ res = mmc_of_parse(host->mmc);
+ if (res)
+ goto err;
/*
* Supply the existing CAPS, but clear the UHS modes. This
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index e4018f62ba7f..9f23d88a0fe9 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -126,7 +126,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ || mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
}
@@ -320,6 +321,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
+ /* HS200 is broken at this moment */
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
+
ret = sdhci_add_host(host);
if (ret)
goto pm_runtime_disable;
@@ -354,8 +358,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* detection procedure using the SDMCC_CD signal is bypassed.
* This bit is reset when a software reset for all command is performed
* so we need to implement our own reset function to set back this bit.
+ *
+ * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
*/
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ || mmc_gpio_get_cd(host->mmc) >= 0)
sdhci_at91_set_force_card_detect(host);
pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 45066dbf257d..4a0c085bcb13 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -639,6 +639,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
if (esdhc->vendor_ver > VENDOR_V_22)
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
+ if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
+ host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ }
+
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 088a3ae0dff0..ec1949045295 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -174,7 +174,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index efe4c8ae9d61..9b6870dce15b 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -968,8 +968,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
return (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
(mrq->sbc && mrq->sbc->error) ||
- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
- (mrq->data->stop && mrq->data->stop->error))) ||
+ (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
@@ -1021,6 +1020,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
host->data = NULL;
host->data_cmd = NULL;
+ /*
+ * The controller needs a reset of internal state machines upon error
+ * conditions.
+ */
+ if (data->error) {
+ if (!host->cmd || host->cmd == data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
+
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
sdhci_adma_table_post(host, data);
@@ -1045,17 +1054,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
if (data->stop &&
(data->error ||
!data->mrq->sbc)) {
-
- /*
- * The controller needs a reset of internal state machines
- * upon error conditions.
- */
- if (data->error) {
- if (!host->cmd || host->cmd == data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
-
/*
* 'cap_cmd_during_tfr' request must not use the command line
* after mmc_command_done() has been called. It is upper layer's
@@ -2463,8 +2461,9 @@ static void sdhci_timeout_data_timer(unsigned long data)
if (host->data || host->data_cmd ||
(host->cmd && sdhci_data_line_cmd(host->cmd))) {
- pr_err("%s: Timeout waiting for hardware interrupt.\n",
- mmc_hostname(host->mmc));
+ pr_err("%s: Timeout waiting for hardware interrupt. retries left=%d opcode=%x\n",
+ mmc_hostname(host->mmc), host->cmd ? host->cmd->retries : 0,
+ host->cmd ? host->cmd->opcode : 0);
sdhci_dumpregs(host);
if (host->data) {
@@ -2489,7 +2488,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
{
if (!host->cmd) {
/*
@@ -2512,20 +2511,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
else
host->cmd->error = -EILSEQ;
- /*
- * If this command initiates a data phase and a response
- * CRC error is signalled, the card can start transferring
- * data - the card may have received the command without
- * error. We must not terminate the mmc_request early.
- *
- * If the card did not receive the command or returned an
- * error which prevented it sending data, the data phase
- * will time out.
- */
+ /* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
SDHCI_INT_CRC) {
host->cmd = NULL;
+ *intmask_p |= SDHCI_INT_DATA_CRC;
return;
}
@@ -2757,7 +2748,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
@@ -3428,11 +3419,13 @@ int sdhci_setup_host(struct sdhci_host *host)
if (host->ops->get_min_clock)
mmc->f_min = host->ops->get_min_clock(host);
else if (host->version >= SDHCI_SPEC_300) {
- if (host->clk_mul) {
- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+ if (host->clk_mul)
max_clk = host->max_clk * host->clk_mul;
- } else
- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+ /*
+ * Divided Clock Mode minimum clock rate is always less than
+ * Programmable Clock Mode minimum clock rate.
+ */
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
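The sdhci change above passes the interrupt mask by pointer so the command stage can promote a command CRC error into a data CRC error for the data stage to handle. A compact sketch of that hand-off (simplified status bits, not the SDHCI definitions):

#include <stdio.h>

#define INT_CMD_CRC     0x01
#define INT_DATA_CRC    0x02

static void cmd_stage(unsigned int status, unsigned int *pending)
{
        if (status & INT_CMD_CRC) {
                printf("command CRC error\n");
                /* Promote it so the data stage cleans up the transfer. */
                *pending |= INT_DATA_CRC;
        }
}

static void data_stage(unsigned int status)
{
        if (status & INT_DATA_CRC)
                printf("data stage: aborting transfer\n");
}

int main(void)
{
        unsigned int pending = INT_CMD_CRC;

        cmd_stage(pending, &pending);
        data_stage(pending);
        return 0;
}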
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0fc1f73b0d23..3e025766181b 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1076,7 +1076,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
mmc->ops = &tmio_mmc_ops;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index de35a2a362f9..8725e406a9eb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1624,29 +1624,35 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
continue;
}
- if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+ /*
+ * We check "time_after" and "!chip_good" before checking
+ * "chip_good" to avoid the failure due to scheduling.
+ */
+ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) {
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
xip_disable(map, chip, adr);
+ ret = -EIO;
break;
}
- if (chip_ready(map, adr))
+ if (chip_good(map, adr, datum))
break;
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
}
+
/* Did we succeed? */
- if (!chip_good(map, adr, datum)) {
+ if (ret) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
goto retry;
-
- ret = -EIO;
+ }
}
xip_enable(map, chip, adr);
op_done:
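The cfi_cmdset_0002 fix above clears ret before retrying so a stale error from an earlier poll cannot outlive a later successful attempt. A small sketch of that pitfall and its fix (stand-in poll function, not the flash code):

#include <stdio.h>

#define MAX_RETRIES     3

static int attempts_left = 3;

/* Pretend poll: reports ready (1) only on the last allowed attempt. */
static int poll_ready(void)
{
        return --attempts_left == 0;
}

static int do_write(void)
{
        int retry_cnt = 0, ret = 0;

retry:
        if (!poll_ready())
                ret = -5;       /* stand-in for -EIO on timeout */

        if (ret) {
                if (++retry_cnt <= MAX_RETRIES) {
                        ret = 0;        /* forget the failed attempt; without
                                         * this a later success would still
                                         * return the stale error */
                        goto retry;
                }
        }
        return ret;
}

int main(void)
{
        printf("result: %d\n", do_write());
        return 0;
}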
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8b66e52ca3cc..9734e6903fe6 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -247,22 +247,25 @@ static int phram_setup(const char *val)
ret = parse_num64(&start, token[1]);
if (ret) {
- kfree(name);
parse_err("illegal start address\n");
+ goto error;
}
ret = parse_num64(&len, token[2]);
if (ret) {
- kfree(name);
parse_err("illegal device length\n");
+ goto error;
}
ret = register_device(name, start, len);
- if (!ret)
- pr_info("%s device: %#llx at %#llx\n", name, len, start);
- else
- kfree(name);
+ if (ret)
+ goto error;
+
+ pr_info("%s device: %#llx at %#llx\n", name, len, start);
+ return 0;
+error:
+ kfree(name);
return ret;
}
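The phram rework above funnels every failure through one error label that frees the name, instead of duplicating kfree() on each branch. A standalone sketch of that single-exit cleanup pattern (hypothetical parse helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_u64(unsigned long long *out, const char *s)
{
        char *end;

        *out = strtoull(s, &end, 0);
        return (*end == '\0' && end != s) ? 0 : -1;
}

static int setup(const char *name_arg, const char *start_arg,
                 const char *len_arg)
{
        unsigned long long start, len;
        char *name = strdup(name_arg);
        int ret;

        if (!name)
                return -1;

        ret = parse_u64(&start, start_arg);
        if (ret)
                goto error;
        ret = parse_u64(&len, len_arg);
        if (ret)
                goto error;

        printf("%s device: %#llx at %#llx\n", name, len, start);
        ret = 0;
        /* In the driver the name is kept by register_device(); the sketch
         * simply frees it on every path. */
error:
        free(name);
        return ret;
}

int main(void)
{
        setup("phram0", "0x80000000", "0x100000");
        setup("phram1", "0x80000000", "bogus");  /* exercises the error path */
        return 0;
}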
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index dd5069876537..4a7da5fde714 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -595,6 +595,26 @@ static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return 0;
}
+/*
+ * The purpose of this function is to ensure a memcpy_toio() with byte writes
+ * only. Its structure is inspired from the ARM implementation of _memcpy_toio()
+ * which also does single byte writes but cannot be used here as this is just an
+ * implementation detail and not part of the API. Not mentioning the comment
+ * stating that _memcpy_toio() should be optimized.
+ */
+static void spear_smi_memcpy_toio_b(volatile void __iomem *dest,
+ const void *src, size_t len)
+{
+ const unsigned char *from = src;
+
+ while (len) {
+ len--;
+ writeb(*from, dest);
+ from++;
+ dest++;
+ }
+}
+
static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
void __iomem *dest, const void *src, size_t len)
{
@@ -617,7 +637,23 @@ static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
ctrlreg1 = readl(dev->io_base + SMI_CR1);
writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
- memcpy_toio(dest, src, len);
+ /*
+ * In Write Burst mode (WB_MODE), the specs states that writes must be:
+ * - incremental
+ * - of the same size
+ * The ARM implementation of memcpy_toio() will optimize the number of
+ * I/O by using as much 4-byte writes as possible, surrounded by
+ * 2-byte/1-byte access if:
+ * - the destination is not 4-byte aligned
+ * - the length is not a multiple of 4-byte.
+ * Avoid this alternance of write access size by using our own 'byte
+ * access' helper if at least one of the two conditions above is true.
+ */
+ if (IS_ALIGNED(len, sizeof(u32)) &&
+ IS_ALIGNED((uintptr_t)dest, sizeof(u32)))
+ memcpy_toio(dest, src, len);
+ else
+ spear_smi_memcpy_toio_b(dest, src, len);
writel(ctrlreg1, dev->io_base + SMI_CR1);
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 018c75faadb3..e1c283ccbbde 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -81,7 +81,6 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
GFP_KERNEL);
if (!shared) {
- kfree(lpddr);
kfree(mtd);
return NULL;
}
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 3fad35942895..7a716bed1158 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -29,7 +29,6 @@
struct of_flash_list {
struct mtd_info *mtd;
struct map_info map;
- struct resource *res;
};
struct of_flash {
@@ -54,18 +53,10 @@ static int of_flash_remove(struct platform_device *dev)
mtd_concat_destroy(info->cmtd);
}
- for (i = 0; i < info->list_size; i++) {
+ for (i = 0; i < info->list_size; i++)
if (info->list[i].mtd)
map_destroy(info->list[i].mtd);
- if (info->list[i].map.virt)
- iounmap(info->list[i].map.virt);
-
- if (info->list[i].res) {
- release_resource(info->list[i].res);
- kfree(info->list[i].res);
- }
- }
return 0;
}
@@ -223,10 +214,11 @@ static int of_flash_probe(struct platform_device *dev)
err = -EBUSY;
res_size = resource_size(&res);
- info->list[i].res = request_mem_region(res.start, res_size,
- dev_name(&dev->dev));
- if (!info->list[i].res)
+ info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res);
+ if (IS_ERR(info->list[i].map.virt)) {
+ err = PTR_ERR(info->list[i].map.virt);
goto err_out;
+ }
err = -ENXIO;
width = of_get_property(dp, "bank-width", NULL);
@@ -247,15 +239,6 @@ static int of_flash_probe(struct platform_device *dev)
return err;
}
- err = -ENOMEM;
- info->list[i].map.virt = ioremap(info->list[i].map.phys,
- info->list[i].map.size);
- if (!info->list[i].map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash"
- " region\n");
- goto err_out;
- }
-
simple_map_init(&info->list[i].map);
/*
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 55fdb8e1fd2a..488b652ba9e6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -6,7 +6,7 @@
extern struct mutex mtd_table_mutex;
struct mtd_info *__mtd_next_device(int i);
-int add_mtd_device(struct mtd_info *mtd);
+int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index fccdd49bb964..5e2d1aa5e81e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -648,10 +648,21 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&new->mtd);
+ ret = add_mtd_device(&new->mtd);
+ if (ret)
+ goto err_remove_part;
mtd_add_partition_attrs(new);
+ return 0;
+
+err_remove_part:
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&new->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(new);
+
return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -696,28 +707,42 @@ int add_mtd_partitions(struct mtd_info *master,
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
- int i;
+ int i, ret;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
slave = allocate_partition(master, parts + i, i, cur_offset);
if (IS_ERR(slave)) {
- del_mtd_partitions(master);
- return PTR_ERR(slave);
+ ret = PTR_ERR(slave);
+ goto err_del_partitions;
}
mutex_lock(&mtd_partitions_mutex);
list_add(&slave->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
- add_mtd_device(&slave->mtd);
+ ret = add_mtd_device(&slave->mtd);
+ if (ret) {
+ mutex_lock(&mtd_partitions_mutex);
+ list_del(&slave->list);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ free_partition(slave);
+ goto err_del_partitions;
+ }
+
mtd_add_partition_attrs(slave);
cur_offset = slave->offset + slave->mtd.size;
}
return 0;
+
+err_del_partitions:
+ del_mtd_partitions(master);
+
+ return ret;
}
static DEFINE_SPINLOCK(part_parser_lock);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 5223a2182ee4..ca95ae00215e 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -810,19 +810,21 @@ static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
return ret & NAND_STATUS_FAIL ? -EIO : 0;
}
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
+ u32 sectors)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtk_nfc *nfc = nand_get_controller_data(chip);
struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
struct mtk_ecc_stats stats;
+ u32 reg_size = mtk_nand->fdm.reg_size;
int rc, i;
rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
if (rc) {
memset(buf, 0xff, sectors * chip->ecc.size);
for (i = 0; i < sectors; i++)
- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+ memset(oob_ptr(chip, start + i), 0xff, reg_size);
return 0;
}
@@ -842,7 +844,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
u32 spare = mtk_nand->spare_per_sector;
u32 column, sectors, start, end, reg;
dma_addr_t addr;
- int bitflips;
+ int bitflips = 0;
size_t len;
u8 *buf;
int rc;
@@ -910,14 +912,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");
bitflips = -EIO;
- } else {
- bitflips = 0;
- if (!raw) {
- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
- bitflips = rc < 0 ? -ETIMEDOUT :
- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
- mtk_nfc_read_fdm(chip, start, sectors);
- }
+ } else if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
}
dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 442ce619b3b6..d6c013f93b8c 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -480,7 +480,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
goto convert; /* DMA success */
/* do polling transfer */
@@ -539,7 +539,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+ flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
return; /* DMA success */
/* do polling transfer */
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index e26c4f880df6..886355bfa761 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1420,7 +1420,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_randomizer_enable(mtd);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
- nfc->regs + NFC_REG_RCMD_SET);
+ nfc->regs + NFC_REG_WCMD_SET);
dma_async_issue_pending(nfc->dmac);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 88670b8ccb05..b70c61bb9c0a 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1072,7 +1072,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- int ret;
+ ssize_t ret;
dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 35c76e4053f8..aab57593748a 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1146,10 +1146,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
- put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+ put_mtd_device(ubi->mtd);
put_device(&ubi->dev);
return 0;
}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b44c8d348e78..e7b177c61642 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -73,7 +73,7 @@ static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- if (test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
@@ -1127,7 +1127,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
- unsigned long *seen_pebs = NULL;
+ unsigned long *seen_pebs;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1141,7 +1141,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvbuf) {
ret = -ENOMEM;
- goto out_kfree;
+ goto out_free_avbuf;
}
avhdr = ubi_get_vid_hdr(avbuf);
@@ -1150,7 +1150,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
seen_pebs = init_seen(ubi);
if (IS_ERR(seen_pebs)) {
ret = PTR_ERR(seen_pebs);
- goto out_kfree;
+ goto out_free_dvbuf;
}
spin_lock(&ubi->volumes_lock);
@@ -1318,7 +1318,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
- goto out_kfree;
+ goto out_free_seen;
}
for (i = 0; i < new_fm->used_blocks; i++) {
@@ -1340,7 +1340,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1350,7 +1350,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
- goto out_kfree;
+ goto out_free_seen;
}
}
@@ -1360,10 +1360,13 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
-out_kfree:
- ubi_free_vid_buf(avbuf);
- ubi_free_vid_buf(dvbuf);
+out_free_seen:
free_seen(seen_pebs);
+out_free_dvbuf:
+ ubi_free_vid_buf(dvbuf);
+out_free_avbuf:
+ ubi_free_vid_buf(avbuf);
+
out:
return ret;
}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 88b1897aeb40..7826f7c4ec2f 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -227,9 +227,9 @@ out_unlock:
out_free:
kfree(desc);
out_put_ubi:
- ubi_put_device(ubi);
ubi_err(ubi, "cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
+ ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 6ea963e3b89a..85ffd0561827 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1009,31 +1009,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
static void arcnet_rx(struct net_device *dev, int bufnum)
{
struct arcnet_local *lp = netdev_priv(dev);
- struct archdr pkt;
+ union {
+ struct archdr pkt;
+ char buf[512];
+ } rxdata;
struct arc_rfc1201 *soft;
int length, ofs;
- soft = &pkt.soft.rfc1201;
+ soft = &rxdata.pkt.soft.rfc1201;
- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
- if (pkt.hard.offset[0]) {
- ofs = pkt.hard.offset[0];
+ lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
+ if (rxdata.pkt.hard.offset[0]) {
+ ofs = rxdata.pkt.hard.offset[0];
length = 256 - ofs;
} else {
- ofs = pkt.hard.offset[1];
+ ofs = rxdata.pkt.hard.offset[1];
length = 512 - ofs;
}
/* get the full header, if possible */
- if (sizeof(pkt.soft) <= length) {
- lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
+ if (sizeof(rxdata.pkt.soft) <= length) {
+ lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
} else {
- memset(&pkt.soft, 0, sizeof(pkt.soft));
+ memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
}
arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
- bufnum, pkt.hard.source, pkt.hard.dest, length);
+ bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
dev->stats.rx_packets++;
dev->stats.rx_bytes += length + ARC_HDR_SIZE;
@@ -1042,13 +1045,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
if (arc_proto_map[soft->proto]->is_ip) {
if (BUGLVL(D_PROTO)) {
struct ArcProto
- *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
+ *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
*newp = arc_proto_map[soft->proto];
if (oldp != newp) {
arc_printk(D_PROTO, dev,
"got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
- soft->proto, pkt.hard.source,
+ soft->proto, rxdata.pkt.hard.source,
newp->suffix, oldp->suffix);
}
}
@@ -1057,10 +1060,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
lp->default_proto[0] = soft->proto;
/* in striking contrast, the following isn't a hack. */
- lp->default_proto[pkt.hard.source] = soft->proto;
+ lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
}
/* call the protocol-specific receiver. */
- arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
+ arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
}
static void null_rx(struct net_device *dev, int bufnum,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 91d8a48e53c3..1f8fbd7776fb 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -71,11 +71,6 @@ struct arp_pkt {
};
#pragma pack()
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
- return (struct arp_pkt *)skb_network_header(skb);
-}
-
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
bool strict_match);
@@ -574,10 +569,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
spin_unlock(&bond->mode_lock);
}
-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+static struct slave *rlb_choose_channel(struct sk_buff *skb,
+ struct bonding *bond,
+ const struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct arp_pkt *arp = arp_pkt(skb);
struct slave *assigned_slave, *curr_active_slave;
struct rlb_client_info *client_info;
u32 hash_index = 0;
@@ -674,8 +670,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
*/
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
- struct arp_pkt *arp = arp_pkt(skb);
struct slave *tx_slave = NULL;
+ struct arp_pkt *arp;
+
+ if (!pskb_network_may_pull(skb, sizeof(*arp)))
+ return NULL;
+ arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
@@ -685,7 +685,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
- tx_slave = rlb_choose_channel(skb, bond);
+ tx_slave = rlb_choose_channel(skb, bond, arp);
if (tx_slave)
ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
@@ -695,7 +695,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* When the arp reply is received the entry will be updated
* with the correct unicast address of the client.
*/
- rlb_choose_channel(skb, bond);
+ rlb_choose_channel(skb, bond, arp);
/* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
@@ -1371,26 +1371,31 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
bool do_tx_balance = true;
u32 hash_index = 0;
const u8 *hash_start = NULL;
- struct ipv6hdr *ip6hdr;
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
- (iph->daddr == ip_bcast) ||
- (iph->protocol == IPPROTO_IGMP)) {
+ (!pskb_network_may_pull(skb, sizeof(*iph)))) {
+ do_tx_balance = false;
+ break;
+ }
+ iph = ip_hdr(skb);
+ if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
do_tx_balance = false;
break;
}
hash_start = (char *)&(iph->daddr);
hash_size = sizeof(iph->daddr);
- }
break;
- case ETH_P_IPV6:
+ }
+ case ETH_P_IPV6: {
+ const struct ipv6hdr *ip6hdr;
+
/* IPv6 doesn't really use broadcast mac address, but leave
* that here just in case.
*/
@@ -1407,7 +1412,11 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- /* Additianally, DAD probes should not be tx-balanced as that
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ /* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
*/
@@ -1417,17 +1426,26 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char *)&(ipv6_hdr(skb)->daddr);
- hash_size = sizeof(ipv6_hdr(skb)->daddr);
+ hash_start = (char *)&ip6hdr->daddr;
+ hash_size = sizeof(ip6hdr->daddr);
break;
- case ETH_P_IPX:
- if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+ }
+ case ETH_P_IPX: {
+ const struct ipxhdr *ipxhdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+ do_tx_balance = false;
+ break;
+ }
+ ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+ if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
/* something is wrong with this packet */
do_tx_balance = false;
break;
}
- if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+ if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
/* The only protocol worth balancing in
* this family since it has an "ARP" like
* mechanism
@@ -1436,9 +1454,11 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
+ eth_data = eth_hdr(skb);
hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
+ }
case ETH_P_ARP:
do_tx_balance = false;
if (bond_info->rlb_enabled)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 24a3433f3944..d52fd842ef1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1107,7 +1107,9 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
- bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
+ bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
bond_dev->hard_header_len = max_hard_header_len;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1757,7 +1759,8 @@ err_detach:
slave_disable_netpoll(new_slave);
err_close:
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_restore_mac:
@@ -1958,7 +1961,8 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_set_mtu(slave_dev, slave->original_mtu);
- slave_dev->priv_flags &= ~IFF_BONDING;
+ if (!netif_is_bond_master(slave_dev))
+ slave_dev->priv_flags &= ~IFF_BONDING;
bond_free_slave(slave);
@@ -2129,6 +2133,15 @@ static void bond_miimon_commit(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
+ /* For 802.3ad mode, check current slave speed and
+ * duplex again in case its port was disabled after
+ * invalid speed/duplex reporting but recovered before
+ * link monitoring could make a decision on the actual
+ * link status
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ slave->link == BOND_LINK_UP)
+ bond_3ad_adapter_speed_duplex_changed(slave);
continue;
case BOND_LINK_UP:
@@ -3134,8 +3147,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
@@ -3784,8 +3801,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ int slave_cnt;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
@@ -3794,23 +3811,32 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
- slave = rcu_dereference(bond->curr_active_slave);
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- } else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph;
- if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ goto non_igmp;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
+ return NETDEV_TX_OK;
}
}
+non_igmp:
+ slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+ } else {
+ bond_tx_drop(bond_dev, skb);
+ }
return NETDEV_TX_OK;
}
@@ -3939,7 +3965,7 @@ out:
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
- for (idx = 0; idx < old_arr->count; idx++) {
+ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];
@@ -4237,12 +4263,12 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->features |= NETIF_F_NETNS_LOCAL;
bond_dev->hw_features = BOND_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
+ bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
}
/* Destroy a bonding device.
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 473da3bf10c6..258cb3999b0e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1065,13 +1065,6 @@ static int bond_option_arp_validate_set(struct bonding *bond,
{
netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
newval->string, newval->value);
-
- if (bond->dev->flags & IFF_UP) {
- if (!newval->value)
- bond->recv_probe = NULL;
- else if (bond->params.arp_interval)
- bond->recv_probe = bond_arp_rcv;
- }
bond->params.arp_validate = newval->value;
return 0;
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 7d16c51e6913..641a532b67cb 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce759456..7f79a6cf5665 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1464,7 +1464,7 @@ static void __exit cfhsi_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, n, &cfhsi_list) {
cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdev(cfhsi->ndev);
+ unregister_netdevice(cfhsi->ndev);
}
rtnl_unlock();
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3dccd3200d5..4ead5a18b794 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -52,6 +52,7 @@
#define CONTROL_EX_PDR BIT(8)
/* control register */
+#define CONTROL_SWR BIT(15)
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
#define CONTROL_DISABLE_AR BIT(5)
@@ -97,6 +98,9 @@
#define BTR_TSEG2_SHIFT 12
#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+/* interrupt register */
+#define INT_STS_PENDING 0x8000
+
/* brp extension register */
#define BRP_EXT_BRPE_MASK 0x0f
#define BRP_EXT_BRPE_SHIFT 0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
IF_MCONT_RCV_EOB);
}
+static int c_can_software_reset(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ int retry = 0;
+
+ if (priv->type != BOSCH_D_CAN)
+ return 0;
+
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
+ while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
+ msleep(20);
+ if (retry++ > 100) {
+ netdev_err(dev, "CCTRL: software reset failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
/*
* Configure C_CAN chip:
* - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
static int c_can_chip_config(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = c_can_software_reset(dev);
+ if (err)
+ return err;
/* enable automatic retransmission */
priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -1029,10 +1058,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
u16 curr, last = priv->last_status;
int work_done = 0;
- priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
- /* Ack status on C_CAN. D_CAN is self clearing */
- if (priv->type != BOSCH_D_CAN)
- priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+ /* Only read the status register if a status interrupt was pending */
+ if (atomic_xchg(&priv->sie_pending, 0)) {
+ priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+ /* Ack status on C_CAN. D_CAN is self clearing */
+ if (priv->type != BOSCH_D_CAN)
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+ } else {
+ /* no change detected ... */
+ curr = last;
+ }
/* handle state changes */
if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1083,10 +1118,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
+ int reg_int;
- if (!priv->read_reg(priv, C_CAN_INT_REG))
+ reg_int = priv->read_reg(priv, C_CAN_INT_REG);
+ if (!reg_int)
return IRQ_NONE;
+ /* save for later use */
+ if (reg_int & INT_STS_PENDING)
+ atomic_set(&priv->sie_pending, 1);
+
/* disable all interrupts and schedule the NAPI */
c_can_irq_control(priv, false);
napi_schedule(&priv->napi);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 8acdc7fa4792..d5567a7c1c6d 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -198,6 +198,7 @@ struct c_can_priv {
struct net_device *dev;
struct device *device;
atomic_t tx_active;
+ atomic_t sie_pending;
unsigned long tx_dir;
int last_status;
u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
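As a rough illustration of the hand-off introduced above (a hedged user-space sketch, not the driver itself; the register value, bit mask and function names are made up for the example), the ISR only latches that a status interrupt was seen, and the poll routine consumes that latch with an atomic exchange so the self-clearing status register is read at most once per pending event:
#include <stdatomic.h>
#include <stdio.h>

static atomic_int sie_pending;
static int hw_status = 0x5a;		/* stand-in for C_CAN_STS_REG */
static int last_status;

static void isr(int reg_int)
{
	if (reg_int & 0x8000)		/* INT_STS_PENDING */
		atomic_store(&sie_pending, 1);
	/* the real ISR then masks interrupts and schedules NAPI */
}

static int poll(void)
{
	if (atomic_exchange(&sie_pending, 0))
		last_status = hw_status;	/* read (and ack) status */
	return last_status;			/* otherwise: no change */
}

int main(void)
{
	isr(0x8001);
	printf("after a pending status IRQ: 0x%x\n", poll());
	printf("with no new IRQ:            0x%x\n", poll());
	return 0;
}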
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 214a48703a4e..ffc5467a1ec2 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1095,6 +1095,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
int register_candev(struct net_device *dev)
{
dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7abab349f54d..5de7602f7285 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -210,7 +210,7 @@
#define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16)
#define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff)
-#define FLEXCAN_TIMEOUT_US (50)
+#define FLEXCAN_TIMEOUT_US (250)
#define FLEXCAN_FDCTRL_FDRATE BIT(31)
@@ -1271,6 +1271,7 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mecr = flexcan_read(priv, FLEXCAN_MECR);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
flexcan_write(priv, FLEXCAN_MECR, reg_mecr);
+ reg_mecr |= FLEXCAN_MECR_ECCDIS;
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
flexcan_write(priv, FLEXCAN_MECR, reg_mecr);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index acb708fc1463..0a7d818a06f3 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -392,13 +392,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct mscan_regs __iomem *regs = priv->reg_base;
struct net_device_stats *stats = &dev->stats;
- int npackets = 0;
- int ret = 1;
+ int work_done = 0;
struct sk_buff *skb;
struct can_frame *frame;
u8 canrflg;
- while (npackets < quota) {
+ while (work_done < quota) {
canrflg = in_8(&regs->canrflg);
if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
break;
@@ -419,18 +418,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
- npackets++;
+ work_done++;
netif_receive_skb(skb);
}
- if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
- napi_complete(&priv->napi);
- clear_bit(F_RX_PROGRESS, &priv->flags);
- if (priv->can.state < CAN_STATE_BUS_OFF)
- out_8(&regs->canrier, priv->shadow_canrier);
- ret = 0;
+ if (work_done < quota) {
+ if (likely(napi_complete_done(&priv->napi, work_done))) {
+ clear_bit(F_RX_PROGRESS, &priv->flags);
+ if (priv->can.state < CAN_STATE_BUS_OFF)
+ out_8(&regs->canrier, priv->shadow_canrier);
+ }
}
- return ret;
+ return work_done;
}
static irqreturn_t mscan_isr(int irq, void *dev_id)
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index dd56133cc461..fc9f8b01ecae 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index eb7173713bbc..d0435c7631ff 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -147,7 +147,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
- cf.can_id = 0;
+ memset(&cf, 0, sizeof(cf));
switch (*cmd) {
case 'r':
@@ -186,8 +186,6 @@ static void slc_bump(struct slcan *sl)
else
return;
- *(u64 *) (&cf.data) = 0; /* clear payload */
-
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
@@ -344,9 +342,16 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = tty->disc_data;
+ struct slcan *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
/* Send a can_frame to a TTY queue. */
@@ -613,6 +618,11 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ slc_free_netdev(sl->dev);
+ /* do not call free_netdev before rtnl_unlock */
+ rtnl_unlock();
+ free_netdev(sl->dev);
+ return err;
err_exit:
rtnl_unlock();
@@ -638,10 +648,11 @@ static void slcan_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* Flush network side */
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index 148cae5871a6..249d2db7d600 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -2,9 +2,10 @@ menu "CAN SPI interfaces"
depends on SPI
config CAN_MCP251X
- tristate "Microchip MCP251x SPI CAN controllers"
+ tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
depends on HAS_DMA
---help---
- Driver for the Microchip MCP251x SPI CAN controllers.
+ Driver for the Microchip MCP251x and MCP25625 SPI CAN
+ controllers.
endmenu
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index f3f05fea8e1f..ec0b3d025867 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1,5 +1,5 @@
/*
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
*
* MCP2510 support and bug fixes by Christian Pellegrin
* <chripell@evolware.org>
@@ -41,7 +41,7 @@
* static struct spi_board_info spi_board_info[] = {
* {
* .modalias = "mcp2510",
- * // or "mcp2515" depending on your controller
+ * // "mcp2515" or "mcp25625" depending on your controller
* .platform_data = &mcp251x_info,
* .irq = IRQ_EINT13,
* .max_speed_hz = 2*1000*1000,
@@ -238,6 +238,7 @@ static const struct can_bittiming_const mcp251x_bittiming_const = {
enum mcp251x_model {
CAN_MCP251X_MCP2510 = 0x2510,
CAN_MCP251X_MCP2515 = 0x2515,
+ CAN_MCP251X_MCP25625 = 0x25625,
};
struct mcp251x_priv {
@@ -280,7 +281,6 @@ static inline int mcp251x_is_##_model(struct spi_device *spi) \
}
MCP251X_IS(2510);
-MCP251X_IS(2515);
static void mcp251x_clean(struct net_device *net)
{
@@ -627,7 +627,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- u8 reg;
+ unsigned long timeout;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -640,11 +640,20 @@ static int mcp251x_hw_reset(struct spi_device *spi)
/* Wait for oscillator startup timer after reset */
mdelay(MCP251X_OST_DELAY_MS);
-
- reg = mcp251x_read_reg(spi, CANSTAT);
- if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
- return -ENODEV;
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ usleep_range(MCP251X_OST_DELAY_MS * 1000,
+ MCP251X_OST_DELAY_MS * 1000 * 2);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev,
+ "MCP251x didn't enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
return 0;
}
@@ -821,9 +830,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
- /*
- * Free one buffer ASAP
- * (The MCP2515 does this automatically.)
+ /* Free one buffer ASAP
+ * (The MCP2515/25625 does this automatically.)
*/
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
@@ -832,7 +840,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
/* receive buffer 1 */
if (intf & CANINTF_RX1IF) {
mcp251x_hw_rx(spi, 1);
- /* the MCP2515 does this automatically */
+ /* The MCP2515/25625 does this automatically. */
if (mcp251x_is_2510(spi))
clear_intf |= CANINTF_RX1IF;
}
@@ -1007,6 +1015,10 @@ static const struct of_device_id mcp251x_of_match[] = {
.compatible = "microchip,mcp2515",
.data = (void *)CAN_MCP251X_MCP2515,
},
+ {
+ .compatible = "microchip,mcp25625",
+ .data = (void *)CAN_MCP251X_MCP25625,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, mcp251x_of_match);
@@ -1020,6 +1032,10 @@ static const struct spi_device_id mcp251x_id_table[] = {
.name = "mcp2515",
.driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515,
},
+ {
+ .name = "mcp25625",
+ .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625,
+ },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
@@ -1260,5 +1276,5 @@ module_spi_driver(mcp251x_can_driver);
MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
"Christian Pellegrin <chripell@evolware.org>");
-MODULE_DESCRIPTION("Microchip 251x CAN driver");
+MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5d5012337d9e..a65203e6ea5f 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -632,6 +632,7 @@ static int gs_can_open(struct net_device *netdev)
rc);
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
}
@@ -926,7 +927,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_HOST_FORMAT,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
hconf,
sizeof(*hconf),
1000);
@@ -949,7 +950,7 @@ static int gs_usb_probe(struct usb_interface *intf,
GS_USB_BREQ_DEVICE_CONFIG,
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
- intf->altsetting[0].desc.bInterfaceNumber,
+ intf->cur_altsetting->desc.bInterfaceNumber,
dconf,
sizeof(*dconf),
1000);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 838545ce468d..0e1fc6c4360e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -108,7 +108,7 @@ struct pcan_usb_msg_context {
u8 *end;
u8 rec_cnt;
u8 rec_idx;
- u8 rec_data_idx;
+ u8 rec_ts_idx;
struct net_device *netdev;
struct pcan_usb *pdev;
};
@@ -441,8 +441,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
}
if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
/* no error (back to active state) */
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
- return 0;
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ break;
}
break;
@@ -465,9 +465,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
}
if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
- /* no error (back to active state) */
- mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE;
- return 0;
+ /* no error (back to warning state) */
+ new_state = CAN_STATE_ERROR_WARNING;
+ break;
}
break;
@@ -506,6 +506,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
mc->pdev->dev.can.can_stats.error_warning++;
break;
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ break;
+
default:
/* CAN_STATE_MAX (trick to handle other errors) */
cf->can_id |= CAN_ERR_CRTL;
@@ -552,10 +557,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
mc->ptr += PCAN_USB_CMD_ARGS;
if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
- int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+ int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
if (err)
return err;
+
+ /* The next packet in the buffer will carry its timestamp in a
+ * single byte
+ */
+ mc->rec_ts_idx++;
}
switch (f) {
@@ -638,10 +648,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
cf->can_dlc = get_can_dlc(rec_len);
- /* first data packet timestamp is a word */
- if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+ /* Only first packet timestamp is a word */
+ if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
goto decode_failed;
+ /* The next packet in the buffer will carry its timestamp in a single byte */
+ mc->rec_ts_idx++;
+
/* read data */
memset(cf->data, 0x0, sizeof(cf->data));
if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -695,7 +708,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
/* handle normal can frames here */
} else {
err = pcan_usb_decode_data(&mc, sl);
- mc.rec_data_idx++;
}
}
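The two comments above describe the per-buffer timestamp layout. A hedged sketch of that bookkeeping (user-space, simplified framing; the byte layout here is only a stand-in for the real PCAN-USB records):
#include <stdint.h>
#include <stdio.h>

/* First timestamp in a buffer is a 16-bit little-endian word, every
 * following record carries a single byte; rec_ts_idx counts how many
 * timestamps have been decoded from the current buffer. */
static size_t decode_ts(const uint8_t *p, unsigned int *rec_ts_idx,
			uint16_t *ts)
{
	size_t used;

	if (*rec_ts_idx == 0) {
		*ts = p[0] | (p[1] << 8);
		used = 2;
	} else {
		*ts = p[0];
		used = 1;
	}
	(*rec_ts_idx)++;
	return used;
}

int main(void)
{
	const uint8_t buf[] = { 0x34, 0x12, 0x56, 0x78 };
	unsigned int rec_ts_idx = 0;
	const uint8_t *p = buf;
	uint16_t ts;

	p += decode_ts(p, &rec_ts_idx, &ts);	/* 0x1234, two bytes */
	printf("record 0: ts=0x%04x\n", ts);
	p += decode_ts(p, &rec_ts_idx, &ts);	/* 0x0056, one byte */
	printf("record 1: ts=0x%04x\n", ts);
	p += decode_ts(p, &rec_ts_idx, &ts);	/* 0x0078, one byte */
	printf("record 2: ts=0x%04x\n", ts);
	return 0;
}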
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b0302af3bd2..6cd4317fe94d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -592,16 +592,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
dev->state &= ~PCAN_USB_STATE_STARTED;
netif_stop_queue(netdev);
+ close_candev(netdev);
+
+ dev->can.state = CAN_STATE_STOPPED;
+
/* unlink all pending urbs and free used memory */
peak_usb_unlink_all_urbs(dev);
if (dev->adapter->dev_stop)
dev->adapter->dev_stop(dev);
- close_candev(netdev);
-
- dev->can.state = CAN_STATE_STOPPED;
-
/* can set bus off now */
if (dev->adapter->dev_set_bus) {
int err = dev->adapter->dev_set_bus(dev, 0);
@@ -774,7 +774,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
dev = netdev_priv(netdev);
/* allocate a buffer large enough to send commands */
- dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+ dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
err = -ENOMEM;
goto lbl_free_candev;
@@ -879,7 +879,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 7f5ec40e2b4d..40647b837b31 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -851,7 +851,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
goto err_out;
/* allocate command buffer once for all for the interface */
- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
GFP_KERNEL);
if (!pdev->cmd_buffer_addr)
goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index bbdd6058cd2f..d85fdc6949c6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -500,7 +500,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
u8 *buffer;
int err;
- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 27861c417c94..3e4416473607 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -1007,9 +1007,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
netdev_info(priv->netdev, "device disconnected\n");
unregister_netdev(priv->netdev);
- free_candev(priv->netdev);
-
unlink_all_urbs(priv);
+ free_candev(priv->netdev);
}
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c2cd540e9c9e..796571fccba7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -405,11 +405,10 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
* send them to our master MDIO bus controller
*/
if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
- bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
+ return bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
else
- mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
-
- return 0;
+ return mdiobus_write_nested(priv->master_mii_bus, addr,
+ regnum, val);
}
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
@@ -977,6 +976,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
struct device_node *dn = pdev->dev.of_node;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1039,7 +1039,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 7cee3e5db56c..f157b81551b7 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -789,7 +789,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
return UINT64_MAX;
- high = reg;
+ low |= ((u32)reg) << 16;
}
break;
case BANK0:
@@ -1742,7 +1742,7 @@ static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
int err;
if (!vid)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = _mv88e6xxx_vtu_vid_write(chip, vid - 1);
if (err)
@@ -3846,6 +3846,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
mv88e6xxx_mdio_unregister(chip);
return err;
}
+ if (chip->reset)
+ usleep_range(1000, 2000);
return 0;
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 7f64a76acd37..3bbe85aae49b 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -460,6 +460,18 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* RGMII_ID needs internal delay. This is enabled through
+ * PORT5_PAD_CTRL for all ports, rather than individual port
+ * registers
+ */
+ qca8k_write(priv, reg,
+ QCA8K_PORT_PAD_RGMII_EN |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
case PHY_INTERFACE_MODE_SGMII:
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
break;
@@ -630,22 +642,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
qca8k_port_set_status(priv, port, 1);
}
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
- return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
static void
qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
@@ -961,8 +957,6 @@ static struct dsa_switch_ops qca8k_switch_ops = {
.setup = qca8k_setup,
.adjust_link = qca8k_adjust_link,
.get_strings = qca8k_get_strings,
- .phy_read = qca8k_phy_read,
- .phy_write = qca8k_phy_write,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
.get_eee = qca8k_get_eee,
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 9c22bc3210cd..db95168ca111 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -40,6 +40,7 @@
((0x8 + (x & 0x3)) << 22)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
((0x10 + (x & 0x3)) << 20)
+#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b9283901136e..0fdc9ad32a2e 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -156,8 +156,6 @@ static void dayna_block_output(struct net_device *dev, int count,
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@@ -237,19 +235,26 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
{
- unsigned long outdata = 0xA5A0B5B0;
- unsigned long indata = 0x00000000;
+ u32 outdata = 0xA5A0B5B0;
+ u32 indata = 0;
+
/* Try writing 32 bits */
- memcpy_toio(membase, &outdata, 4);
- /* Now compare them */
- if (memcmp_withio(&outdata, membase, 4) == 0)
+ nubus_writel(outdata, membase);
+ /* Now read it back */
+ indata = nubus_readl(membase);
+ if (outdata == indata)
return ACCESS_32;
+
+ outdata = 0xC5C0D5D0;
+ indata = 0;
+
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
+
return ACCESS_UNKNOWN;
}
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index 99b30353541a..9e87d7b8360f 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
- depends on (PCI_MSI && X86)
+ depends on PCI_MSI && !CPU_BIG_ENDIAN
---help---
This driver supports Elastic Network Adapter (ENA)"
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index bcd993140f84..905911f78693 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -199,6 +199,11 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
@@ -839,6 +844,24 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
0);
}
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ (ena_dev->rss).hash_key;
+
+ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+ /* The key is stored in the device as a u32 array, and the API
+ * requires the key to be passed in this format. Thus the size of our
+ * array should be divided by 4
+ */
+ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+}
+
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->rss.hash_func;
+}
+
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
@@ -1967,7 +1990,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (unlikely(ret))
return ret;
- if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EPERM;
@@ -2034,15 +2057,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
switch (func) {
case ENA_ADMIN_TOEPLITZ:
- if (key_len > sizeof(hash_key->key)) {
- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
+ if (key) {
+ if (key_len != sizeof(hash_key->key)) {
+ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
}
-
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
@@ -2052,6 +2076,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
@@ -2078,7 +2103,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ /* ffs() returns 1 in case the lsb is set */
+ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
+ if (rss->hash_func)
+ rss->hash_func--;
+
if (func)
*func = rss->hash_func;
@@ -2365,6 +2394,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
if (unlikely(rc))
goto err_hash_key;
+ ena_com_hash_key_fill_default_key(ena_dev);
+
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
goto err_hash_ctrl;
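The ffs() comment above is the core of the hash-function read-back: the device reports the selected function as a bitmask, and ffs() turns that into a 0-based enum value, with the guard keeping 0 when no bit is set. A minimal sketch using the POSIX ffs() from <strings.h> (illustration only, detached from the ENA structures):
#include <strings.h>
#include <stdio.h>

static unsigned int selected_func_to_index(unsigned int selected_func)
{
	int idx = ffs(selected_func);	/* 1-based position, 0 if empty */

	if (idx)
		idx--;			/* back to a 0-based enum value */
	return (unsigned int)idx;
}

int main(void)
{
	printf("mask 0x1 -> %u\n", selected_func_to_index(0x1));	/* 0 */
	printf("mask 0x2 -> %u\n", selected_func_to_index(0x2));	/* 1 */
	printf("mask 0x0 -> %u\n", selected_func_to_index(0x0));	/* 0 */
	return 0;
}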
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 509d7b8e15ab..98b2ad20f599 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -41,6 +41,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/netdevice.h>
#include "ena_common_defs.h"
#include "ena_admin_defs.h"
@@ -622,6 +623,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f8fb3..0ef0a7b75751 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -651,6 +651,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ if (!indir)
+ return 0;
+
+ rc = ena_com_indirect_table_get(ena_dev, indir);
+ if (rc)
+ return rc;
+
+ /* Our internal representation of the indices is: even indices
+ * for Tx and odd indices for Rx. We need to convert the Rx
+ * indices to be consecutive.
+ */
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+ return rc;
+}
+
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -659,11 +681,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 func;
int rc;
- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ rc = ena_indirection_table_get(adapter, indir);
if (rc)
return rc;
+ /* We call this function in order to check if the device
+ * supports getting/setting the hash function.
+ */
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ key = NULL;
+ hfunc = NULL;
+ rc = 0;
+ }
+
+ return rc;
+ }
+
if (rc)
return rc;
@@ -697,8 +733,8 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
if (indir) {
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
rc = ena_com_indirect_table_fill_entry(ena_dev,
- ENA_IO_RXQ_IDX(indir[i]),
- i);
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
if (unlikely(rc)) {
netif_err(adapter, drv, netdev,
"Cannot fill indirect table (index is too large)\n");
@@ -715,6 +751,9 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
}
switch (hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ func = ena_com_get_current_hash_function(ena_dev);
+ break;
case ETH_RSS_HASH_TOP:
func = ENA_ADMIN_TOEPLITZ;
break;
@@ -819,6 +858,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_channels = ena_get_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0c298878bf46..da21886609e3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1105,8 +1105,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
struct ena_ring *tx_ring, *rx_ring;
struct ena_eth_io_intr_reg intr_reg;
- u32 tx_work_done;
- u32 rx_work_done;
+ int tx_work_done;
+ int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;
@@ -1122,7 +1122,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
- rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
+ /* On netpoll the budget is zero and the handler should only clean the
+ * tx completions.
+ */
+ if (likely(budget))
+ rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
napi_complete_done(napi, rx_work_done);
@@ -1698,6 +1702,7 @@ err_setup_rx:
err_setup_tx:
ena_free_io_irq(adapter);
err_req_irq:
+ ena_del_napi(adapter);
return rc;
}
@@ -2116,7 +2121,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strncpy(host_info->kernel_ver_str, utsname()->version,
+ strlcpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 008f2d594d40..326c2e1437b3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -110,6 +110,8 @@
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
#define ENA_MGMNT_IRQ_IDX 0
#define ENA_IO_IRQ_FIRST_IDX 1
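The two *_TO_COMBINED_IDX macros added above invert the existing even-Tx/odd-Rx mapping so ethtool can report consecutive Rx indices. A quick round-trip check (user-space copy of the macros, illustration only):
#include <stdio.h>

#define ENA_IO_TXQ_IDX(q)			(2 * (q))
#define ENA_IO_RXQ_IDX(q)			(2 * (q) + 1)
#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)

int main(void)
{
	int q;

	for (q = 0; q < 4; q++)
		printf("combined %d -> txq %d (back %d), rxq %d (back %d)\n",
		       q,
		       ENA_IO_TXQ_IDX(q),
		       ENA_IO_TXQ_IDX_TO_COMBINED_IDX(ENA_IO_TXQ_IDX(q)),
		       ENA_IO_RXQ_IDX(q),
		       ENA_IO_RXQ_IDX_TO_COMBINED_IDX(ENA_IO_RXQ_IDX(q)));
	return 0;
}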
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index fcdf5dda448f..77d99c532917 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -440,7 +440,7 @@ static void am79c961_timeout(struct net_device *dev)
/*
* Transmit a packet
*/
-static int
+static netdev_tx_t
am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
struct dev_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index d2bc8e5dcd23..35a9f252ceb6 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -339,7 +339,8 @@ static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
*init_rec );
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id );
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
@@ -770,7 +771,8 @@ static void lance_tx_timeout (struct net_device *dev)
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
struct lance_ioreg *IO = lp->iobase;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 9e80a76c3dfe..76393ae4cf0a 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -893,7 +893,7 @@ static void lance_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 3d8c6b2cdea4..09271665712d 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -235,7 +235,8 @@ struct lance_private {
static int lance_probe( struct net_device *dev);
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id);
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
@@ -511,7 +512,8 @@ static void lance_init_ring( struct net_device *dev )
}
-static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+static netdev_tx_t
+lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
int entry, len;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 3153465d4d02..acbbe4e5a2f8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1106,7 +1106,7 @@ static void lance_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
int entry, skblen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e4e8b245cd5..1df7f5da8411 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1390,7 +1390,7 @@ static int xgbe_close(struct net_device *netdev)
return 0;
}
-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -1399,7 +1399,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
struct netdev_queue *txq;
- int ret;
+ netdev_tx_t ret;
DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index de4b5d267c30..17c07837033f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1711,7 +1711,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
+ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c770ca37c9b2..a7d30731d376 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -261,6 +261,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
if (priv->regulator)
regulator_disable(priv->regulator);
+ if (priv->soc_data->need_div_macclk)
+ clk_disable_unprepare(priv->macclk);
+
free_netdev(ndev);
return err;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 974713b19ab6..5e1f03590aaf 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -478,7 +478,9 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
+ if (atl1e_write_phy_reg(&adapter->hw,
+ reg_num & MDIO_REG_ADDR_MASK, val))
+ netdev_err(netdev, "write phy register failed\n");
}
static int atl1e_mii_ioctl(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 17aa33c5567d..d95dec595786 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1524,8 +1524,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
int ethaddr_bytes = ETH_ALEN;
memset(ppattern + offset, 0xff, magicsync);
- for (j = 0; j < magicsync; j++)
- set_bit(len++, (unsigned long *) pmask);
+ for (j = 0; j < magicsync; j++) {
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
+ }
for (j = 0; j < B44_MAX_PATTERNS; j++) {
if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
@@ -1537,7 +1539,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- set_bit(len++, (unsigned long *) pmask);
+ pmask[len >> 3] |= BIT(len & 7);
+ len++;
}
}
return len - 1;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index c4078401b7de..900f2f706cbc 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -571,12 +571,13 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
/*
* tx request callback
*/
-static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct bcm_enet_desc *desc;
u32 len_stat;
- int ret;
+ netdev_tx_t ret;
priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 95874c10c23b..6519dd33c7ca 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1773,7 +1773,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->phy_interface = of_get_phy_mode(dn);
/* Default to GMII interface mode */
- if (priv->phy_interface < 0)
+ if ((int)priv->phy_interface < 0)
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
/* In the case of a fixed PHY, the DT node associated
@@ -1983,6 +1983,9 @@ static int bcm_sysport_resume(struct device *d)
umac_reset(priv);
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
/* We may have been suspended and never received a WOL event that
* would turn off MPD detection, take care of that now
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2cd1dcd77559..46a7dcf2ff4a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -286,6 +286,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
sw_cons = txdata->tx_pkt_cons;
+ /* Ensure subsequent loads occur after hw_cons */
+ smp_rmb();
+
while (sw_cons != hw_cons) {
u16 pkt_cons;
@@ -1936,7 +1939,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3059,12 +3062,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* if VF indicate to PF this function is going down (PF will delete sp
* elements and clear initializations
*/
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_clear_vlan_info(bp);
bnx2x_vfpf_close_vf(bp);
- else if (unload_mode != UNLOAD_RECOVERY)
+ } else if (unload_mode != UNLOAD_RECOVERY) {
/* if this is a normal/close unload need to clean up chip*/
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
- else {
+ } else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
@@ -3860,9 +3864,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (bp->ptp_tx_skb) {
- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ bp->eth_stats.ptp_skip_tx_ts++;
+ dev_err_once(&bp->dev->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 243cb9748d35..bb36312c9696 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
/**
* bnx2x_sp_event - handle ramrods completion.
*
@@ -1110,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
u32 func_config =
MF_CFG_RD(bp,
- func_mf_config[BP_PORT(bp) + 2 * i].
+ func_mf_config[BP_PATH(bp) + 2 * i].
config);
func_num +=
((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 8aecd8ef6542..b1992f464b3d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -182,7 +182,9 @@ static const struct {
{ STATS_OFFSET32(driver_filtered_tx_pkt),
4, false, "driver_filtered_tx_pkt" },
{ STATS_OFFSET32(eee_tx_lpi),
- 4, true, "Tx LPI entry count"}
+ 4, true, "Tx LPI entry count"},
+ { STATS_OFFSET32(ptp_skip_tx_ts),
+ 4, false, "ptp_skipped_tx_tstamp" },
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -1562,7 +1564,8 @@ static int bnx2x_get_module_info(struct net_device *dev,
}
if (!sff8472_comp ||
- (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ) ||
+ !(diag_type & SFP_EEPROM_DDM_IMPLEMENTED)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index b7d251108c19..7115f5025664 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -62,6 +62,7 @@
#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
#define SFP_EEPROM_DIAG_TYPE_SIZE 1
#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_DDM_IMPLEMENTED (1<<6)
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index eeeb4c5740bf..8d17d464c067 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3540,6 +3540,16 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
*/
static void bnx2x_config_mf_bw(struct bnx2x *bp)
{
+ /* Workaround for MFW bug.
+ * MFW is not supposed to generate BW attention in
+ * single function mode.
+ */
+ if (!IS_MF(bp)) {
+ DP(BNX2X_MSG_MCP,
+ "Ignoring MF BW config in single function mode\n");
+ return;
+ }
+
if (bp->link_vars.link_up) {
bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
bnx2x_link_sync_notify(bp);
@@ -8488,11 +8498,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
unsigned long ramrod_flags = 0, vlan_flags = 0;
- struct bnx2x_vlan_entry *vlan;
int rc;
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8521,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
if (rc)
return rc;
- /* Mark that hw forgot all entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
+ bnx2x_clear_vlan_info(bp);
return 0;
}
@@ -9978,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
- bool global = false;
u32 error_recovered, error_unrecovered;
- bool is_parity;
+ bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+ int vf_idx;
+ for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ if (vf)
+ vf->state = VF_LOST;
+ }
+#endif
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
@@ -15261,11 +15286,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
u32 val_seq;
u64 timestamp, ns;
struct skb_shared_hwtstamps shhwtstamps;
+ bool bail = true;
+ int i;
- /* Read Tx timestamp registers */
- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
- if (val_seq & 0x10000) {
+ /* FW may take a while to complete timestamping; retry a few times and if
+ * it's still not complete, it may indicate an error state - bail out then.
+ */
+ for (i = 0; i < 10; i++) {
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ bail = false;
+ break;
+ }
+ msleep(1 << i);
+ }
+
+ if (!bail) {
/* There is a valid timestamp value */
timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
@@ -15280,16 +15318,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
- dev_kfree_skb_any(bp->ptp_tx_skb);
- bp->ptp_tx_skb = NULL;
DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
timestamp, ns);
} else {
- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
- /* Reschedule to keep checking for a valid timestamp value */
- schedule_work(&bp->ptp_task);
+ DP(BNX2X_MSG_PTP,
+ "Tx timestamp is not recorded (register read=%u)\n",
+ val_seq);
+ bp->eth_stats.ptp_skip_tx_ts++;
}
+
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
}
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index c6e059119b22..e8a09d0afe1c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2376,15 +2376,21 @@ static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
/* send the ramrod on all the queues of the PF */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ int tx_idx;
/* Set the appropriate Queue object */
q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure Tx switching\n");
- return rc;
+ for (tx_idx = FIRST_TX_COS_INDEX;
+ tx_idx < fp->max_cos; tx_idx++) {
+ q_params.params.update.cid_index = tx_idx;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure Tx switching\n");
+ return rc;
+ }
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 888d0b6632e8..7152a03e3607 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -139,6 +139,7 @@ struct bnx2x_virtf {
#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
#define VF_ENABLED 2 /* VF Enabled */
#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+#define VF_LOST 4 /* Recovery while VFs are loaded */
bool flr_clnup_stage; /* true during flr cleanup */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b2644ed13d06..d55e63692cf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
u32 driver_filtered_tx_pkt;
/* src: Clear-on-Read register; Will not survive PMF Migration */
u32 eee_tx_lpi;
+
+ /* PTP */
+ u32 ptp_skip_tx_ts;
};
struct bnx2x_eth_q_stats {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c2d327d9dff0..27142fb195b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2095,6 +2095,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
{
int i;
+ if (vf->state == VF_LOST) {
+ /* Just ack the FW and return if VFs are lost
+ * in case of parity error. VFs are supposed to time out
+ * while waiting for the PF response.
+ */
+ DP(BNX2X_MSG_IOV,
+ "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ return;
+ }
+
/* check if tlv type is known */
if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
/* Lock the per vf op mutex and note the locker's identity.
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 737f0f6f4075..736e550163e1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -959,6 +959,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1377,14 +1379,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
cons = rxcmp->rx_cmp_opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- data = rx_buf->data;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
prefetch(data);
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1400,11 +1404,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
rc = -EIO;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
goto next_rx;
}
@@ -1415,6 +1425,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
+ if (agg_bufs)
+ bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
rc = -ENOMEM;
goto next_rx;
}
@@ -5944,8 +5956,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc && vnic->mc_list_count) {
+ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ }
if (rc)
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@@ -6420,13 +6439,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
if (netif_running(dev))
- bnxt_close_nic(bp, false, false);
+ bnxt_close_nic(bp, true, false);
dev->mtu = new_mtu;
bnxt_set_ring_params(bp);
if (netif_running(dev))
- return bnxt_open_nic(bp, false, false);
+ return bnxt_open_nic(bp, true, false);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3480b3078775..a23404480597 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1078,7 +1078,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
}
- return 0;
+ return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -1914,6 +1914,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
*/
if (priv->internal_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ int0_enable |= UMAC_IRQ_PHY_DET_R;
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -2531,6 +2533,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
}
+ if (status & UMAC_IRQ_PHY_DET_R &&
+ priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+ phy_init_hw(priv->dev->phydev);
+
/* Link UP/DOWN event */
if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
@@ -2627,8 +2633,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
/* all other interested interrupts handled in bottom half */
- status &= (UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_MPD_R);
+ status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_MPD_R | UMAC_IRQ_PHY_DET_R);
if (status) {
/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
@@ -3002,39 +3007,42 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_tx_wake_all_queues(dev);
}
-#define MAX_MC_COUNT 16
+#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
unsigned char *addr,
- int *i,
- int *mc)
+ int *i)
{
- u32 reg;
-
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
UMAC_MDF_ADDR + (*i * 4));
bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
addr[4] << 8 | addr[5],
UMAC_MDF_ADDR + ((*i + 1) * 4));
- reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
- reg |= (1 << (MAX_MC_COUNT - *mc));
- bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
*i += 2;
- (*mc)++;
}
static void bcmgenet_set_rx_mode(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- int i, mc;
+ int i, nfilter;
u32 reg;
netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
- /* Promiscuous mode */
+ /* Number of filters needed */
+ nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+ /*
+ * Turn on promiscuous mode for three scenarios
+ * 1. IFF_PROMISC flag is set
+ * 2. IFF_ALLMULTI flag is set
+ * 3. The number of filters needed exceeds the number of filters
+ * supported by the hardware.
+ */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (dev->flags & IFF_PROMISC) {
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3044,32 +3052,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}
- /* UniMac doesn't support ALLMULTI */
- if (dev->flags & IFF_ALLMULTI) {
- netdev_warn(dev, "ALLMULTI is not supported\n");
- return;
- }
-
/* update MDF filter */
i = 0;
- mc = 0;
/* Broadcast */
- bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
/* my own address.*/
- bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
- /* Unicast list*/
- if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
- return;
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
- if (!netdev_uc_empty(dev))
- netdev_for_each_uc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
- /* Multicast */
- if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
- return;
+ /* Unicast */
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+ /* Multicast */
netdev_for_each_mc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+ /* Enable filters */
+ reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}
/* Set the hardware MAC address. */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3f8858db12eb..dcf10ea60e7f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -362,6 +362,7 @@ struct bcmgenet_mib_counters {
#define EXT_ENERGY_DET_MASK (1 << 12)
#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN_V123 (1 << 0)
#define RGMII_LINK (1 << 4)
#define OOB_DISABLE (1 << 5)
#define RGMII_MODE_EN (1 << 6)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9bd90a7c4d40..b0b9feeb173b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -328,7 +328,11 @@ int bcmgenet_mii_config(struct net_device *dev)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg |= RGMII_MODE_EN | id_mode_dis;
+ reg |= id_mode_dis;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+ reg |= RGMII_MODE_EN_V123;
+ else
+ reg |= RGMII_MODE_EN;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
@@ -342,11 +346,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
- u32 phy_flags;
+ u32 phy_flags = 0;
int ret;
/* Communicate the integrated PHY revision */
- phy_flags = priv->gphy_rev;
+ if (priv->internal_phy)
+ phy_flags = priv->gphy_rev;
/* Initialize link state variables that bcmgenet_mii_setup() uses */
priv->old_link = -1;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f1b81187a201..dc7953894c35 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -299,7 +299,7 @@ static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
static uint64_t sbmac_addr2reg(unsigned char *ptr);
static irqreturn_t sbmac_intr(int irq, void *dev_instance);
-static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
static void sbmac_setmulti(struct sbmac_softc *sc);
static int sbmac_init(struct platform_device *pldev, long long base);
static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
@@ -2032,7 +2032,7 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
* Return value:
* nothing
********************************************************************* */
-static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct sbmac_softc *sc = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a0d640243df2..30e93041bf83 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2364,14 +2364,14 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(*pclk)) {
err = PTR_ERR(*pclk);
- dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
return err;
}
*hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(*hclk)) {
err = PTR_ERR(*hclk);
- dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
return err;
}
@@ -2385,25 +2385,25 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
err = clk_prepare_enable(*pclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
return err;
}
err = clk_prepare_enable(*hclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
goto err_disable_pclk;
}
err = clk_prepare_enable(*tx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
goto err_disable_hclk;
}
err = clk_prepare_enable(*rx_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
goto err_disable_txclk;
}
@@ -2823,7 +2823,7 @@ static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
err = clk_prepare_enable(*pclk);
if (err) {
- dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
+ dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
return err;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index ddd1ec8f7bd0..9627ed0b2f1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2440,6 +2440,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (!is_offload(adapter))
return -EOPNOTSUPP;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
if (!(adapter->flags & FULL_INIT_DONE))
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
@@ -3263,7 +3265,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
- goto out_free_adapter;
+ goto out_free_adapter_nofail;
}
adapter->pdev = pdev;
@@ -3381,6 +3383,9 @@ out_free_dev:
if (adapter->port[i])
free_netdev(adapter->port[i]);
+out_free_adapter_nofail:
+ kfree_skb(adapter->nofail_skb);
+
out_free_adapter:
kfree(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 8cffcdfd5678..38b5858c335a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -75,8 +75,8 @@ struct l2t_data {
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
- struct l2t_entry l2tab[0];
struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6ee2ed30626b..306b4b320616 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -266,8 +266,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
enum cxgb4_dcb_state_input input =
((pcmd->u.dcb.control.all_syncd_pkd &
FW_PORT_CMD_ALL_SYNCD_F)
- ? CXGB4_DCB_STATE_FW_ALLSYNCED
- : CXGB4_DCB_STATE_FW_INCOMPLETE);
+ ? CXGB4_DCB_INPUT_FW_ALLSYNCED
+ : CXGB4_DCB_INPUT_FW_INCOMPLETE);
if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index ccf24d3dc982..2c418c405c50 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -67,7 +67,7 @@
do { \
if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
cxgb4_dcb_state_fsm((__dev), \
- CXGB4_DCB_STATE_FW_ALLSYNCED); \
+ CXGB4_DCB_INPUT_FW_ALLSYNCED); \
} while (0)
/* States we can be in for a port's Data Center Bridging.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 20455d082cb8..c15052164717 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -66,8 +66,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = seq_tab_get_idx(seq->private, *pos + 1);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
@@ -2781,8 +2780,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
- if (err)
+ if (err) {
+ kvfree(t);
return err;
+ }
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
t4_free_mem(t);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c71a52a2f801..5478a2ab45c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5203,15 +5203,24 @@ static int __init cxgb4_init_module(void)
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
- debugfs_remove(cxgb4_debugfs_root);
+ goto err_pci;
#if IS_ENABLED(CONFIG_IPV6)
if (!inet6addr_registered) {
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = true;
+ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (ret)
+ pci_unregister_driver(&cxgb4_driver);
+ else
+ inet6addr_registered = true;
}
#endif
+ if (ret == 0)
+ return ret;
+
+err_pci:
+ debugfs_remove(cxgb4_debugfs_root);
+
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 23d6c44dc459..9ce1ad3d950c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
- struct sge *s = &adap->sge;
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
- unsigned int bmap_idx = 0;
+ struct sge *s = &adap->sge;
unsigned int per_chan;
- int i, err, msi_idx, que_idx = 0;
per_chan = rxq_info->nrxq / adap->params.nports;
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
+ if (bmap_idx < 0) {
+ err = -ENOSPC;
+ goto freeout;
+ }
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 60a26037a1c6..e58aae110ed2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -682,8 +682,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
v = l2t_get_idx(seq, *pos);
- if (v)
- ++*pos;
+ ++(*pos);
return v;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index ebeeb3581b9c..b4b435276a18 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3541,7 +3541,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index a37481c04a87..9eb3071b69a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -718,6 +718,10 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
+
+ /* Initialize hash mac addr list*/
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
adapter->flags |= FULL_INIT_DONE;
}
@@ -743,8 +747,6 @@ static int adapter_up(struct adapter *adapter)
enable_rx(adapter);
t4vf_sge_start(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 9a161e981529..24f69034f52c 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -780,6 +780,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
dev = platform_get_drvdata(pdev);
if (dev == NULL)
@@ -795,8 +796,8 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
iounmap(ep->base_addr);
if (ep->res != NULL) {
- release_resource(ep->res);
- kfree(ep->res);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
}
free_netdev(dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 89acf7bc4cf9..96290b83dfde 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -120,7 +120,7 @@ static void enic_init_affinity_hint(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
- (enic->msix[i].affinity_mask &&
+ (cpumask_available(enic->msix[i].affinity_mask) &&
!cpumask_empty(enic->msix[i].affinity_mask)))
continue;
if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
@@ -149,7 +149,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
if (enic_is_err_intr(enic, i) ||
enic_is_notify_intr(enic, i) ||
- !enic->msix[i].affinity_mask ||
+ !cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
err = irq_set_affinity_hint(enic->msix_entry[i].vector,
@@ -162,7 +162,7 @@ static void enic_set_affinity_hint(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
int wq_intr = enic_msix_wq_intr(enic, i);
- if (enic->msix[wq_intr].affinity_mask &&
+ if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
!cpumask_empty(enic->msix[wq_intr].affinity_mask))
netif_set_xps_queue(enic->netdev,
enic->msix[wq_intr].affinity_mask,
@@ -1806,10 +1806,10 @@ static int enic_stop(struct net_device *netdev)
}
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
+ netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 6620fc861c47..005c79b5b3f0 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2109,7 +2109,6 @@ static struct eisa_driver de4x5_eisa_driver = {
.remove = de4x5_eisa_remove,
}
};
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
#endif
#ifdef CONFIG_PCI
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 8ed0fd8b1dda..74cb9b3c2f41 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -2225,15 +2225,16 @@ static int __init dmfe_init_module(void)
if (cr6set)
dmfe_cr6_user_set = cr6set;
- switch(mode) {
- case DMFE_10MHF:
+ switch (mode) {
+ case DMFE_10MHF:
case DMFE_100MHF:
case DMFE_10MFD:
case DMFE_100MFD:
case DMFE_1M_HPNA:
dmfe_media_mode = mode;
break;
- default:dmfe_media_mode = DMFE_AUTO;
+ default:
+ dmfe_media_mode = DMFE_AUTO;
break;
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index e750b5ddc0fb..5f79e2731b76 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1813,8 +1813,8 @@ static int __init uli526x_init_module(void)
if (cr6set)
uli526x_cr6_user_set = cr6set;
- switch (mode) {
- case ULI526X_10MHF:
+ switch (mode) {
+ case ULI526X_10MHF:
case ULI526X_100MHF:
case ULI526X_10MFD:
case ULI526X_100MFD:
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 0a48a31225e6..56db37d92937 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -898,7 +898,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status;
+ int status, cnt;
u8 link_status = 0;
if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -909,6 +909,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
+ /* check link status before offline tests */
+ link_status = netif_carrier_ok(netdev);
+
if (test->flags & ETH_TEST_FL_OFFLINE) {
if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
@@ -929,13 +932,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
test->flags |= ETH_TEST_FL_FAILED;
}
- status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
- if (status) {
- test->flags |= ETH_TEST_FL_FAILED;
- data[4] = -1;
- } else if (!link_status) {
+ /* link status was down prior to test */
+ if (!link_status) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = 1;
+ return;
+ }
+
+ for (cnt = 10; cnt; cnt--) {
+ status = be_cmd_link_status_query(adapter, NULL, &link_status,
+ 0);
+ if (status) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ break;
+ }
+
+ if (link_status)
+ break;
+
+ msleep_interruptible(500);
}
}
@@ -1108,7 +1124,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
break;
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_qs - 1;
+ cmd->data = adapter->num_rx_qs;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b2eeecb26939..289560b0f643 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4701,8 +4701,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* device cannot transmit now, avoid dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+
be_close(netdev);
+ }
be_cancel_worker(adapter);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2e5efe6b1469..02ded9eb491b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1763,10 +1763,10 @@ static void fec_get_mac(struct net_device *ndev)
*/
if (!is_valid_ether_addr(iap)) {
/* Report it and use a random ethernet address instead */
- netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
+ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
eth_hw_addr_random(ndev);
- netdev_info(ndev, "Using random MAC address: %pM\n",
- ndev->dev_addr);
+ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+ ndev->dev_addr);
return;
}
@@ -2582,15 +2582,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
if (cycle > 0xFFFF) {
pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
- cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesced usec exceed hardware limitation\n");
+ pr_err("Tx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -3842,6 +3842,7 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_reset:
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
@@ -3867,6 +3868,11 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
cancel_work_sync(&fep->tx_timeout_work);
fec_ptp_stop(pdev);
@@ -3874,13 +3880,17 @@ fec_drv_remove(struct platform_device *pdev)
fec_enet_mii_remove(fep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(fep->phy_node);
free_netdev(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 21dd5579130e..c30994a09a7c 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -109,7 +109,7 @@ do { \
/* Interface Mode Register (IF_MODE) */
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -438,7 +438,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
tmp = 0;
switch (phy_if) {
case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_XGMII;
+ tmp |= IF_MODE_10G;
break;
default:
tmp |= IF_MODE_GMII;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 60bd1b36df60..b665d27f8e29 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2688,13 +2688,17 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+ bool do_tstamp;
+
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
frags = skb_shinfo(skb)->nr_frags;
/* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ if (unlikely(do_tstamp))
nr_txbds = frags + 2;
else
nr_txbds = frags + 1;
@@ -2708,7 +2712,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
(lstatus & BD_LENGTH_MASK))
break;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
next = next_txbd(bdp, base, tx_ring_size);
buflen = be16_to_cpu(next->length) +
GMAC_FCB_LEN + GMAC_TXPAL_LEN;
@@ -2718,7 +2722,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
buflen, DMA_TO_DEVICE);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(do_tstamp)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
~0x7UL);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 812a968a78e9..6aa4b50435da 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -250,14 +250,12 @@ uec_set_ringparam(struct net_device *netdev,
return -EINVAL;
}
+ if (netif_running(netdev))
+ return -EBUSY;
+
ug_info->bdRingLenRx[queue] = ring->rx_pending;
ug_info->bdRingLenTx[queue] = ring->tx_pending;
- if (netif_running(netdev)) {
- /* FIXME: restart automatically */
- netdev_info(netdev, "Please re-open the interface\n");
- }
-
return ret;
}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index e03b30c60dcf..c82c85ef5fb3 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a011043;
};
static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
/* Return all Fs if nothing was there */
- if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
dev_err(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
"little-endian");
+ priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+ "fsl,erratum-a011043");
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index b5d18d95d7b9..0b3aa83f3fc1 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -157,6 +157,7 @@ struct hip04_priv {
unsigned int reg_inten;
struct napi_struct napi;
+ struct device *dev;
struct net_device *ndev;
struct tx_desc *tx_desc;
@@ -173,6 +174,7 @@ struct hip04_priv {
dma_addr_t rx_phys[RX_DESC_NUM];
unsigned int rx_head;
unsigned int rx_buf_size;
+ unsigned int rx_cnt_remaining;
struct device_node *phy_node;
struct phy_device *phy;
@@ -185,7 +187,7 @@ struct hip04_priv {
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
- return (head - tail) % (TX_DESC_NUM - 1);
+ return (head - tail) % TX_DESC_NUM;
}
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -387,7 +389,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
}
if (priv->tx_phys[tx_tail]) {
- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
priv->tx_skb[tx_tail]->len,
DMA_TO_DEVICE);
priv->tx_phys[tx_tail] = 0;
@@ -437,8 +439,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys)) {
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -453,9 +455,9 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
hip04_set_xmit_desc(priv, phys);
- priv->tx_head = TX_NEXT(tx_head);
count++;
netdev_sent_queue(ndev, skb->len);
+ priv->tx_head = TX_NEXT(tx_head);
stats->tx_bytes += skb->len;
stats->tx_packets++;
@@ -486,7 +488,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
struct net_device *ndev = priv->ndev;
struct net_device_stats *stats = &ndev->stats;
- unsigned int cnt = hip04_recv_cnt(priv);
struct rx_desc *desc;
struct sk_buff *skb;
unsigned char *buf;
@@ -497,7 +498,10 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
u16 len;
u32 err;
- while (cnt && !last) {
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ while (priv->rx_cnt_remaining && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
if (unlikely(!skb)) {
@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
goto refill;
}
- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[priv->rx_head] = 0;
@@ -534,20 +538,22 @@ refill:
buf = netdev_alloc_frag(priv->rx_buf_size);
if (!buf)
goto done;
- phys = dma_map_single(&ndev->dev, buf,
+ phys = dma_map_single(priv->dev, buf,
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
goto done;
priv->rx_buf[priv->rx_head] = buf;
priv->rx_phys[priv->rx_head] = phys;
hip04_set_recv_desc(priv, phys);
priv->rx_head = RX_NEXT(priv->rx_head);
- if (rx >= budget)
+ if (rx >= budget) {
+ --priv->rx_cnt_remaining;
goto done;
+ }
- if (--cnt == 0)
- cnt = hip04_recv_cnt(priv);
+ if (--priv->rx_cnt_remaining == 0)
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
}
if (!(priv->reg_inten & RCV_INT)) {
@@ -557,8 +563,7 @@ refill:
}
napi_complete(napi);
done:
- /* clean up tx descriptors and start a new timer if necessary */
- tx_remaining = hip04_tx_reclaim(ndev, false);
+ /* start a new timer if necessary */
if (rx < budget && tx_remaining)
hip04_start_tx_timer(priv);
@@ -633,6 +638,7 @@ static int hip04_mac_open(struct net_device *ndev)
int i;
priv->rx_head = 0;
+ priv->rx_cnt_remaining = 0;
priv->tx_head = 0;
priv->tx_tail = 0;
hip04_reset_ppe(priv);
@@ -640,9 +646,9 @@ static int hip04_mac_open(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
dma_addr_t phys;
- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
return -EIO;
priv->rx_phys[i] = phys;
@@ -676,7 +682,7 @@ static int hip04_mac_stop(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
if (priv->rx_phys[i]) {
- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[i] = 0;
}
@@ -827,6 +833,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->dev = d;
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
@@ -946,7 +953,6 @@ static int hip04_remove(struct platform_device *pdev)
hip04_free_ring(ndev, d);
unregister_netdev(ndev);
- free_irq(ndev->irq, ndev);
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e69a6bed31a9..dd24c352b200 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -929,7 +929,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto err_free_mdio;
priv->phy_mode = of_get_phy_mode(node);
- if (priv->phy_mode < 0) {
+ if ((int)priv->phy_mode < 0) {
netdev_err(ndev, "not find phy-mode\n");
ret = -EINVAL;
goto err_mdiobus;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 06bc8638501e..66e7a5fd4249 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -146,7 +146,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -179,6 +178,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ad8681cf5ef0..24a815997ec5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -28,9 +28,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -375,8 +372,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -916,6 +911,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ndev);
@@ -1821,7 +1819,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -1834,7 +1832,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 4cd163390dcc..f38848c4f69d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -367,6 +367,7 @@ static int __lb_setup(struct net_device *ndev,
static int __lb_up(struct net_device *ndev,
enum hnae_loop loop_mode)
{
+#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int speed, duplex;
@@ -393,6 +394,9 @@ static int __lb_up(struct net_device *ndev,
h->dev->ops->adjust_link(h, speed, duplex);
+ /* wait for link adjustment to complete and the PHY to be ready */
+ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index de23a0ead5d7..d06efcd5f13b 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -166,11 +166,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
{
u32 time_cnt;
u32 reg_value;
+ int ret;
regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ if (ret)
+ return ret;
+
reg_value &= st_msk;
if ((!!check_st) == (!!reg_value))
break;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bd719e25dd76..3692adb8902d 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1476,7 +1476,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
memset(pr, 0, sizeof(struct ehea_port_res));
- pr->tx_bytes = rx_bytes;
+ pr->tx_bytes = tx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
@@ -3184,6 +3184,7 @@ static ssize_t ehea_probe_port(struct device *dev,
if (ehea_add_adapter_mr(adapter)) {
pr_err("creating MR failed\n");
+ of_node_put(eth_dn);
return -EIO;
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 955f658f3b65..de9897c8e933 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1557,7 +1557,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
- unsigned int *mcastFilterSize_p;
+ __be32 *mcastFilterSize_p;
long ret;
unsigned long ret_attr;
@@ -1579,8 +1579,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
- VETH_MCAST_FILTER_SIZE, NULL);
+ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE,
+ NULL);
if (!mcastFilterSize_p) {
dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
"attribute\n");
@@ -1597,7 +1598,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
- adapter->mcastFilterSize = *mcastFilterSize_p;
+ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
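For context, the ibmveth hunk above treats VETH_MCAST_FILTER_SIZE as a big-endian cell and converts it with be32_to_cpu(), because Open Firmware / device-tree attributes are stored big-endian and must not be consumed as host-endian on little-endian kernels. A minimal sketch of the conversion, with a hypothetical helper name; 'attr' stands in for the pointer returned by vio_get_attribute():

#include <linux/types.h>
#include <asm/byteorder.h>	/* be32_to_cpu() */

static u32 mcast_filter_size_sketch(const __be32 *attr)
{
	/* Reading *attr as a plain unsigned int only happens to work on
	 * big-endian machines; the explicit conversion works everywhere.
	 */
	return be32_to_cpu(*attr);
}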
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 068789e694c9..93c29094ceff 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1370,8 +1370,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
fw = e100_request_firmware(nic);
/* If it's NULL, then no ucode is required */
- if (!fw || IS_ERR(fw))
- return PTR_ERR(fw);
+ if (IS_ERR_OR_NULL(fw))
+ return PTR_ERR_OR_ZERO(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
netif_err(nic, probe, nic->netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2a81f6d72140..8936f19e9325 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -628,6 +628,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++)
rxdr[i].count = rxdr->count;
+ err = 0;
if (netif_running(adapter->netdev)) {
/* Try to get new resources before deleting old */
err = e1000_setup_all_rx_resources(adapter);
@@ -648,14 +649,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
- if (err)
- goto err_setup;
}
kfree(tx_old);
kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
- return 0;
+ return err;
+
err_setup_tx:
e1000_free_all_rx_resources(adapter);
err_setup_rx:
@@ -667,7 +667,6 @@ err_alloc_rx:
err_alloc_tx:
if (netif_running(adapter->netdev))
e1000_up(adapter);
-err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index dc7d671b903c..625008e8cb0d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1447,6 +1447,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
else
phy_reg |= 0xFA;
e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
}
hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 67163ca898ba..6374c8fc76a8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -227,7 +227,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6855b3380a83..a0f97c5ab6ef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2121,7 +2121,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->rx_ring->name,
sizeof(adapter->rx_ring->name) - 1,
- "%s-rx-0", netdev->name);
+ "%.14s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -2137,7 +2137,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
if (strlen(netdev->name) < (IFNAMSIZ - 5))
snprintf(adapter->tx_ring->name,
sizeof(adapter->tx_ring->name) - 1,
- "%s-tx-0", netdev->name);
+ "%.14s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -4212,7 +4212,7 @@ void e1000e_up(struct e1000_adapter *adapter)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
- netif_start_queue(adapter->netdev);
+ /* Tx queue started by watchdog timer when link is up */
e1000e_trigger_lsc(adapter);
}
@@ -4588,6 +4588,7 @@ int e1000e_open(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
/* allocate transmit descriptors */
err = e1000e_setup_tx_resources(adapter->tx_ring);
@@ -4648,7 +4649,6 @@ int e1000e_open(struct net_device *netdev)
e1000_irq_enable(adapter);
adapter->tx_hang_recheck = false;
- netif_start_queue(netdev);
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -5271,6 +5271,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (phy->ops.cfg_on_link_up)
phy->ops.cfg_on_link_up(hw);
+ netif_wake_queue(netdev);
netif_carrier_on(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5284,6 +5285,7 @@ static void e1000_watchdog_task(struct work_struct *work)
/* Link status message must follow this format */
pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2aae6f88dca0..a52663745051 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 7836072d3f63..043b69b5843b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2285,6 +2285,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2315,6 +2319,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -11352,6 +11360,7 @@ static void i40e_remove(struct pci_dev *pdev)
mutex_destroy(&hw->aq.asq_mutex);
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ rtnl_lock();
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
@@ -11360,6 +11369,7 @@ static void i40e_remove(struct pci_dev *pdev)
pf->vsi[i] = NULL;
}
}
+ rtnl_unlock();
for (i = 0; i < I40E_MAX_VEB; i++) {
kfree(pf->veb[i]);
@@ -11505,7 +11515,13 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_WUFC,
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
i40e_clear_interrupt_scheme(pf);
+ rtnl_unlock();
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index f1feceab758a..41cbcb0ac2d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -604,7 +604,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
+ strncpy(pf->ptp_caps.name, i40e_driver_name,
+ sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
pf->ptp_caps.n_ext_ts = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 54b8ee2583f1..7484ad3c955d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2000,6 +2000,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
goto error_param;
}
+
+ if (vf->pf_set_mac &&
+ ether_addr_equal(al->list[i].addr,
+ vf->default_lan_addr.addr)) {
+ dev_err(&pf->pdev->dev,
+ "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
+ vf->default_lan_addr.addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
}
vsi = pf->vsi[vf->lan_vsi_idx];
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2688180a7acd..f948eec7b35f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -193,6 +193,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 82e48e355fb9..7e35bd665630 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1677,7 +1677,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
if ((hw->phy.media_type == e1000_media_type_copper) &&
(!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
swap_now = true;
- } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+ !(connsw & E1000_CONNSW_SERDESD)) {
/* copper signal takes time to appear */
if (adapter->copper_tries < 4) {
adapter->copper_tries++;
@@ -7548,9 +7549,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -7563,14 +7562,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -7587,10 +7578,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -7604,12 +7591,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -7624,22 +7614,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
#endif /* CONFIG_PM_SLEEP */
@@ -7707,22 +7682,7 @@ static int igb_runtime_idle(struct device *dev)
static int igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9eb9b68f8935..ae1f963b6092 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -65,9 +65,15 @@
*
* The 40 bit 82580 SYSTIM overflows every
* 2^40 * 10^-9 / 60 = 18.3 minutes.
+ *
+ * SYSTIM is converted to real time using a timecounter. As
+ * timecounter_cyc2time() allows old timestamps, the timecounter
+ * needs to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, so we aim for 8
+ * minutes to be sure the actual interval is shorter than 9.16 minutes.
*/
-#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index a137e060c185..bbc23e88de89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3192,7 +3192,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
page_swap = true;
}
- if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
+ !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
/* We have a SFP, but it does not support SFF-8472 */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index a5428b6abdac..4c729faeb713 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4804,6 +4804,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
struct ixgbe_fdir_filter *filter;
+ u8 queue;
spin_lock(&adapter->fdir_perfect_lock);
@@ -4812,12 +4813,34 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
hlist_for_each_entry_safe(filter, node2,
&adapter->fdir_filter_list, fdir_node) {
+ if (filter->action == IXGBE_FDIR_DROP_QUEUE) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+ u8 vf = ethtool_get_flow_spec_ring_vf(filter->action);
+
+ if (!vf && (ring >= adapter->num_rx_queues)) {
+ e_err(drv, "FDIR restore failed without VF, ring: %u\n",
+ ring);
+ continue;
+ } else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool)) {
+ e_err(drv, "FDIR restore failed with VF, vf: %hhu, ring: %u\n",
+ vf, ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
+
ixgbe_fdir_write_perfect_filter_82599(hw,
- &filter->filter,
- filter->sw_idx,
- (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[filter->action]->reg_idx);
+ &filter->filter, filter->sw_idx, queue);
}
spin_unlock(&adapter->fdir_perfect_lock);
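For context, the ixgbe hunk above validates the ring/VF pair encoded in the filter's action cookie before programming the hardware, then maps that pair onto an absolute queue index. A rough sketch of the translation, using simplified stand-in types (sketch_adapter) instead of struct ixgbe_adapter:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/types.h>

struct sketch_adapter {
	u32 num_rx_queues;		/* PF RX queues */
	u32 num_vfs;
	u32 num_rx_queues_per_pool;	/* RX queues per VF pool */
};

static int sketch_action_to_queue(const struct sketch_adapter *ad,
				  u64 action, u8 *queue)
{
	u32 ring = ethtool_get_flow_spec_ring(action);
	u32 vf = ethtool_get_flow_spec_ring_vf(action);

	if (!vf) {
		if (ring >= ad->num_rx_queues)
			return -EINVAL;
		/* the driver additionally maps to rx_ring[ring]->reg_idx */
		*queue = ring;
	} else {
		if (vf > ad->num_vfs || ring >= ad->num_rx_queues_per_pool)
			return -EINVAL;
		/* VF pools sit behind the PF: pool (vf - 1), ring within it */
		*queue = (vf - 1) * ad->num_rx_queues_per_pool + ring;
	}

	return 0;
}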
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index cc735ec3e045..25090b4880b3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -70,6 +70,7 @@
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_DDM_IMPLEMENTED 0x40
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 75607267e656..7a763e85ff27 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1885,11 +1885,6 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_hw *hw = &adapter->hw;
int count = 0;
- if ((netdev_uc_count(netdev)) > 10) {
- pr_err("Too many unicast filters - No Space\n");
- return -ENOSPC;
- }
-
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d98b874a7238..bb6bc84995a2 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2690,10 +2690,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
/* For the case where the last mvneta_poll did not process all
* RX packets
*/
- rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
-
cause_rx_tx |= port->cause_rx_tx;
+ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
@@ -4162,7 +4161,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
- goto err_free_stats;
+ goto err_netdev;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -4181,13 +4180,11 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
err_netdev:
- unregister_netdev(dev);
if (pp->bm_priv) {
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
1 << pp->id);
}
-err_free_stats:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ff62dc7485d5..0ea1c05f5f0e 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3938,7 +3938,7 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
- int tx_port_num, val, queue, ptxq, lrxq;
+ int tx_port_num, val, queue, lrxq;
/* Configure port to loopback if needed */
if (port->flags & MVPP2_F_LOOPBACK)
@@ -3958,11 +3958,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
/* Close bandwidth for all queues */
- for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
- ptxq = mvpp2_txq_phys(port->id, queue);
+ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
mvpp2_write(port->priv,
- MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
- }
+ MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
/* Set refill period to 1 usec, refill tokens
* and bucket size to maximum
@@ -4709,7 +4707,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
txq->descs_phys = 0;
/* Set minimum bandwidth for disabled TXQs */
- mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
/* Set Tx descriptors queue starting address and size */
mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c9f4b5412844..b97a070074b7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3114,7 +3114,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
skb_put(skb, len);
if (dev->features & NETIF_F_RXCSUM) {
- skb->csum = csum;
+ skb->csum = le16_to_cpu(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 4ac023a37936..49f692907a30 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4939,6 +4939,20 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
},
},
+ {
+ .ident = "ASUS P6T",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
+ {
+ .ident = "ASUS P6X",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 20de37a414fe..d10c8a8156bc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1700,6 +1700,7 @@ static void mtk_poll_controller(struct net_device *dev)
static int mtk_start_dma(struct mtk_eth *eth)
{
+ u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
int err;
err = mtk_dma_init(eth);
@@ -1714,7 +1715,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_QDMA_GLO_CFG);
mtk_w32(eth,
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
MTK_PDMA_GLO_CFG);
@@ -2175,13 +2176,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
@@ -2189,11 +2190,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
break;
case ETHTOOL_GRXCLSRULE:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
@@ -2210,11 +2211,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 8a9a332d78b4..6068e7c4fc7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1679,6 +1679,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
+ cmd->data = MAX_NUM_OF_FS_RULES;
while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
err = mlx4_en_get_flow(dev, cmd, i);
if (!err)
@@ -1930,6 +1931,8 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
+#define MLX4_EEPROM_PAGE_LEN 256
+
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1964,7 +1967,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
break;
default:
return -ENOSYS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cb7c3ef97134..781642d47133 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -198,7 +198,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
}
@@ -207,7 +207,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
i + 1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
return 0;
@@ -1122,8 +1122,7 @@ static int __set_port_type(struct mlx4_port_info *info,
mlx4_err(mdev,
"Requested port type for port %d is not supported on this HCA\n",
info->port);
- err = -EINVAL;
- goto err_sup;
+ return -EOPNOTSUPP;
}
mlx4_stop_sense(mdev);
@@ -1145,7 +1144,7 @@ static int __set_port_type(struct mlx4_port_info *info,
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
- err = -EINVAL;
+ err = -EOPNOTSUPP;
}
}
}
@@ -1171,7 +1170,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-err_sup:
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 0710b3677464..cb186321edc1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1490,7 +1490,7 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
rule.port = port;
rule.qpn = qpn;
INIT_LIST_HEAD(&rule.list);
- mlx4_err(dev, "going promisc on %x\n", port);
+ mlx4_info(dev, "going promisc on %x\n", port);
return mlx4_flow_attach(dev, &rule, regid_p);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b656dd5772e5..3173875a715f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1960,11 +1960,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
size -= offset + size - I2C_PAGE_SIZE;
i2c_addr = I2C_ADDR_LOW;
- if (offset >= I2C_PAGE_SIZE) {
- /* Reset offset to high page */
- i2c_addr = I2C_ADDR_HIGH;
- offset -= I2C_PAGE_SIZE;
- }
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 79944302dd46..7d1e8ab956e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -470,12 +470,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+ struct resource_allocator *res_alloc,
+ int vf)
{
- /* reduce the sink counter */
- return (dev->caps.max_counters - 1 -
- (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
- / MLX4_MAX_PORTS;
+ struct mlx4_active_ports actv_ports;
+ int ports, counters_guaranteed;
+
+ /* For master, only allocate according to the number of phys ports */
+ if (vf == mlx4_master_func_num(dev))
+ return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+ /* calculate real number of ports for the VF */
+ actv_ports = mlx4_get_active_ports(dev, vf);
+ ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+ counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+ /* If we do not have enough counters for this VF, do not
+ * allocate any for it. '-1' to reduce the sink counter.
+ */
+ if ((res_alloc->res_reserved + counters_guaranteed) >
+ (dev->caps.max_counters - 1))
+ return 0;
+
+ return counters_guaranteed;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -483,7 +502,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
- int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
@@ -600,16 +618,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
- if (t == mlx4_master_func_num(dev))
- res_alloc->guaranteed[t] =
- MLX4_PF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else if (t <= max_vfs_guarantee_counter)
- res_alloc->guaranteed[t] =
- MLX4_VF_COUNTERS_PER_PORT *
- MLX4_MAX_PORTS;
- else
- res_alloc->guaranteed[t] = 0;
+ res_alloc->guaranteed[t] =
+ mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
res_alloc->res_free -= res_alloc->guaranteed[t];
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 524fff2b3dc6..2e6d6dfdcc80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -207,7 +207,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
struct mlx5_interface *intf;
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
+ list_for_each_entry_reverse(intf, &intf_list, list)
mlx5_remove_device(intf, priv);
list_del(&priv->dev_list);
mutex_unlock(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 4a51fc6908ad..098a6b56fd54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -441,12 +441,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
return &arfs_t->rules_hash[bucket_idx];
}
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) ?
- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
u8 ip_proto, __be16 etype)
{
@@ -605,31 +599,9 @@ out:
arfs_may_expire_flow(priv);
}
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->dest;
- return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->source;
- return ((struct udphdr *)transport_header)->source;
-}
-
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_table *arfs_t,
- const struct sk_buff *skb,
+ const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
struct arfs_rule *rule;
@@ -644,19 +616,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
INIT_WORK(&rule->arfs_work, arfs_handle_work);
tuple = &rule->tuple;
- tuple->etype = skb->protocol;
+ tuple->etype = fk->basic.n_proto;
+ tuple->ip_proto = fk->basic.ip_proto;
if (tuple->etype == htons(ETH_P_IP)) {
- tuple->src_ipv4 = ip_hdr(skb)->saddr;
- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ tuple->src_ipv4 = fk->addrs.v4addrs.src;
+ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
} else {
- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
sizeof(struct in6_addr));
- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
sizeof(struct in6_addr));
}
- tuple->ip_proto = arfs_get_ip_proto(skb);
- tuple->src_port = arfs_get_src_port(skb);
- tuple->dst_port = arfs_get_dst_port(skb);
+ tuple->src_port = fk->ports.src;
+ tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -667,37 +639,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
return rule;
}
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
- const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
- if (tuple->etype == htons(ETH_P_IP) &&
- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
- return true;
- if (tuple->etype == htons(ETH_P_IPV6) &&
- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
- sizeof(struct in6_addr))) &&
- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
- sizeof(struct in6_addr))))
- return true;
+ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+ return false;
+ if (tuple->etype != fk->basic.n_proto)
+ return false;
+ if (tuple->etype == htons(ETH_P_IP))
+ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+ if (tuple->etype == htons(ETH_P_IPV6))
+ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
return false;
}
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
- const struct sk_buff *skb)
+ const struct flow_keys *fk)
{
struct arfs_rule *arfs_rule;
struct hlist_head *head;
- __be16 src_port = arfs_get_src_port(skb);
- __be16 dst_port = arfs_get_dst_port(skb);
- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
hlist_for_each_entry(arfs_rule, head, hlist) {
- if (arfs_rule->tuple.src_port == src_port &&
- arfs_rule->tuple.dst_port == dst_port &&
- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ if (arfs_cmp(&arfs_rule->tuple, fk))
return arfs_rule;
- }
}
return NULL;
@@ -710,20 +678,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
+ struct flow_keys fk;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
if (!arfs_t)
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
- arfs_rule = arfs_find_rule(arfs_t, skb);
+ arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
spin_unlock_bh(&arfs->arfs_lock);
@@ -731,8 +703,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
arfs_rule->rxq = rxq_index;
} else {
- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
- rxq_index, flow_id);
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
if (!arfs_rule) {
spin_unlock_bh(&arfs->arfs_lock);
return -ENOMEM;
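For context, the aRFS rework above drops the hand-rolled IPv4/IPv6/TCP/UDP header parsing and runs the skb through the generic flow dissector once, then builds and compares rules from the resulting struct flow_keys. A small sketch of that dissection step, with a hypothetical function name; the field comments describe what the aRFS code consumes:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static int sketch_dissect_tuple(const struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	/* fk.basic.n_proto            - EtherType, e.g. htons(ETH_P_IP)
	 * fk.basic.ip_proto           - L4 protocol (IPPROTO_TCP/UDP)
	 * fk.addrs.v4addrs / v6addrs  - source/destination addresses
	 * fk.ports.src / fk.ports.dst - L4 ports, network byte order
	 */
	return fk.basic.ip_proto;
}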
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 029e856f72a0..dc809c2ea413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -151,6 +156,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
if (err)
@@ -159,6 +165,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
out:
kvfree(in);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d5e8ac86c195..e13a6cd5163f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1149,6 +1149,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EOPNOTSUPP;
+
if (pauseparam->autoneg)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da9246f6c31e..a10f042df01f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -92,8 +92,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -121,8 +120,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -1759,7 +1757,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
unlock:
mutex_unlock(&esw->state_lock);
- return 0;
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d676088512cf..c9fb589690ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -786,11 +786,9 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
priv->numa_node = dev_to_node(&dev->pdev->dev);
- priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
- if (!priv->dbg_root) {
- dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
- return -ENOMEM;
- }
+ if (mlx5_debugfs_root)
+ priv->dbg_root =
+ debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
err = mlx5_pci_enable_device(dev);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 43d7c8378fb4..0bad09d06206 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -368,10 +368,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLX5_I2C_ADDR_HIGH;
- offset -= MLX5_EEPROM_PAGE_LENGTH;
- }
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 9346f3985edf..354338c8a510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -125,7 +125,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
event_type, rsn);
- return;
+ goto out;
}
switch (common->res) {
@@ -139,7 +139,7 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
-
+out:
mlx5_core_put_rsc(common);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index a01e6c0d0cd1..fdc69218c8ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -935,7 +935,7 @@ static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
MLXSW_REG_ZERO(spaft, payload);
mlxsw_reg_spaft_local_port_set(payload, local_port);
mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
- mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
+ mlxsw_reg_spaft_allow_prio_tagged_set(payload, allow_untagged);
mlxsw_reg_spaft_allow_tagged_set(payload, true);
}
@@ -1873,7 +1873,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
* Configures the ETS elements.
*/
#define MLXSW_REG_QEEC_ID 0x400D
-#define MLXSW_REG_QEEC_LEN 0x1C
+#define MLXSW_REG_QEEC_LEN 0x20
static const struct mlxsw_reg_info mlxsw_reg_qeec = {
.id = MLXSW_REG_QEEC_ID,
@@ -1918,6 +1918,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
*/
MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+/* reg_qeec_mise
+ * Min shaper configuration enable. Enables configuration of the min
+ * shaper on this ETS element
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+
enum {
MLXSW_REG_QEEC_BYTES_MODE,
MLXSW_REG_QEEC_PACKETS_MODE,
@@ -1934,6 +1943,17 @@ enum {
*/
MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+/* The smallest permitted min shaper rate. */
+#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */
+
+/* reg_qeec_min_shaper_rate
+ * Min shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
+
/* reg_qeec_mase
* Max shaper configuration enable. Enables configuration of the max
* shaper on this ETS element.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 22a5916e477e..8460c4807567 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1565,7 +1565,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
int i;
for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_prio_stats[i].str, prio);
*p += ETH_GSTRING_LEN;
}
@@ -1576,7 +1576,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
int i;
for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
mlxsw_sp_port_hw_tc_stats[i].str, tc);
*p += ETH_GSTRING_LEN;
}
@@ -2044,6 +2044,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ if (!autoneg && cmd->base.speed == SPEED_56000) {
+ netdev_err(dev, "56G not supported with autoneg off\n");
+ return -EINVAL;
+ }
eth_proto_new = autoneg ?
mlxsw_sp_to_ptys_advert_link(cmd) :
mlxsw_sp_to_ptys_speed(cmd->base.speed);
@@ -2059,11 +2063,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
+ mlxsw_sp_port->link.autoneg = autoneg;
+
if (!netif_running(dev))
return 0;
- mlxsw_sp_port->link.autoneg = autoneg;
-
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
@@ -2187,6 +2191,13 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
MLXSW_REG_QEEC_MAS_DIS);
if (err)
return err;
+
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i + 8, i,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
}
/* Map all priorities to traffic class 0. */
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 20cb85bc0c5f..6135d90f368f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1156,7 +1156,7 @@ ks8695_timeout(struct net_device *ndev)
* sk_buff and adds it to the TX ring. It then kicks the TX DMA
* engine to ensure transmission begins.
*/
-static int
+static netdev_tx_t
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ks8695_priv *ksp = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 1edc973df4c4..7377dca6eb57 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -547,9 +547,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
/* set dma read address */
ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
- /* start the packet dma process, and set auto-dequeue rx */
- ks8851_wrreg16(ks, KS_RXQCR,
- ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+ /* start DMA access */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
if (rxlen > 4) {
unsigned int rxalign;
@@ -580,7 +579,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
}
- ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* end DMA access and dequeue packet */
+ ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
}
}
@@ -797,6 +797,15 @@ static void ks8851_tx_work(struct work_struct *work)
static int ks8851_net_open(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
+ int ret;
+
+ ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev->name, ks);
+ if (ret < 0) {
+ netdev_err(dev, "failed to get irq\n");
+ return ret;
+ }
/* lock the card, even if we may not actually be doing anything
* else at the moment */
@@ -861,6 +870,7 @@ static int ks8851_net_open(struct net_device *dev)
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
mutex_unlock(&ks->lock);
+ mii_check_link(&ks->mii);
return 0;
}
@@ -911,6 +921,8 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
+ free_irq(dev->irq, ks);
+
return 0;
}
@@ -1516,6 +1528,7 @@ static int ks8851_probe(struct spi_device *spi)
spi_set_drvdata(spi, ks);
+ netif_carrier_off(ks->netdev);
ndev->if_port = IF_PORT_100BASET;
ndev->netdev_ops = &ks8851_netdev_ops;
ndev->irq = spi->irq;
@@ -1542,14 +1555,6 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- ndev->name, ks);
- if (ret < 0) {
- dev_err(&spi->dev, "failed to get irq\n");
- goto err_irq;
- }
-
ret = register_netdev(ndev);
if (ret) {
dev_err(&spi->dev, "failed to register network device\n");
@@ -1562,14 +1567,10 @@ static int ks8851_probe(struct spi_device *spi)
return 0;
-
err_netdev:
- free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
-err_id:
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1587,7 +1588,6 @@ static int ks8851_remove(struct spi_device *spi)
dev_info(&spi->dev, "remove\n");
unregister_netdev(priv->netdev);
- free_irq(spi->irq, priv);
if (gpio_is_valid(priv->gpio))
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 2fc5cd56c0a8..bebddb8b997e 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -475,21 +475,47 @@ static int msg_enable;
*/
/**
- * ks_rdreg8 - read 8 bit register from device
+ * ks_check_endian - Check whether endianness of the bus is correct
* @ks : The chip information
- * @offset: The register address
*
- * Read a 8bit register from the chip, returning the result
+ * The KS8851-16MLL EESK pin allows selecting the endianness of the 16bit
+ * bus. To maintain optimum performance, the bus endianness should be set
+ * such that it matches the endianness of the CPU.
*/
-static u8 ks_rdreg8(struct ks_net *ks, int offset)
+
+static int ks_check_endian(struct ks_net *ks)
{
- u16 data;
- u8 shift_bit = offset & 0x03;
- u8 shift_data = (offset & 1) << 3;
- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- data = ioread16(ks->hw_addr);
- return (u8)(data >> shift_data);
+ u16 cider;
+
+ /*
+ * Read CIDER register first, however read it the "wrong" way around.
+ * If the endian strap on the KS8851-16MLL is incorrect and the chip
+ * is operating in different endianness than the CPU, then the meaning
+ * of BE[3:0] byte-enable bits is also swapped such that:
+ * BE[3,2,1,0] becomes BE[1,0,3,2]
+ *
+ * Luckily for us, the byte-enable bits are the top four MSbits of
+ * the address register and the CIDER register is at offset 0xc0.
+ * Hence, by reading address 0xc0c0, which is not impacted by endian
+ * swapping, we assert either BE[3:2] or BE[1:0] while reading the
+ * CIDER register.
+ *
+ * If the bus configuration is correct, reading 0xc0c0 asserts
+ * BE[3:2] and this read returns 0x0000, because to read register
+ * with bottom two LSbits of address set to 0, BE[1:0] must be
+ * asserted.
+ *
+ * If the bus configuration is NOT correct, reading 0xc0c0 asserts
+ * BE[1:0] and this read returns non-zero 0x8872 value.
+ */
+ iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
+ cider = ioread16(ks->hw_addr);
+ if (!cider)
+ return 0;
+
+ netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
+
+ return -EINVAL;
}
/**
@@ -508,22 +534,6 @@ static u16 ks_rdreg16(struct ks_net *ks, int offset)
}
/**
- * ks_wrreg8 - write 8bit register value to chip
- * @ks: The chip information
- * @offset: The register address
- * @value: The value to write
- *
- */
-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
-{
- u8 shift_bit = (offset & 0x03);
- u16 value_write = (u16)(value << ((offset & 1) << 3));
- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- iowrite16(value_write, ks->hw_addr);
-}
-
-/**
* ks_wrreg16 - write 16bit register value to chip
* @ks: The chip information
* @offset: The register address
@@ -642,8 +652,7 @@ static void ks_read_config(struct ks_net *ks)
u16 reg_data = 0;
/* Regardless of bus width, 8 bit read should always work.*/
- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
+ reg_data = ks_rdreg16(ks, KS_CCR);
/* addr/data bus are multiplexed */
ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
@@ -747,7 +756,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
/* 1. set sudo DMA mode */
ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
/* 2. read prepend data */
/**
@@ -764,7 +773,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
ks_inblk(ks, buf, ALIGN(len, 4));
/* 4. reset sudo DMA Mode */
- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
}
/**
@@ -866,14 +875,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
{
struct net_device *netdev = pw;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
u16 status;
+ spin_lock_irqsave(&ks->statelock, flags);
/*this should be the first in IRQ handler */
ks_save_cmd_reg(ks);
status = ks_rdreg16(ks, KS_ISR);
if (unlikely(!status)) {
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_NONE;
}
@@ -899,6 +911,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_HANDLED;
}
@@ -968,6 +981,7 @@ static int ks_net_stop(struct net_device *netdev)
/* shutdown RX/TX QMU */
ks_disable_qmu(ks);
+ ks_disable_int(ks);
/* set powermode to soft power down to save power */
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -997,13 +1011,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
ks->txh.txw[1] = cpu_to_le16(len);
/* 1. set sudo-DMA mode */
- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
/* 2. write status/lenth info */
ks_outblk(ks, ks->txh.txw, 4);
/* 3. write pkt data */
ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
/* 4. reset sudo-DMA mode */
- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
/* 6. wait until TXQCR_METFE is auto-cleared */
@@ -1020,14 +1034,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
* spin_lock_irqsave is required because tx and rx should be mutual exclusive.
* So while tx is in-progress, prevent IRQ interrupt from happenning.
*/
-static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- int retv = NETDEV_TX_OK;
+ netdev_tx_t retv = NETDEV_TX_OK;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
- disable_irq(netdev->irq);
- ks_disable_int(ks);
- spin_lock(&ks->statelock);
+ spin_lock_irqsave(&ks->statelock, flags);
/* Extra space are required:
* 4 byte for alignment, 4 for status/length, 4 for CRC
@@ -1041,9 +1054,7 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
- spin_unlock(&ks->statelock);
- ks_enable_int(ks);
- enable_irq(netdev->irq);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return retv;
}
@@ -1568,6 +1579,10 @@ static int ks8851_probe(struct platform_device *pdev)
goto err_free;
}
+ err = ks_check_endian(ks);
+ if (err)
+ goto err_free;
+
netdev->irq = platform_get_irq(pdev, 0);
if ((int)netdev->irq < 0) {
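
[Not part of the patch: the ks_check_endian() comment above describes the byte-enable trick in prose, so here is a rough userspace sketch of the same reasoning. Reading 0xc0c0 with BE[3:2] asserted yields 0x0000 on a correctly strapped bus and the 0x8872 chip ID when the byte lanes are swapped. The BE bit values are inferred from the hunk's comment ("top four MSbits", BE3|BE2|KS_CIDER == 0xc0c0); simulate_cider_read() is a hypothetical model of the bus, not driver code.]

#include <stdint.h>
#include <stdio.h>

#define KS_CIDER 0xc0      /* Chip ID/Enable register offset              */
#define CIDER_ID 0x8872    /* value the chip returns when CIDER is read   */
#define BE3      0x8000    /* byte-enable bits occupy the top four bits   */
#define BE2      0x4000    /*   of the command word (inferred from hunk)  */
#define BE1      0x2000
#define BE0      0x1000

/* Hypothetical model: with a wrong EESK strap the chip sees the byte-enable
 * bits as BE[1,0,3,2] instead of BE[3,2,1,0].
 */
static uint16_t simulate_cider_read(uint16_t cmd, int strap_ok)
{
	uint16_t be = cmd >> 12;

	if (!strap_ok)                               /* byte lanes swapped */
		be = ((be & 0x3) << 2) | (be >> 2);

	/* CIDER sits at 0xc0, so its low two address bits are 0 and the chip
	 * only drives data when BE[1:0] is asserted.
	 */
	return (be & 0x3) ? CIDER_ID : 0x0000;
}

int main(void)
{
	uint16_t cmd = BE3 | BE2 | KS_CIDER;   /* the "wrong way around" read */

	printf("strap ok:  0x%04x\n", simulate_cider_read(cmd, 1));  /* 0x0000 */
	printf("strap bad: 0x%04x\n", simulate_cider_read(cmd, 0));  /* 0x8872 */
	return 0;
}
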
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 6d1a956e3f77..02ec326cb129 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4113,7 +4113,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* setup (if available). */
status = myri10ge_request_irq(mgp);
if (status != 0)
- goto abort_with_firmware;
+ goto abort_with_slices;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 23821540ab07..254e6dbc4c6a 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -50,6 +50,8 @@ static int sonic_open(struct net_device *dev)
if (sonic_debug > 2)
printk("sonic_open: initializing sonic driver.\n");
+ spin_lock_init(&lp->lock);
+
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (skb == NULL) {
@@ -101,6 +103,24 @@ static int sonic_open(struct net_device *dev)
return 0;
}
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+ int i;
+ u16 bits;
+
+ for (i = 0; i < 1000; ++i) {
+ bits = SONIC_READ(SONIC_CMD) & mask;
+ if (!bits)
+ return;
+ if (irqs_disabled() || in_interrupt())
+ udelay(20);
+ else
+ usleep_range(100, 200);
+ }
+ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
/*
* Close the SONIC device
@@ -118,6 +138,9 @@ static int sonic_close(struct net_device *dev)
/*
* stop the SONIC, disable interrupts
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -157,6 +180,9 @@ static void sonic_tx_timeout(struct net_device *dev)
* put the Sonic into software-reset mode and
* disable all interrupts before releasing DMA buffers
*/
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
+
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -194,8 +220,6 @@ static void sonic_tx_timeout(struct net_device *dev)
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -203,7 +227,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
dma_addr_t laddr;
int length;
- int entry = lp->next_tx;
+ int entry;
+ unsigned long flags;
if (sonic_debug > 2)
printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
@@ -221,11 +246,15 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
if (!laddr) {
- printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
- dev_kfree_skb(skb);
- return NETDEV_TX_BUSY;
+ pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->next_tx;
+
sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -235,10 +264,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
sonic_tda_put(dev, entry, SONIC_TD_LINK,
sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
- /*
- * Must set tx_skb[entry] only after clearing status, and
- * before clearing EOL and before stopping queue
- */
wmb();
lp->tx_len[entry] = length;
lp->tx_laddr[entry] = laddr;
@@ -263,6 +288,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return NETDEV_TX_OK;
}
@@ -275,9 +302,21 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct sonic_local *lp = netdev_priv(dev);
int status;
+ unsigned long flags;
+
+ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+ * with sonic_send_packet() so that the two functions can share state.
+ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+ * by macsonic which must use two IRQs with different priority levels.
+ */
+ spin_lock_irqsave(&lp->lock, flags);
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ if (!status) {
+ spin_unlock_irqrestore(&lp->lock, flags);
- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
return IRQ_NONE;
+ }
do {
if (status & SONIC_INT_PKTRX) {
@@ -292,11 +331,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
int td_status;
int freed_some = 0;
- /* At this point, cur_tx is the index of a TD that is one of:
- * unallocated/freed (status set & tx_skb[entry] clear)
- * allocated and sent (status set & tx_skb[entry] set )
- * allocated and not yet sent (status clear & tx_skb[entry] set )
- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+ /* The state of a Transmit Descriptor may be inferred
+ * from { tx_skb[entry], td_status } as follows.
+ * { clear, clear } => the TD has never been used
+ * { set, clear } => the TD was handed to SONIC
+ * { set, set } => the TD was handed back
+ * { clear, set } => the TD is available for re-use
*/
if (sonic_debug > 2)
@@ -398,10 +438,30 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* load CAM done */
if (status & SONIC_INT_LCD)
SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+
+ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+ } while (status);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
return IRQ_HANDLED;
}
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+ unsigned int last)
+{
+ unsigned int i = last;
+
+ do {
+ i = (i + 1) & SONIC_RRS_MASK;
+ if (addr == lp->rx_laddr[i])
+ return i;
+ } while (i != last);
+
+ return -ENOENT;
+}
+
/*
* We have a good packet(s), pass it/them up the network stack.
*/
@@ -421,6 +481,16 @@ static void sonic_rx(struct net_device *dev)
status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
if (status & SONIC_RCR_PRX) {
+ u32 addr = (sonic_rda_get(dev, entry,
+ SONIC_RD_PKTPTR_H) << 16) |
+ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+ int i = index_from_addr(lp, addr, entry);
+
+ if (i < 0) {
+ WARN_ONCE(1, "failed to find buffer!\n");
+ break;
+ }
+
/* Malloc up new buffer. */
new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
@@ -442,7 +512,7 @@ static void sonic_rx(struct net_device *dev)
/* now we have a new skb to replace it, pass the used one up the stack */
dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
- used_skb = lp->rx_skb[entry];
+ used_skb = lp->rx_skb[i];
pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
skb_trim(used_skb, pkt_len);
used_skb->protocol = eth_type_trans(used_skb, dev);
@@ -451,13 +521,13 @@ static void sonic_rx(struct net_device *dev)
lp->stats.rx_bytes += pkt_len;
/* and insert the new skb */
- lp->rx_laddr[entry] = new_laddr;
- lp->rx_skb[entry] = new_skb;
+ lp->rx_laddr[i] = new_laddr;
+ lp->rx_skb[i] = new_skb;
bufadr_l = (unsigned long)new_laddr & 0xffff;
bufadr_h = (unsigned long)new_laddr >> 16;
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
+ sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
} else {
/* This should only happen, if we enable accepting broken packets. */
lp->stats.rx_errors++;
@@ -592,6 +662,7 @@ static int sonic_init(struct net_device *dev)
*/
SONIC_WRITE(SONIC_CMD, 0);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+ sonic_quiesce(dev, SONIC_CR_ALL);
/*
* initialize the receive resource area
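
[Not part of the patch: the rewritten comment in sonic_interrupt() above gives the { tx_skb[entry], td_status } state table in prose. A minimal sketch that encodes the same four states follows; the enum and the td_state() helper are made up for illustration only.]

#include <stdbool.h>
#include <stdio.h>

enum td_state {
	TD_NEVER_USED,      /* { clear, clear } */
	TD_OWNED_BY_SONIC,  /* { set,   clear } */
	TD_HANDED_BACK,     /* { set,   set   } */
	TD_REUSABLE,        /* { clear, set   } */
};

static enum td_state td_state(bool skb_set, bool status_set)
{
	if (skb_set)
		return status_set ? TD_HANDED_BACK : TD_OWNED_BY_SONIC;
	return status_set ? TD_REUSABLE : TD_NEVER_USED;
}

int main(void)
{
	/* The interrupt handler only frees descriptors in TD_HANDED_BACK. */
	printf("{set,set} -> %d (TD_HANDED_BACK)\n", td_state(true, true));
	return 0;
}
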
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 07091dd27e5d..7dcf913d7395 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -109,6 +109,9 @@
#define SONIC_CR_TXP 0x0002
#define SONIC_CR_HTX 0x0001
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+ SONIC_CR_RXEN | SONIC_CR_TXP)
+
/*
* SONIC data configuration bits
*/
@@ -273,8 +276,9 @@
#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
#define SONIC_RBSIZE 1520 /* size of one resource buffer */
@@ -320,6 +324,7 @@ struct sonic_local {
unsigned int next_tx; /* next free TD */
struct device *device; /* generic device */
struct net_device_stats stats;
+ spinlock_t lock;
};
#define TX_TIMEOUT (3 * HZ)
@@ -341,30 +346,30 @@ static void sonic_tx_timeout(struct net_device *dev);
as far as we can tell. */
/* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
int offset, __u16 val)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- ((__u16 *) base + (offset*2))[1] = val;
+ __raw_writew(val, base + (offset * 2) + 1);
#else
- ((__u16 *) base + (offset*2))[0] = val;
+ __raw_writew(val, base + (offset * 2) + 0);
#endif
else
- ((__u16 *) base)[offset] = val;
+ __raw_writew(val, base + (offset * 1) + 0);
}
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
int offset)
{
if (bitmode)
#ifdef __BIG_ENDIAN
- return ((volatile __u16 *) base + (offset*2))[1];
+ return __raw_readw(base + (offset * 2) + 1);
#else
- return ((volatile __u16 *) base + (offset*2))[0];
+ return __raw_readw(base + (offset * 2) + 0);
#endif
else
- return ((volatile __u16 *) base)[offset];
+ return __raw_readw(base + (offset * 1) + 0);
}
/* Inlines that you should actually use for reading/writing DMA buffers */
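
[Not part of the patch: a small sketch of the index arithmetic visible in the rewritten sonic_buf_put()/sonic_buf_get() above. In 32-bit "bitmode" each 16-bit field occupies a 32-bit slot and the accessor picks the higher-addressed half-word on big-endian hosts and the lower-addressed one on little-endian hosts, exactly as the #ifdef does; word_index() is a hypothetical helper, not driver code.]

#include <stdio.h>

static int word_index(int bitmode, int offset, int big_endian)
{
	if (!bitmode)
		return offset;                          /* packed 16-bit layout */
	return offset * 2 + (big_endian ? 1 : 0);       /* one field per 32-bit slot */
}

int main(void)
{
	printf("16-bit mode, field 3          -> u16 index %d\n", word_index(0, 3, 0));
	printf("32-bit mode, field 3 (BE CPU) -> u16 index %d\n", word_index(1, 3, 1));
	printf("32-bit mode, field 3 (LE CPU) -> u16 index %d\n", word_index(1, 3, 0));
	return 0;
}
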
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 6ce4412fcc1a..380e841fdd95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -2065,7 +2065,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
(level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
if ((mask & VXGE_DEBUG_MASK) == mask) \
- printk(fmt "\n", __VA_ARGS__); \
+ printk(fmt "\n", ##__VA_ARGS__); \
} while (0)
#else
#define vxge_debug_ll(level, mask, fmt, ...)
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 3a79d93b8445..5b535aa10d23 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -454,49 +454,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif
#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif
#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif
#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif
#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
- vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
+ vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif
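
[Not part of the patch: a tiny demonstration of why the vxge debug macros above need ##__VA_ARGS__. With a plain __VA_ARGS__, invoking the macro with no variadic arguments leaves a trailing comma in the expansion and the build fails; the GNU/clang "##" extension, which the kernel relies on, swallows that comma. dbg() is a hypothetical stand-in for vxge_debug_ll().]

#include <stdio.h>

#define dbg(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
	dbg("no arguments here");     /* would not compile without "##" */
	dbg("value = %d", 42);
	return 0;
}
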
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8e13ec84c538..9fcaf1910633 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1374,13 +1374,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->dma_buff_base_p = dma_handle;
netdev_dbg(ndev, "IO address space :%pR\n", res);
- netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
+ netdev_dbg(ndev, "IO address size :%zd\n",
+ (size_t)resource_size(res));
netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
- netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
- netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
- pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
+ netdev_dbg(ndev, "DMA buffer P address :%pad\n",
+ &pldat->dma_buff_base_p);
netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
pldat->dma_buff_base_v);
@@ -1427,8 +1428,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto err_out_unregister_netdev;
- netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
- res->start, ndev->irq);
+ netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
+ (unsigned long)res->start, ndev->irq);
phydev = ndev->phydev;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 2f4a837f0d6a..dcd56ac68748 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1053,7 +1053,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
dn = pci_device_to_OF_node(mac->pdev);
phy_dn = of_parse_phandle(dn, "phy-handle", 0);
- of_node_put(phy_dn);
mac->link = 0;
mac->speed = 0;
@@ -1062,6 +1061,7 @@ static int pasemi_mac_phy_init(struct net_device *dev)
phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
PHY_INTERFACE_MODE_SGMII);
+ of_node_put(phy_dn);
if (!phydev) {
printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
return -ENODEV;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 715776e2cfe5..2d198f6ee21d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1328,10 +1328,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_pstorm_per_queue_stat pstats;
u32 pstats_addr = 0, pstats_len = 0;
@@ -1351,10 +1350,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1397,10 +1395,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack
+void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_ustorm_per_queue_stat ustats;
u32 ustats_addr = 0, ustats_len = 0;
@@ -1436,10 +1433,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_mstorm_per_queue_stat mstats;
u32 mstats_addr = 0, mstats_len = 0;
@@ -1463,9 +1459,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
{
struct port_stats port_stats;
int j;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index a769196628d9..708117fc6f73 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -958,7 +958,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ goto err4;
}
}
@@ -966,6 +966,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return 0;
+err4:
+ qed_ll2_dealloc_if(cdev);
err3:
qed_hw_stop(cdev);
err2:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 85f46dbecd5b..9b1920b58594 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2619,8 +2619,16 @@ enum qede_remove_mode {
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
struct net_device *ndev = pci_get_drvdata(pdev);
- struct qede_dev *edev = netdev_priv(ndev);
- struct qed_dev *cdev = edev->cdev;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+
+ if (!ndev) {
+ dev_info(&pdev->dev, "Device has already been removed\n");
+ return;
+ }
+
+ edev = netdev_priv(ndev);
+ cdev = edev->cdev;
DP_INFO(edev, "Starting qede_remove\n");
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 355c5fb802cd..f2cb77c3b199 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2752,6 +2752,9 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
int err;
for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!skb)) {
@@ -2762,11 +2765,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
ql_free_large_buffers(qdev);
return -ENOMEM;
} else {
-
- lrg_buf_cb = &qdev->lrg_buf[i];
- memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
lrg_buf_cb->index = i;
- lrg_buf_cb->skb = skb;
/*
* We save some space to copy the ethhdr from first
* buffer
@@ -2783,10 +2782,12 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
err);
+ dev_kfree_skb_irq(skb);
ql_free_large_buffers(qdev);
return -ENOMEM;
}
+ lrg_buf_cb->skb = skb;
dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
dma_unmap_len_set(lrg_buf_cb, maplen,
qdev->lrg_buffer_len -
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index a496390b8632..cda5b0a9e948 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
ahw->reset.seq_error = 0;
ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
- if (p_dev->ahw->reset.buff == NULL)
+ if (ahw->reset.buff == NULL)
return -ENOMEM;
p_buff = p_dev->ahw->reset.buff;
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
break;
}
entry += p_hdr->size;
+ cond_resched();
}
p_dev->ahw->reset.seq_index = index;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 4b76c69fe86d..834208e55f7b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
- return 0;
+ return 1;
switch (capid) {
case DCB_CAP_ATTR_PG:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 0a2318cad34d..63ebc491057b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1038,6 +1038,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+ if (!skb)
+ break;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 0844b7c75767..5174e0bd75d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
addr += 16;
reg_read -= 16;
ret += 16;
+ cond_resched();
}
out:
mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
buffer = fw_dump->data + buf_offset;
+ cond_resched();
}
fw_dump->clr = 1;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 21f546587e3d..31583d6f044f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -438,7 +438,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
u16 signature = 0;
u16 spi_config;
u16 wrbuf_space = 0;
- static u16 reset_count;
if (event == QCASPI_EVENT_CPUON) {
/* Read signature twice, if not valid
@@ -491,13 +490,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
qca->sync = QCASPI_SYNC_RESET;
qca->stats.trig_reset++;
- reset_count = 0;
+ qca->reset_count = 0;
break;
case QCASPI_SYNC_RESET:
- reset_count++;
+ qca->reset_count++;
netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
- reset_count);
- if (reset_count >= QCASPI_RESET_TIMEOUT) {
+ qca->reset_count);
+ if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
/* reset did not seem to take place, try again */
qca->sync = QCASPI_SYNC_UNKNOWN;
qca->stats.reset_timeout++;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 6e31a0e744a4..c48c314ca4df 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -97,6 +97,7 @@ struct qcaspi {
unsigned int intr_req;
unsigned int intr_svc;
+ u16 reset_count;
#ifdef CONFIG_DEBUG_FS
struct dentry *device_root;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 480883a7a3e5..545cb6262cff 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,6 +1,6 @@
/* Renesas Ethernet AVB device driver
*
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
@@ -512,7 +512,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
kfree(ts_skb);
if (tag == tfa_tag) {
skb_tstamp_tx(skb, &shhwtstamps);
+ dev_consume_skb_any(skb);
break;
+ } else {
+ dev_kfree_skb_any(skb);
}
}
ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1537,7 +1540,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
DMA_TO_DEVICE);
goto unmap;
}
- ts_skb->skb = skb;
+ ts_skb->skb = skb_get(skb);
ts_skb->tag = priv->ts_skb_tag++;
priv->ts_skb_tag &= 0x3ff;
list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1665,6 +1668,7 @@ static int ravb_close(struct net_device *ndev)
/* Clear the timestamp list */
list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
kfree(ts_skb);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c59e8fe37069..6f8d4810ce97 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1388,6 +1388,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
sh_eth_get_stats(ndev);
sh_eth_reset(ndev);
+ /* Set the RMII mode again if required */
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* Set MAC address again */
update_mac_address(ndev);
}
@@ -2925,12 +2929,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
struct device_node *np = dev->of_node;
struct sh_eth_plat_data *pdata;
const char *mac_addr;
+ int ret;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- pdata->phy_interface = of_get_phy_mode(np);
+ ret = of_get_phy_mode(np);
+ if (ret < 0)
+ return NULL;
+ pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np);
if (mac_addr)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index ea44a2456ce1..11dd7c8d576d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2313,7 +2313,7 @@ static int __init sxgbe_cmdline_opt(char *str)
if (!str || !*str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
- if (!strncmp(opt, "eee_timer:", 6)) {
+ if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
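
[Not part of the patch: a short illustration of the strncmp() length fix in sxgbe_cmdline_opt() above. With a length of 6, any option beginning with "eee_ti" matched, not just "eee_timer:"; the bogus option string below is made up for the demonstration.]

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt = "eee_tight:123";   /* hypothetical bogus option */

	printf("len 6:  %s\n", !strncmp(opt, "eee_timer:", 6) ? "match" : "no match");
	printf("len 10: %s\n", !strncmp(opt, "eee_timer:", 10) ? "match" : "no match");
	return 0;
}
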
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c2bd5378ffda..3527962f0bda 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -792,15 +792,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
printk(KERN_ERR "Sgiseeq: Cannot register net device, "
"aborting.\n");
err = -ENODEV;
- goto err_out_free_page;
+ goto err_out_free_attrs;
}
printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
return 0;
-err_out_free_page:
- free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
err_out_free_dev:
free_netdev(dev);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 3d5d5d54c103..22bc3dc44298 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -5093,22 +5093,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};
+#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
struct efx_mcdi_mtd_partition *part,
- unsigned int type)
+ unsigned int type,
+ unsigned long *found)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
size_t size, erase_size, outlen;
+ int type_idx = 0;
bool protected;
int rc;
- for (info = efx_ef10_nvram_types; ; info++) {
- if (info ==
- efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ for (type_idx = 0; ; type_idx++) {
+ if (type_idx == EF10_NVRAM_PARTITION_COUNT)
return -ENODEV;
+ info = efx_ef10_nvram_types + type_idx;
if ((type & ~info->type_mask) == info->type)
break;
}
@@ -5121,6 +5124,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (protected)
return -ENODEV; /* hide it */
+ /* If we've already exposed a partition of this type, hide this
+ * duplicate. All operations on MTDs are keyed by the type anyway,
+ * so we can't act on the duplicate.
+ */
+ if (__test_and_set_bit(type_idx, found))
+ return -EEXIST;
+
part->nvram_type = type;
MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
@@ -5149,6 +5159,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
struct efx_mcdi_mtd_partition *parts;
size_t outlen, n_parts_total, i, n_parts;
unsigned int type;
@@ -5177,11 +5188,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
for (i = 0; i < n_parts_total; i++) {
type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
i);
- rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
- if (rc == 0)
- n_parts++;
- else if (rc != -ENODEV)
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
+ found);
+ if (rc == -EEXIST || rc == -ENODEV)
+ continue;
+ if (rc)
goto fail;
+ n_parts++;
}
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
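
[Not part of the patch: a userspace analogue of the DECLARE_BITMAP()/__test_and_set_bit() pattern the ef10 hunk above uses to expose only the first partition of each NVRAM type. The type indices below are made up; only the deduplication logic mirrors the hunk.]

#include <stdbool.h>
#include <stdio.h>

#define TYPE_COUNT 8

static bool seen[TYPE_COUNT];

/* Returns true the first time a type index is offered, false for duplicates,
 * much like !__test_and_set_bit(type_idx, found) in the hunk.
 */
static bool claim(unsigned int type_idx)
{
	if (seen[type_idx])
		return false;
	seen[type_idx] = true;
	return true;
}

int main(void)
{
	unsigned int types[] = { 1, 3, 3, 5, 1 };
	unsigned int exposed = 0;

	for (unsigned int i = 0; i < sizeof(types) / sizeof(types[0]); i++)
		if (claim(types[i]))
			exposed++;

	printf("exposed %u of %zu partitions\n",
	       exposed, sizeof(types) / sizeof(types[0]));
	return 0;
}
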
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 77a5364f7a10..04cbff7f1b23 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1320,7 +1320,8 @@ void efx_ptp_remove(struct efx_nic *efx)
(void)efx_ptp_disable(efx);
cancel_work_sync(&efx->ptp_data->work);
- cancel_work_sync(&efx->ptp_data->pps_work);
+ if (efx->ptp_data->pps_workwq)
+ cancel_work_sync(&efx->ptp_data->pps_work);
skb_queue_purge(&efx->ptp_data->rxq);
skb_queue_purge(&efx->ptp_data->txq);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6f85276376e8..ae9b983e8e5c 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1058,7 +1058,7 @@ sis900_open(struct net_device *net_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
@@ -1581,7 +1581,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
sw32(txdp, sis_priv->tx_ring_dma);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
}
/**
@@ -1621,7 +1621,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
return NETDEV_TX_OK;
}
- sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+ sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
sw32(cr, TxENA | sr32(cr));
sis_priv->cur_tx ++;
@@ -1677,7 +1677,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
do {
status = sr32(isr);
- if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+ if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
/* nothing intresting happened */
break;
handled = 1;
@@ -1687,7 +1687,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
/* Rx interrupt */
sis900_rx(net_dev);
- if (status & (TxURN | TxERR | TxIDLE))
+ if (status & (TxURN | TxERR | TxIDLE | TxDESC))
/* Tx interrupt */
sis900_finish_xmit(net_dev);
@@ -1899,8 +1899,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
if (tx_status & OWN) {
/* The packet is not transmitted yet (owned by hardware) !
- * Note: the interrupt is generated only when Tx Machine
- * is idle, so this is an almost impossible case */
+ * Note: this is an almost impossible condition
+ * in case of TxDESC ('descriptor interrupt') */
break;
}
@@ -2476,7 +2476,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
/* Enable all known interrupts by setting the interrupt mask. */
- sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+ sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
sw32(cr, RxENA | sr32(cr));
sw32(ier, IE);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index cb49c9654f0a..d0cf971aa4eb 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -514,7 +514,8 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
* now, or set the card to generates an interrupt when ready
* for the packet.
*/
-static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned int free;
@@ -947,7 +948,7 @@ static void smc911x_phy_configure(struct work_struct *work)
if (lp->ctl_rspeed != 100)
my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
- if (!lp->ctl_rfduplx)
+ if (!lp->ctl_rfduplx)
my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
/* Update our Auto-Neg Advertisement Register */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 73212590d04a..b0c72167bade 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -637,7 +637,8 @@ done: if (!THROTTLE_TX_PKTS)
* now, or set the card to generates an interrupt when ready
* for the packet.
*/
-static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 734caa7a557b..4143659615e1 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1776,7 +1776,8 @@ static int smsc911x_stop(struct net_device *dev)
}
/* Entry point for transmitting a packet */
-static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int freespace;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 6d2de4e01f6d..27c49d400bcd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -326,8 +326,8 @@ struct dma_features {
unsigned int enh_desc;
};
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
+/* RX Buffer size must be multiple of 4/8/16 bytes */
+#define BUF_SIZE_16KiB 16368
#define BUF_SIZE_8KiB 8192
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048
@@ -354,7 +354,7 @@ struct dma_features {
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 1d181e205d6e..f9cbba2d2cc0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -33,11 +33,14 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
+
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -63,11 +66,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 866444b6c82f..11a4a81b0397 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -203,7 +203,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
struct device *dev = &gmac->pdev->dev;
gmac->phy_mode = of_get_phy_mode(dev->of_node);
- if (gmac->phy_mode < 0) {
+ if ((int)gmac->phy_mode < 0) {
dev_err(dev, "missing phy mode property\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index f356a44bcb81..6704d3e0392d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -280,7 +280,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
dwmac->pdev = pdev;
dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
- if (dwmac->phy_mode < 0) {
+ if ((int)dwmac->phy_mode < 0) {
dev_err(&pdev->dev, "missing phy-mode property\n");
ret = -EINVAL;
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6e61bccc90b3..15c063880f88 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -771,10 +771,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
int ret;
struct device *dev = &bsp_priv->pdev->dev;
- if (!ldo) {
- dev_err(dev, "no regulator found\n");
- return -1;
- }
+ if (!ldo)
+ return 0;
if (enable) {
ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index d07520fb969e..fc1fa0f9f338 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -53,13 +53,15 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
* rate, which then uses the auto-reparenting feature of the
* clock driver, and enabling/disabling the clock.
*/
- if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
clk_prepare_enable(gmac->tx_clk);
gmac->clk_enabled = 1;
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
- clk_prepare(gmac->tx_clk);
+ ret = clk_prepare(gmac->tx_clk);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 7d19029e2564..3a2edf9f51e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -213,6 +213,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg));
reg++;
}
+
+ while (reg < perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
#ifdef FRAME_FILTER_DEBUG
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 51019b794be5..4216c0a5eaf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -168,19 +168,25 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
}
/* Handle multiple unicast addresses */
- if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
/* Switch to promiscuous mode if more than 128 addrs
* are required
*/
value |= GMAC_PACKET_FILTER_PR;
- } else if (!netdev_uc_empty(dev)) {
- int reg = 1;
+ } else {
struct netdev_hw_addr *ha;
+ int reg = 1;
netdev_for_each_uc_addr(ha, dev) {
dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
+
+ while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
}
writel(value, ioaddr + GMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 3f5056858535..a90b02926e4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -289,7 +289,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ce97e522566a..47f4fe50c848 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -205,6 +205,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -235,9 +240,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -263,15 +269,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index fd78406e2e9a..5a06a5a1f6ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -95,8 +95,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -139,15 +137,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f4074e25fb71..25136941a964 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -125,7 +125,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
* programmed with (2^32 – <new_sec_value>)
*/
if (gmac4)
- sec = (100000000ULL - sec);
+ sec = -sec;
value = readl(ioaddr + PTP_TCR);
if (value & PTP_TCR_TSCTRLSSR)
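
[Not part of the patch: a quick arithmetic check of the stmmac_adjust_systime() change above. The comment in the hunk says the register wants (2^32 - <new_sec_value>); negating an unsigned 32-bit value gives exactly that, whereas the old 100000000ULL - sec computed something unrelated. The sample value of sec is arbitrary.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sec = 5;   /* seconds to subtract, chosen arbitrarily */

	printf("-sec             = %" PRIu32 "\n", (uint32_t)-sec);              /* 4294967291 */
	printf("2^32 - sec       = %" PRIu64 "\n", (uint64_t)((1ULL << 32) - sec)); /* 4294967291 */
	printf("100000000 - sec  = %" PRIu64 "\n", (uint64_t)(100000000ULL - sec)); /* 99999995   */
	return 0;
}
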
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 20a2b01b392c..8c1a5361f661 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -55,7 +55,7 @@
#include <linux/of_mdio.h>
#include "dwmac1000.h"
-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
@@ -929,7 +929,9 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
- if (mtu >= BUF_SIZE_4KiB)
+ if (mtu >= BUF_SIZE_8KiB)
+ ret = BUF_SIZE_16KiB;
+ else if (mtu >= BUF_SIZE_4KiB)
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
@@ -956,11 +958,11 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
if (priv->extend_desc)
priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1), priv->dma_buf_sz);
else
priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1), priv->dma_buf_sz);
for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
@@ -1747,11 +1749,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (ret < 0)
pr_warn("%s: failed debugFS registration\n", __func__);
#endif
- /* Start the ball rolling... */
- pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
- priv->hw->dma->start_tx(priv->ioaddr);
- priv->hw->dma->start_rx(priv->ioaddr);
-
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
priv->hw->mac->dump_regs(priv->hw);
@@ -1779,6 +1776,11 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->tso)
priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+ /* Start the ball rolling... */
+ pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
return 0;
}
@@ -1796,8 +1798,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- stmmac_check_ether_addr(priv);
-
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -2481,7 +2481,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
wmb();
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
- priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0, priv->dma_buf_sz);
else
priv->hw->desc->set_rx_owner(p);
@@ -2501,8 +2501,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
- unsigned int entry = priv->cur_rx;
- unsigned int next_entry;
+ unsigned int next_entry = priv->cur_rx;
unsigned int count = 0;
int coe = priv->hw->rx_csum;
@@ -2518,10 +2517,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_erx + entry);
else
@@ -2586,7 +2587,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->name, frame_len,
priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -2617,7 +2618,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -2640,7 +2641,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
pr_err("%s: Inconsistent Rx chain\n",
priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
priv->rx_skbuff[entry] = NULL;
@@ -2674,7 +2675,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv);
@@ -2931,6 +2931,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+
+ return ret;
+}
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
@@ -3137,7 +3151,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = stmmac_set_mac_address,
};
/**
@@ -3341,6 +3355,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ stmmac_check_ether_addr(priv);
+
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 2abeba41c0af..5fe3622d27d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -240,7 +240,8 @@ int stmmac_mdio_reset(struct mii_bus *bus)
of_property_read_u32_array(np,
"snps,reset-delays-us", data->delays, 3);
- if (gpio_request(data->reset_gpio, "mdio-reset"))
+ if (devm_gpio_request(priv->device, data->reset_gpio,
+ "mdio-reset"))
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 3eb281d1db08..231330809037 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -158,7 +158,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
/* structure describing a PTP hardware clock */
static struct ptp_clock_info stmmac_ptp_clock_ops = {
.owner = THIS_MODULE,
- .name = "stmmac_ptp_clock",
+ .name = "stmmac ptp",
.max_adj = 62500000,
.n_alarm = 0,
.n_ext_ts = 0,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d7cb205fe7e2..687f0c20b47f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -590,6 +590,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
+ __dev_mc_unsync(ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -772,8 +773,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
struct cpsw_common *cpsw = dev_id;
- cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
writel(0, &cpsw->wr_regs->rx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
if (cpsw->quirk_irq) {
disable_irq_nosync(cpsw->irqs_table[0]);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index d543298d6750..ff24524e7f46 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3122,12 +3122,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
- if (ret)
+ if (ret) {
+ of_node_put(interfaces);
return ret;
+ }
/* Create network interfaces */
INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 272f2b1cb7ad..34f843795531 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -845,9 +845,9 @@ static int gelic_card_kick_txdma(struct gelic_card *card,
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, <0 on failure
+ * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
*/
-int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct gelic_card *card = netdev_card(netdev);
struct gelic_descr *descr;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 8505196be9f5..d123644bd720 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -370,7 +370,7 @@ void gelic_card_up(struct gelic_card *card);
void gelic_card_down(struct gelic_card *card);
int gelic_net_open(struct net_device *netdev);
int gelic_net_stop(struct net_device *netdev);
-int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+netdev_tx_t gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
void gelic_net_set_multi(struct net_device *netdev);
void gelic_net_tx_timeout(struct net_device *netdev);
int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 36a6e8b54d94..108598794621 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -880,9 +880,9 @@ out:
* @skb: packet to send out
* @netdev: interface device structure
*
- * returns 0 on success, !0 on failure
+ * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
*/
-static int
+static netdev_tx_t
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int cnt;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 5b01b3fa9fec..9b84ee736fdc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -474,7 +474,8 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
/* Index to functions, as function prototypes. */
static int tc35815_open(struct net_device *dev);
-static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t tc35815_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
static int tc35815_rx(struct net_device *dev, int limit);
static int tc35815_poll(struct napi_struct *napi, int budget);
@@ -1249,7 +1250,8 @@ tc35815_open(struct net_device *dev)
* invariant will hold if you make sure that the netif_*_queue()
* calls are done at the proper times.
*/
-static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
struct TxFD *txfd;
@@ -1498,7 +1500,7 @@ tc35815_rx(struct net_device *dev, int limit)
pci_unmap_single(lp->pci_dev,
lp->rx_skbs[cur_bd].skb_dma,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fd131207ee1..499abe9108fa 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -381,9 +381,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
static void tsi108_stat_carry(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
+ unsigned long flags;
u32 carry1, carry2;
- spin_lock_irq(&data->misclock);
+ spin_lock_irqsave(&data->misclock, flags);
carry1 = TSI_READ(TSI108_STAT_CARRY1);
carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -451,7 +452,7 @@ static void tsi108_stat_carry(struct net_device *dev)
TSI108_STAT_TXPAUSEDROP_CARRY,
&data->tx_pause_drop);
- spin_unlock_irq(&data->misclock);
+ spin_unlock_irqrestore(&data->misclock, flags);
}
/* Read a stat counter atomically with respect to carries.
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a9bd665fd122..545f60877bb7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -673,7 +673,8 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
return 0;
}
-static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c688d68c39aa..46fcf3ec2caf 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -612,6 +612,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}
@@ -651,7 +655,8 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
* start the transmission. Additionally if checksum offloading is supported,
* it populates AXI Stream Control fields with appropriate values.
*/
-static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t
+axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
u32 ii;
u32 num_frag;
@@ -666,9 +671,19 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (axienet_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1548,12 +1563,14 @@ static int axienet_probe(struct platform_device *pdev)
ret = of_address_to_resource(np, 0, &dmares);
if (ret) {
dev_err(&pdev->dev, "unable to get DMA resource\n");
+ of_node_put(np);
goto free_netdev;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
+ of_node_put(np);
goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index aa02a03a6d8d..034b36442ee7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1005,9 +1005,10 @@ static int xemaclite_close(struct net_device *dev)
* deferred and the Tx queue is stopped so that the deferred socket buffer can
* be transmitted when the Emaclite device is free to transmit data.
*
- * Return: 0, always.
+ * Return: NETDEV_TX_OK, always.
*/
-static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
+static netdev_tx_t
+xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
@@ -1028,7 +1029,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
/* Take the time stamp now, since we can't do this in an ISR. */
skb_tx_timestamp(new_skb);
spin_unlock_irqrestore(&lp->reset_lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
spin_unlock_irqrestore(&lp->reset_lock, flags);
@@ -1037,7 +1038,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
dev->stats.tx_bytes += len;
dev_consume_skb_any(new_skb);
- return 0;
+ return NETDEV_TX_OK;
}
/**
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 7ea8ead4fd1c..3511d40ba3f1 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -148,6 +148,9 @@ static int fjes_acpi_add(struct acpi_device *device)
/* create platform_device */
plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
device->driver_data = plat_dev;
return 0;
@@ -1187,8 +1190,17 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->open_guard = false;
adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1205,7 +1217,7 @@ static int fjes_probe(struct platform_device *plat_dev)
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
err = fjes_hw_init(&adapter->hw);
if (err)
- goto err_free_netdev;
+ goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
netdev->dev_addr[0] = 2;
@@ -1225,6 +1237,10 @@ static int fjes_probe(struct platform_device *plat_dev)
err_hw_exit:
fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
free_netdev(netdev);
err_out:
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index cb206e5526c4..a9e8a7356c41 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -42,7 +42,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;
union {
- u64 tid;
struct {
u64 tid;
u16 flow;
@@ -678,10 +677,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
goto out_err;
- if (!data[IFLA_GTP_PDP_HASHSIZE])
+ if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
- else
+ } else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+ if (!hashsize)
+ hashsize = 1024;
+ }
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -782,11 +784,13 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
{
int i;
- gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize,
+ GFP_KERNEL | __GFP_NOWARN);
if (gtp->addr_hash == NULL)
return -ENOMEM;
- gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize,
+ GFP_KERNEL | __GFP_NOWARN);
if (gtp->tid_hash == NULL)
goto err1;
@@ -834,7 +838,9 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
return -ENOENT;
}
- if (sock0->sk->sk_protocol != IPPROTO_UDP) {
+ if (sock0->sk->sk_protocol != IPPROTO_UDP ||
+ sock0->sk->sk_type != SOCK_DGRAM ||
+ (sock0->sk->sk_family != AF_INET && sock0->sk->sk_family != AF_INET6)) {
netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
err = -EINVAL;
goto err1;
@@ -952,7 +958,7 @@ static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
}
- pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
return -ENOMEM;
@@ -1221,43 +1227,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- unsigned long tid = cb->args[1];
- int i, k = cb->args[0], ret;
struct pdp_ctx *pctx;
+ struct gtp_net *gn;
+
+ gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
+ rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;
- for (i = k; i < gtp->hash_size; i++) {
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
- if (tid && tid != pctx->u.tid)
- continue;
- else
- tid = 0;
-
- ret = gtp_genl_fill_info(skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, pctx);
- if (ret < 0) {
+ for (i = bucket; i < gtp->hash_size; i++) {
+ j = 0;
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
+ hlist_tid) {
+ if (j >= skip &&
+ gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
- cb->args[1] = pctx->u.tid;
+ cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
+ j++;
}
+ skip = 0;
}
+ bucket = 0;
}
cb->args[4] = 1;
out:
+ rcu_read_unlock();
return skb->len;
}
@@ -1358,9 +1367,9 @@ late_initcall(gtp_init);
static void __exit gtp_fini(void)
{
- unregister_pernet_subsys(&gtp_net_ops);
genl_unregister_family(&gtp_genl_family);
rtnl_link_unregister(&gtp_link_ops);
+ unregister_pernet_subsys(&gtp_net_ops);
pr_info("GTP module unloaded\n");
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 470b3dcd54e5..03c96a6cbafd 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -665,10 +665,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!sp)
return;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index e0a6b1a0ca88..088fe5d34f50 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -783,10 +783,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
- write_lock_bh(&disc_data_lock);
+ write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock_bh(&disc_data_lock);
+ write_unlock_irq(&disc_data_lock);
if (!ax)
return;
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index f186e0460cde..12df6cfb423a 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -838,10 +838,11 @@ static void atusb_disconnect(struct usb_interface *interface)
ieee802154_unregister_hw(atusb->hw);
+ usb_put_dev(atusb->usb_dev);
+
ieee802154_free_hw(atusb->hw);
usb_set_intfdata(interface, NULL);
- usb_put_dev(atusb->usb_dev);
pr_debug("atusb_disconnect done\n");
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c747ab652665..6c0982a39486 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -251,6 +251,7 @@ acct:
} else {
kfree_skb(skb);
}
+ cond_resched();
}
}
@@ -443,19 +444,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
- /* In this mode we dont care about multicast and broadcast traffic */
- if (is_multicast_ether_addr(ethh->h_dest)) {
- pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
- ntohs(skb->protocol));
- kfree_skb(skb);
- goto out;
- }
-
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
* will have L2; which need to discarded and processed further
* in the net-ns of the main-device.
*/
if (skb_mac_header_was_set(skb)) {
+ /* In this mode we don't care about
+ * multicast and broadcast traffic */
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ pr_debug_ratelimited(
+ "Dropped {multi|broad}cast of type=[%x]\n",
+ ntohs(skb->protocol));
+ kfree_skb(skb);
+ goto out;
+ }
+
skb_pull(skb, sizeof(*ethh));
skb->mac_header = (typeof(skb->mac_header))~0U;
skb_reset_network_header(skb);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 72fb55ca27f3..72f37e546ed2 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -217,7 +217,6 @@ static void ipvlan_uninit(struct net_device *dev)
static int ipvlan_open(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -229,7 +228,7 @@ static int ipvlan_open(struct net_device *dev)
list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
- return dev_uc_add(phy_dev, phy_dev->dev_addr);
+ return 0;
}
static int ipvlan_stop(struct net_device *dev)
@@ -241,8 +240,6 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_unsync(phy_dev, dev);
dev_mc_unsync(phy_dev, dev);
- dev_uc_del(phy_dev, phy_dev->dev_addr);
-
list_for_each_entry(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 653f0b185a68..da8bf327a3e9 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
+#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
@@ -867,6 +868,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
+ skb->ip_summed = CHECKSUM_NONE;
memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
skb_pull(skb, hdr_len);
pskb_trim_unique(skb, skb->len - icv_len);
@@ -1105,10 +1107,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- *pskb = NULL;
+ *pskb = skb;
+ if (!skb)
return RX_HANDLER_CONSUMED;
- }
pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
if (!pulled_sci) {
@@ -1240,6 +1241,7 @@ deliver:
macsec_rxsa_put(rx_sa);
macsec_rxsc_put(rx_sc);
+ skb_orphan(skb);
ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, skb->len);
@@ -2797,9 +2799,6 @@ static int macsec_dev_open(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- if (!(real_dev->flags & IFF_UP))
- return -ENETDOWN;
-
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
return err;
@@ -2873,6 +2872,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev)
dev_uc_sync(real_dev, dev);
}
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
static int macsec_set_mac_address(struct net_device *dev, void *p)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -2894,6 +2898,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
out:
ether_addr_copy(dev->dev_addr, addr->sa_data);
+ macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
return 0;
}
@@ -2978,6 +2983,7 @@ static const struct device_type macsec_type = {
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+ [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@@ -3162,11 +3168,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
return false;
}
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
- return make_sci(dev->dev_addr, port);
-}
-
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
struct macsec_dev *macsec = macsec_priv(dev);
@@ -3219,6 +3220,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
+ if (real_dev->type != ARPHRD_ETHER)
+ return -EINVAL;
dev->priv_flags |= IFF_MACSEC;
@@ -3274,6 +3277,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ netif_stacked_transfer_operstate(real_dev, dev);
+ linkwatch_fire_event(dev);
+
macsec_generation++;
return 0;
@@ -3445,6 +3451,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_DONE;
switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_UP:
+ case NETDEV_CHANGE: {
+ struct macsec_dev *m, *n;
+ struct macsec_rxh_data *rxd;
+
+ rxd = macsec_data_rtnl(real_dev);
+ list_for_each_entry_safe(m, n, &rxd->secys, secys) {
+ struct net_device *dev = m->secy.netdev;
+
+ netif_stacked_transfer_operstate(real_dev, dev);
+ }
+ break;
+ }
case NETDEV_UNREGISTER: {
struct macsec_dev *m, *n;
struct macsec_rxh_data *rxd;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6237236b7c4c..294881621430 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -309,6 +309,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
if (src)
dev_put(src->dev);
kfree_skb(skb);
+
+ cond_resched();
}
}
@@ -334,10 +336,11 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
+ schedule_work(&port->bc_work);
+
if (err)
goto free_nskb;
- schedule_work(&port->bc_work);
return;
free_nskb:
@@ -486,10 +489,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *dest;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
- const struct ethhdr *eth = (void *)skb->data;
+ const struct ethhdr *eth = skb_eth_hdr(skb);
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
goto xmit_world;
}
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a9acf7156855..03009f1becdd 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(unsigned long data)
struct ntb_netdev *dev = netdev_priv(ndev);
if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
- mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+ mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
} else {
/* Make sure anybody stopping the queue after this sees the new
* value of ntb_transport_tx_free_entry()
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index eb5167210681..3ab2eb677a59 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -67,11 +67,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
do {
s = read_seqcount_begin(&fp->seqcount);
/* Issue callback if user registered it. */
- if (fp->link_update) {
+ if (fp->link_update)
fp->link_update(fp->phydev->attached_dev,
&fp->status);
- fixed_phy_update(fp);
- }
+ /* Check the GPIO for change in status */
+ fixed_phy_update(fp);
state = fp->status;
} while (read_seqcount_retry(&fp->seqcount, s));
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 520352327104..77357b07f6c9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1429,9 +1429,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+ for (i = 0; i < count; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@@ -1470,9 +1471,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+ for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index 46fe1ae919a3..51ce3ea17fb3 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+int iproc_mdio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
+
+ /* restore the mii clock configuration */
+ iproc_mdio_config_clk(priv->base);
+
+ return 0;
+}
+
+static const struct dev_pm_ops iproc_mdio_pm_ops = {
+ .resume = iproc_mdio_resume
+};
+#endif /* CONFIG_PM_SLEEP */
+
static const struct of_device_id iproc_mdio_of_match[] = {
{ .compatible = "brcm,iproc-mdio", },
{ /* sentinel */ },
@@ -198,6 +215,9 @@ static struct platform_driver iproc_mdio_driver = {
.driver = {
.name = "iproc-mdio",
.of_match_table = iproc_mdio_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &iproc_mdio_pm_ops,
+#endif
},
.probe = iproc_mdio_probe,
.remove = iproc_mdio_remove,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9ac9923c04a5..9e5e3ff51572 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -728,6 +729,12 @@ static int kszphy_resume(struct phy_device *phydev)
{
genphy_resume(phydev);
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
/* Enable PHY Interrupts */
if (phy_interrupt_is_valid(phydev)) {
phydev->interrupts = PHY_INTERRUPT_ENABLED;
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2a1b490bc587..718cd3c59e92 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
{
+ u16 lb_dis = BIT(1);
+
if (disable)
- ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
+ ns_exp_write(phydev, 0x1c0,
+ ns_exp_read(phydev, 0x1c0) | lb_dis);
else
ns_exp_write(phydev, 0x1c0,
- ns_exp_read(phydev, 0x1c0) & 0xfffe);
+ ns_exp_read(phydev, 0x1c0) & ~lb_dis);
pr_debug("10BASE-T HDX loopback %s\n",
- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+ (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
}
static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 5fde8e335f13..82a4487bc66e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -463,7 +463,8 @@ int phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = PORT_MII;
-
+ cmd->base.transceiver = phy_is_internal(phydev) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bf195cfe5f85..348a5fefd68e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -138,6 +138,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+ phydev->suspended_by_mdio_bus = true;
+
return phy_suspend(phydev);
}
@@ -146,9 +148,11 @@ static int mdio_bus_phy_resume(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
int ret;
- if (!mdio_bus_phy_may_suspend(phydev))
+ if (!phydev->suspended_by_mdio_bus)
goto no_resume;
+ phydev->suspended_by_mdio_bus = false;
+
ret = phy_resume(phydev);
if (ret < 0)
return ret;
@@ -335,8 +339,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mdiodev->device_free = phy_mdio_device_free;
mdiodev->device_remove = phy_mdio_device_remove;
- dev->speed = 0;
- dev->duplex = -1;
+ dev->speed = SPEED_UNKNOWN;
+ dev->duplex = DUPLEX_UNKNOWN;
dev->pause = 0;
dev->asym_pause = 0;
dev->link = 1;
@@ -684,6 +688,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
{
int rc;
+ if (!dev)
+ return -EINVAL;
+
rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
if (rc)
return rc;
@@ -976,6 +983,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
struct device *d;
int rc;
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 1e2d4f1179da..45df03673e01 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -162,6 +162,14 @@ static const struct spi_device_id ks8995_id[] = {
};
MODULE_DEVICE_TABLE(spi, ks8995_id);
+static const struct of_device_id ks8895_spi_of_match[] = {
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+ };
+MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
+
static inline u8 get_chip_id(u8 val)
{
return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
@@ -529,6 +537,7 @@ static int ks8995_remove(struct spi_device *spi)
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
+ .of_match_table = of_match_ptr(ks8895_spi_of_match),
},
.probe = ks8995_probe,
.remove = ks8995_remove,
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 9c889e0303dd..cef40de1bd05 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -878,15 +878,15 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
if (!skb)
goto nomem;
- ap->rpkt = skb;
- }
- if (skb->len == 0) {
- /* Try to get the payload 4-byte aligned.
- * This should match the
- * PPP_ALLSTATIONS/PPP_UI/compressed tests in
- * process_input_packet, but we do not have
- * enough chars here to test buf[1] and buf[2].
- */
+ ap->rpkt = skb;
+ }
+ if (skb->len == 0) {
+ /* Try to get the payload 4-byte aligned.
+ * This should match the
+ * PPP_ALLSTATIONS/PPP_UI/compressed tests in
+ * process_input_packet, but we do not have
+ * enough chars here to test buf[1] and buf[2].
+ */
if (buf[0] != PPP_ALLSTATIONS)
skb_reserve(skb, 2 + (buf[0] & 1));
}
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index b5edc7f96a39..685e875f5164 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -610,12 +610,20 @@ static struct compressor ppp_deflate_draft = {
static int __init deflate_init(void)
{
- int answer = ppp_register_compressor(&ppp_deflate);
- if (answer == 0)
- printk(KERN_INFO
- "PPP Deflate Compression module registered\n");
- ppp_register_compressor(&ppp_deflate_draft);
- return answer;
+ int rc;
+
+ rc = ppp_register_compressor(&ppp_deflate);
+ if (rc)
+ return rc;
+
+ rc = ppp_register_compressor(&ppp_deflate_draft);
+ if (rc) {
+ ppp_unregister_compressor(&ppp_deflate);
+ return rc;
+ }
+
+ pr_info("PPP Deflate Compression module registered\n");
+ return 0;
}
static void __exit deflate_cleanup(void)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 1e4969d90f1a..801bab5968d0 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1432,6 +1432,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
netif_wake_queue(ppp->dev);
else
netif_stop_queue(ppp->dev);
+ } else {
+ kfree_skb(skb);
}
ppp_xmit_unlock(ppp);
}
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index f60f7660b451..92f52a73ec0e 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_SOFTDEP("pre: arc4");
MODULE_VERSION("1.0.2");
static unsigned int
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 8c93ed5c9763..fa8f7c40a384 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1134,6 +1134,9 @@ static const struct proto_ops pppoe_ops = {
.recvmsg = pppoe_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppoe_proto = {
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index b9c8be6283d3..50856f9fe08a 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(pppox_ioctl);
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == PPPOEIOCSFWD32)
+ cmd = PPPOEIOCSFWD;
+
+ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
static int pppox_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 5a8befdfa5e4..fa14a67fb09a 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -638,6 +638,9 @@ static const struct proto_ops pptp_ops = {
.recvmsg = sock_no_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppox_pptp_proto = {
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index cfd81eb1b532..a516470da015 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -153,7 +153,7 @@ out_fail:
void
slhc_free(struct slcompress *comp)
{
- if ( comp == NULLSLCOMPR )
+ if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )
@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
register struct cstate *cs = lcs->next;
register unsigned long deltaS, deltaA;
register short changes = 0;
- int hlen;
+ int nlen, hlen;
unsigned char new_seq[16];
register unsigned char *cp = new_seq;
struct iphdr *ip;
@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
return isize;
ip = (struct iphdr *) icp;
+ if (ip->version != 4 || ip->ihl < 5)
+ return isize;
/* Bail if this packet isn't TCP, or is an IP fragment */
if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
comp->sls_o_tcp++;
return isize;
}
- /* Extract TCP header */
+ nlen = ip->ihl * 4;
+ if (isize < nlen + sizeof(*th))
+ return isize;
- th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
- hlen = ip->ihl*4 + th->doff*4;
+ th = (struct tcphdr *)(icp + nlen);
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return isize;
+ hlen = nlen + th->doff * 4;
/* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
* some other control bit is set). Also uncompressible if
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9ed6d1c1ee45..cc841126147e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
- struct slip *sl = tty->disc_data;
+ struct slip *sl;
+
+ rcu_read_lock();
+ sl = rcu_dereference(tty->disc_data);
+ if (!sl)
+ goto out;
schedule_work(&sl->tx_work);
+out:
+ rcu_read_unlock();
}
static void sl_tx_timeout(struct net_device *dev)
@@ -860,6 +867,7 @@ err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
+ sl_free_netdev(sl->dev);
err_exit:
rtnl_unlock();
@@ -885,10 +893,11 @@ static void slip_close(struct tty_struct *tty)
return;
spin_lock_bh(&sl->lock);
- tty->disc_data = NULL;
+ rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
+ synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 375b6810bf46..d0c18e3557f1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1014,7 +1014,9 @@ static void ___team_compute_features(struct team *team)
}
team->dev->vlan_features = vlan_features;
- team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
+ team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
team->dev->hard_header_len = max_hard_header_len;
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -1163,6 +1165,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EINVAL;
}
+ if (netdev_has_upper_dev(dev, port_dev)) {
+ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
@@ -1251,6 +1259,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_option_port_add;
}
+ /* set promiscuity level to new slave */
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(port_dev, 1);
+ if (err)
+ goto err_set_slave_promisc;
+ }
+
+ /* set allmulti level to new slave */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(port_dev, 1);
+ if (err) {
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ goto err_set_slave_promisc;
+ }
+ }
+
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
@@ -1267,6 +1292,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return 0;
+err_set_slave_promisc:
+ __team_option_inst_del_port(team, port);
+
err_option_port_add:
team_upper_dev_unlink(team, port);
@@ -1312,6 +1340,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_disable(team, port);
list_del_rcu(&port->list);
+
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(port_dev, -1);
+
team_upper_dev_unlink(team, port);
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
@@ -2104,12 +2138,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->hw_features = TEAM_VLAN_FEATURES |
- NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
dev->features |= dev->hw_features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
@@ -2182,6 +2216,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
[TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
[TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
+ [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 24cc94453d38..44b16d945e33 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -627,7 +627,8 @@ static void tun_detach_all(struct net_device *dev)
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
+static int tun_attach(struct tun_struct *tun, struct file *file,
+ bool skip_filter, bool publish_tun)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -669,7 +670,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
- rcu_assign_pointer(tfile->tun, tun);
+ if (publish_tun)
+ rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
@@ -828,18 +830,8 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
- struct tun_struct *tun = netdev_priv(dev);
- int i;
-
netif_tx_start_all_queues(dev);
- for (i = 0; i < tun->numqueues; i++) {
- struct tun_file *tfile;
-
- tfile = rtnl_dereference(tun->tfiles[i]);
- tfile->socket.sk->sk_write_space(tfile->socket.sk);
- }
-
return 0;
}
@@ -1114,6 +1106,13 @@ static void tun_net_init(struct net_device *dev)
}
}
+static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+{
+ struct sock *sk = tfile->socket.sk;
+
+ return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+}
+
/* Character device part */
/* Poll */
@@ -1136,10 +1135,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
- if (tun->dev->flags & IFF_UP &&
- (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk))))
+ /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+ * guarantee EPOLLOUT to be raised by either here or
+ * tun_sock_write_space(). Then process could get notification
+ * after it writes to a down device and meets -EIO.
+ */
+ if (tun_sock_writeable(tun, tfile) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ tun_sock_writeable(tun, tfile)))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -1194,9 +1197,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash;
ssize_t n;
- if (!(tun->dev->flags & IFF_UP))
- return -EIO;
-
if (!(tun->flags & IFF_NO_PI)) {
if (len < sizeof(pi))
return -EINVAL;
@@ -1273,9 +1273,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
err = skb_copy_datagram_from_iter(skb, 0, from, len);
if (err) {
+ err = -EFAULT;
+drop:
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
- return -EFAULT;
+ return err;
}
err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
@@ -1327,7 +1329,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_probe_transport_header(skb, 0);
rxhash = skb_get_hash(skb);
+
+ rcu_read_lock();
+ if (unlikely(!(tun->dev->flags & IFF_UP))) {
+ err = -EIO;
+ rcu_read_unlock();
+ goto drop;
+ }
+
netif_rx_ni(skb);
+ rcu_read_unlock();
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -1753,7 +1764,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
return err;
- err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, true);
if (err < 0)
return err;
@@ -1841,13 +1852,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false);
+ err = tun_attach(tun, file, false, false);
if (err < 0)
goto err_free_flow;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_detach;
+ /* free_netdev() won't check refcnt, to avoid race
+ * with dev_put() we need to publish tun after registration.
+ */
+ rcu_assign_pointer(tfile->tun, tun);
}
netif_carrier_on(tun->dev);
@@ -1991,7 +2006,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false);
+ ret = tun_attach(tun, file, false, true);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -2526,6 +2541,7 @@ static int tun_device_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct tun_struct *tun = netdev_priv(dev);
+ int i;
if (dev->rtnl_link_ops != &tun_link_ops)
return NOTIFY_DONE;
@@ -2535,6 +2551,14 @@ static int tun_device_event(struct notifier_block *unused,
if (tun_queue_resize(tun))
return NOTIFY_BAD;
break;
+ case NETDEV_UP:
+ for (i = 0; i < tun->numqueues; i++) {
+ struct tun_file *tfile;
+
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
+ }
+ break;
default:
break;
}
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 393fd3ed6b94..4b12b6da3fab 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -237,7 +237,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
int i;
unsigned long gpio_bits = dev->driver_info->data;
@@ -687,7 +687,7 @@ static int asix_resume(struct usb_interface *intf)
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, i;
- u8 buf[ETH_ALEN], chipcode = 0;
+ u8 buf[ETH_ALEN] = {0}, chipcode = 0;
u32 phyid;
struct asix_common_private *priv;
@@ -1064,7 +1064,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
- u8 buf[ETH_ALEN];
+ u8 buf[ETH_ALEN] = {0};
usbnet_get_endpoints(dev,intf);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 49a3bc107d05..2c50497cc4ed 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -215,7 +215,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
- if (ret < 0) {
+ if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
goto free;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 99424c87b464..8f03cc52ddda 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -212,9 +212,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
- if ( rndis &&
- header.usb_cdc_acm_descriptor &&
- header.usb_cdc_acm_descriptor->bmCapabilities) {
+ /* Communication class functions with bmCapabilities are not
+ * RNDIS. But some Wireless class RNDIS functions use
+ * bmCapabilities for their own purpose. The failsafe is
+ * therefore applied only to Communication class RNDIS
+ * functions. The rndis test is redundant, but a cheap
+ * optimization.
+ */
+ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+ header.usb_cdc_acm_descriptor &&
+ header.usb_cdc_acm_descriptor->bmCapabilities) {
dev_dbg(&intf->dev,
"ACM capabilities %02x, not really RNDIS?\n",
header.usb_cdc_acm_descriptor->bmCapabilities);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 7b158674ceed..be4e56826daf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -576,8 +576,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
/* read current mtu value from device */
err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
- if (err < 0) {
+ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+ if (err != sizeof(max_datagram_size)) {
dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
goto out;
}
@@ -588,7 +588,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
- 0, iface_no, &max_datagram_size, 2);
+ 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
if (err < 0)
dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
@@ -679,8 +679,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
u8 ep;
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
-
e = intf->cur_altsetting->endpoint + ep;
+
+ /* ignore endpoints which cannot transfer data */
+ if (!usb_endpoint_maxp(&e->desc))
+ continue;
+
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (usb_endpoint_dir_in(&e->desc)) {
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 947bea81d924..dfbdea22fbad 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
}
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto err;
}
/* enable ethernet mode (?) */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 66ae647b712e..27fc699d8be5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2635,14 +2635,18 @@ static struct hso_device *hso_create_bulk_serial_device(
*/
if (serial->tiocmget) {
tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
+ }
+
tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
if (tiocmget->urb) {
mutex_init(&tiocmget->mutex);
init_waitqueue_head(&tiocmget->waitq);
- tiocmget->endp = hso_get_ep(
- interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
} else
hso_free_tiomget(serial);
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index f1f8227e7342..2b16a5fed9de 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -148,6 +148,7 @@ struct ipheth_device {
u8 bulk_in;
u8 bulk_out;
struct delayed_work carrier_work;
+ bool confirmed_pairing;
};
static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
@@ -259,7 +260,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += len;
-
+ dev->confirmed_pairing = true;
netif_rx(skb);
ipheth_rx_submit(dev, GFP_ATOMIC);
}
@@ -280,14 +281,24 @@ static void ipheth_sndbulk_callback(struct urb *urb)
dev_err(&dev->intf->dev, "%s: urb status: %d\n",
__func__, status);
- netif_wake_queue(dev->net);
+ if (status == 0)
+ netif_wake_queue(dev->net);
+ else
+ // on URB error, trigger immediate poll
+ schedule_delayed_work(&dev->carrier_work, 0);
}
static int ipheth_carrier_set(struct ipheth_device *dev)
{
- struct usb_device *udev = dev->udev;
+ struct usb_device *udev;
int retval;
+ if (!dev)
+ return 0;
+ if (!dev->confirmed_pairing)
+ return 0;
+
+ udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
@@ -302,11 +313,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+ if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
netif_carrier_on(dev->net);
- else
+ if (dev->tx_urb->status != -EINPROGRESS)
+ netif_wake_queue(dev->net);
+ } else {
netif_carrier_off(dev->net);
-
+ netif_stop_queue(dev->net);
+ }
return 0;
}
@@ -386,7 +400,6 @@ static int ipheth_open(struct net_device *net)
return retval;
schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
- netif_start_queue(net);
return retval;
}
@@ -424,17 +437,18 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
dev);
dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ netif_stop_queue(net);
retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
__func__, retval);
dev->net->stats.tx_errors++;
dev_kfree_skb_any(skb);
+ netif_wake_queue(net);
} else {
dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb);
- netif_stop_queue(net);
}
return NETDEV_TX_OK;
@@ -489,7 +503,7 @@ static int ipheth_probe(struct usb_interface *intf,
dev->udev = udev;
dev->net = netdev;
dev->intf = intf;
-
+ dev->confirmed_pairing = false;
/* Set up endpoints */
hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
if (hintf == NULL) {
@@ -540,7 +554,9 @@ static int ipheth_probe(struct usb_interface *intf,
retval = -EIO;
goto err_register_netdev;
}
-
+ // carrier down and transmit queues stopped until packet from device
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
return 0;
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 3e37724d30ae..0c4f4190c58e 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
/ sizeof(init_msg_1[0]), usb_buf, 24);
if (status != 0)
- return status;
+ goto out;
memcpy(usb_buf, init_msg_2, 12);
status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
/ sizeof(init_msg_2[0]), usb_buf, 28);
if (status != 0)
- return status;
+ goto out;
memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
kfree(usb_buf);
return status;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index e143a7fe9320..65e94dffaabc 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -30,6 +30,7 @@
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
+#include <net/vxlan.h>
#include <linux/microchipphy.h>
#include <linux/of_net.h>
#include "lan78xx.h"
@@ -442,7 +443,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
}
} else {
netdev_warn(dev->net,
- "Failed to read stat ret = 0x%x", ret);
+ "Failed to read stat ret = %d", ret);
}
kfree(stats);
@@ -1763,6 +1764,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
dev->mdiobus->read = lan78xx_mdiobus_read;
dev->mdiobus->write = lan78xx_mdiobus_write;
dev->mdiobus->name = "lan78xx-mdiobus";
+ dev->mdiobus->parent = &dev->udev->dev;
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
@@ -2406,11 +2408,6 @@ static int lan78xx_stop(struct net_device *net)
return 0;
}
-static int lan78xx_linearize(struct sk_buff *skb)
-{
- return skb_linearize(skb);
-}
-
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
@@ -2421,8 +2418,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
return NULL;
}
- if (lan78xx_linearize(skb) < 0)
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
return NULL;
+ }
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
@@ -2621,6 +2620,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
int i;
ret = lan78xx_get_endpoints(dev, intf);
+ if (ret) {
+ netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
+ ret);
+ return ret;
+ }
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -3288,6 +3292,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
tasklet_schedule(&dev->bh);
}
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ return features;
+}
+
static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_open = lan78xx_open,
.ndo_stop = lan78xx_stop,
@@ -3301,6 +3318,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_set_features = lan78xx_set_features,
.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
+ .ndo_features_check = lan78xx_features_check,
};
static void lan78xx_stat_monitor(unsigned long param)
@@ -3372,6 +3390,7 @@ static int lan78xx_probe(struct usb_interface *intf,
if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ee40ac23507a..5fe9f1273fe2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
- __u8 tmp;
+ __u8 tmp = 0;
__le16 retdatai;
int ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 16e5c8cd104d..97f6b8130db3 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -890,7 +890,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -938,6 +940,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -948,6 +951,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
+ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 02e29562d254..6e74965d26a0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -689,8 +689,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
+ if (ret < 0)
+ memset(data, 0xff, size);
+ else
+ memcpy(data, tmp, size);
- memcpy(data, tmp, size);
kfree(tmp);
return ret;
@@ -3420,7 +3423,10 @@ static void r8153_init(struct r8152 *tp)
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
+
msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ break;
}
for (i = 0; i < 500; i++) {
@@ -3444,7 +3450,10 @@ static void r8153_init(struct r8152 *tp)
ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
if (ocp_data == PHY_STAT_LAN_ON)
break;
+
msleep(20);
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ break;
}
usb_disable_lpm(tp->udev);
@@ -4362,6 +4371,9 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENODEV;
}
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -4435,6 +4447,11 @@ static int rtl8152_probe(struct usb_interface *intf,
intf->needs_remote_wakeup = 1;
+ if (!rtl_can_wakeup(tp))
+ __rtl_set_wol(tp, 0);
+ else
+ tp->saved_wolopts = __rtl_get_wol(tp);
+
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
@@ -4448,10 +4465,6 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
- if (!rtl_can_wakeup(tp))
- __rtl_set_wol(tp, 0);
-
- tp->saved_wolopts = __rtl_get_wol(tp);
if (tp->saved_wolopts)
device_set_wakeup_enable(&udev->dev, true);
else
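
In the r8152 hunk, `get_registers()` now fills the caller's buffer with `0xff` when the control transfer fails rather than copying stale bounce-buffer contents. A small sketch of that "never hand back uninitialized data" pattern; `hw_read()` is a stand-in for the USB control read:

```c
#include <stdlib.h>
#include <string.h>

/* Stand-in for the USB control read; returns <0 on failure. */
static int hw_read(void *tmp, size_t size)
{
    (void)tmp; (void)size;
    return -5;                      /* simulate -EIO */
}

static int get_registers(void *data, size_t size)
{
    int ret;
    void *tmp = malloc(size);

    if (!tmp)
        return -1;

    ret = hw_read(tmp, size);
    if (ret < 0)
        memset(data, 0xff, size);   /* well-defined contents on error  */
    else
        memcpy(data, tmp, size);    /* only copy data we actually read */

    free(tmp);
    return ret;
}
```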
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 004c955c1fd1..da0ae16f5c74 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
- __le16 res;
+ __le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4ab82b998a0f..0b5fd1499ac0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -114,6 +114,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
int intr = 0;
e = alt->endpoint + ep;
+
+ /* ignore endpoints which cannot transfer data */
+ if (!usb_endpoint_maxp(&e->desc))
+ continue;
+
switch (e->desc.bmAttributes) {
case USB_ENDPOINT_XFER_INT:
if (!usb_endpoint_dir_in(&e->desc))
@@ -349,6 +354,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
{
enum usb_device_speed speed = dev->udev->speed;
+ if (!dev->rx_urb_size || !dev->hard_mtu)
+ goto insanity;
switch (speed) {
case USB_SPEED_HIGH:
dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
@@ -365,6 +372,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
break;
default:
+insanity:
dev->rx_qlen = dev->tx_qlen = 4;
}
}
@@ -508,6 +516,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
+ test_bit(EVENT_DEV_OPEN, &dev->flags) &&
!test_bit (EVENT_RX_HALT, &dev->flags) &&
!test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
@@ -1394,6 +1403,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore(&dev->txq.lock, flags);
goto drop;
}
+ if (netif_queue_stopped(net)) {
+ usb_autopm_put_interface_async(dev->intf);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ goto drop;
+ }
#ifdef CONFIG_PM
/* if this triggers, the device is still asleep */
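
Two related hardening steps appear in the usbnet hunks above: endpoints whose max packet size is zero are skipped during enumeration, and the queue-length calculation falls back to a sane default when `rx_urb_size` or `hard_mtu` is still zero, avoiding a divide-by-zero. A minimal sketch of the second guard, with an illustrative memory budget:

```c
#include <stdio.h>

#define MAX_QUEUE_MEMORY (60u * 1518u)   /* illustrative budget, not the driver's */

/* Return a queue length, falling back to 4 when the divisor is not yet known. */
static unsigned int pick_qlen(unsigned int urb_size)
{
    if (!urb_size)
        return 4;                        /* the "insanity" fallback from the hunk */
    return MAX_QUEUE_MEMORY / urb_size;
}

int main(void)
{
    printf("qlen(2048) = %u\n", pick_qlen(2048));
    printf("qlen(0)    = %u\n", pick_qlen(0));
    return 0;
}
```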
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 42c9480acdc7..3b6e908d3164 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -153,23 +153,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct ipv6hdr *iph;
struct net *net = dev_net(skb->dev);
- struct flowi6 fl6 = {
- /* needed to match OIF rule */
- .flowi6_oif = dev->ifindex,
- .flowi6_iif = LOOPBACK_IFINDEX,
- .daddr = iph->daddr,
- .saddr = iph->saddr,
- .flowlabel = ip6_flowinfo(iph),
- .flowi6_mark = skb->mark,
- .flowi6_proto = iph->nexthdr,
- .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
- };
+ struct flowi6 fl6;
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ goto err;
+
+ iph = ipv6_hdr(skb);
+
+ memset(&fl6, 0, sizeof(fl6));
+ /* needed to match OIF rule */
+ fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_iif = LOOPBACK_IFINDEX;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = ip6_flowinfo(iph);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = iph->nexthdr;
+ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
goto err;
@@ -257,21 +263,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
- struct iphdr *ip4h = ip_hdr(skb);
+ struct iphdr *ip4h;
int ret = NET_XMIT_DROP;
- struct flowi4 fl4 = {
- /* needed to match OIF rule */
- .flowi4_oif = vrf_dev->ifindex,
- .flowi4_iif = LOOPBACK_IFINDEX,
- .flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
- .flowi4_proto = ip4h->protocol,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
- };
+ struct flowi4 fl4;
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+
+ memset(&fl4, 0, sizeof(fl4));
+ /* needed to match OIF rule */
+ fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_iif = LOOPBACK_IFINDEX;
+ fl4.flowi4_tos = RT_TOS(ip4h->tos);
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_proto = ip4h->protocol;
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
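
The vrf hunks stop dereferencing the IPv4/IPv6 header before checking that the packet actually contains one (`pskb_may_pull()` first, then `ip_hdr()`/`ipv6_hdr()`), and only then build the flow key. A userspace analogue of "validate the length before treating a buffer as a header"; the struct below is a minimal stand-in, not the kernel's `struct iphdr`:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct ipv4_hdr {                    /* minimal stand-in for struct iphdr */
    uint8_t  ver_ihl, tos;
    uint16_t tot_len;
    uint32_t saddr, daddr;
};

/* Returns 0 and fills saddr/daddr only if the buffer is long enough. */
static int parse_v4(const uint8_t *pkt, size_t len,
                    uint32_t *saddr, uint32_t *daddr)
{
    struct ipv4_hdr hdr;

    if (len < sizeof(hdr))           /* the pskb_may_pull() analogue */
        return -1;

    memcpy(&hdr, pkt, sizeof(hdr));  /* also avoids unaligned access */
    *saddr = hdr.saddr;
    *daddr = hdr.daddr;
    return 0;
}
```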
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 016f5da425ab..bc4542d9a08d 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2049,8 +2049,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label = info->key.label;
udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
- if (info->options_len)
+ if (info->options_len) {
+ if (info->options_len < sizeof(*md))
+ goto drop;
md = ip_tunnel_info_opts(info);
+ }
} else {
md->gbp = skb->mark;
}
@@ -2101,7 +2104,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
df = htons(IP_DF);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
vni, md, flags, udp_sum);
@@ -2160,7 +2163,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (!info)
udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
skb_scrub_packet(skb, xnet);
err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
@@ -2351,10 +2354,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
+ err = gro_cells_init(&vxlan->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
return 0;
}
@@ -2620,8 +2632,6 @@ static void vxlan_setup(struct net_device *dev)
vxlan->dev = dev;
- gro_cells_init(&vxlan->gro_cells, dev);
-
for (h = 0; h < FDB_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
@@ -3375,10 +3385,8 @@ static void __net_exit vxlan_exit_net(struct net *net)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net)) {
- gro_cells_destroy(&vxlan->gro_cells);
+ if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, &list);
- }
}
unregister_netdevice_many(&list);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 7a62316c570d..87bf05a81db5 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -77,7 +77,7 @@ static struct ucc_tdm_info utdm_primary_info = {
},
};
-static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
@@ -209,6 +209,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
ret = -ENOMEM;
goto free_riptr;
}
+ if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
+ dev_err(priv->dev, "MURAM allocation out of addressable range\n");
+ ret = -ENOMEM;
+ goto free_tiptr;
+ }
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
@@ -1117,7 +1122,6 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
if (register_hdlc_device(dev)) {
ret = -ENOBUFS;
pr_err("ucc_hdlc: unable to register hdlc device\n");
- free_netdev(dev);
goto free_dev;
}
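
The fsl_ucc_hdlc hunk rejects MURAM offsets that would not fit into the 16-bit RIPTR/TIPTR registers by comparing each value with its own `u16` cast. The idiom in isolation:

```c
#include <stdint.h>
#include <stdio.h>

/* True if writing 'off' into a 16-bit register would silently truncate it. */
static int truncates_to_u16(uint32_t off)
{
    return off != (uint16_t)off;
}

int main(void)
{
    printf("0x0FF0  -> %d\n", truncates_to_u16(0x0FF0));   /* 0: fits          */
    printf("0x10010 -> %d\n", truncates_to_u16(0x10010));  /* 1: would be lost */
    return 0;
}
```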
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index e7bbdb7af53a..97968e6a6a4e 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -261,7 +261,7 @@ struct port {
struct hss_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
- u32 desc_tab_phys;
+ dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
@@ -861,7 +861,7 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+ memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 421ac5f85699..79fd89150947 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -711,7 +711,7 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
spin_lock_irqsave(&sdla_lock, flags);
SDLA_WINDOW(dev, addr);
- pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
+ pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
__sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
SDLA_WINDOW(dev, addr);
pbuf->opp_flag = 1;
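
Both the ixp4xx_hss and sdla hunks replace `(int)` casts of pointers with `uintptr_t` or plain pointer arithmetic so the code stays correct on 64-bit builds, where a pointer does not fit in an `int`. The portable form of the alignment mask used in the ixp4xx change looks like this:

```c
#include <stdint.h>
#include <stdio.h>

/* Round a pointer down to a 4-byte boundary without truncating it. */
static void *align_down4(void *p)
{
    return (void *)((uintptr_t)p & ~(uintptr_t)3);
}

int main(void)
{
    char buf[16];
    printf("%p -> %p\n", (void *)(buf + 3), align_down4(buf + 3));
    return 0;
}
```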
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..0e6c665a4de8 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
}
result = i2400m_barker_db_add(barker);
if (result < 0)
- goto error_add;
+ goto error_parse_add;
}
kfree(options_orig);
}
return 0;
+error_parse_add:
error_parse:
+ kfree(options_orig);
error_add:
kfree(i2400m_barker_db);
return result;
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index b0dba35a8ad2..dc6fe93ce71f 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -147,6 +147,7 @@ error_msg_to_dev:
error_alloc:
d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
wimax_dev, state, result);
+ kfree(cmd);
return result;
}
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 7a60d2e652da..e492c7f0d311 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -255,7 +255,8 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
if (flags & AR5523_CMD_FLAG_MAGIC)
hdr->magic = cpu_to_be32(1 << 24);
- memcpy(hdr + 1, idata, ilen);
+ if (ilen)
+ memcpy(hdr + 1, idata, ilen);
cmd->odata = odata;
cmd->olen = olen;
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index da770af83036..125b5c31b2b0 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -658,10 +658,10 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
ath10k_ahb_irq_disable(ar);
synchronize_irq(ar_ahb->irq);
- ath10k_pci_flush(ar);
-
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
+
+ ath10k_pci_flush(ar);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 90c0c4a7175d..414153cd5784 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -811,6 +811,7 @@ struct ath10k {
struct completion install_key_done;
+ int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 675e75d66db2..14dc6548701c 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -157,7 +157,7 @@ const struct ath10k_hw_values qca6174_values = {
};
const struct ath10k_hw_values qca99x0_values = {
- .rtc_state_val_on = 5,
+ .rtc_state_val_on = 7,
.ce_count = 12,
.msi_assign_ce_max = 12,
.num_target_ce_config_wlan = 10,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index fb632a454fc2..2294ba311c47 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -947,7 +947,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
if (time_left == 0)
return -ETIMEDOUT;
- return 0;
+ return ar->last_wmi_vdev_start_status;
}
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
@@ -1596,6 +1596,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
+ /* For mesh, probe response and beacon share the same template */
+ if (ieee80211_vif_is_mesh(vif))
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 05ef6bac3ac2..4cde9c028fc4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1039,10 +1039,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 *buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
- u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1056,9 +1055,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1066,9 +1066,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- /* Copy caller's data to allocated DMA buf */
- memcpy(data_buf, data, orig_nbytes);
-
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1081,12 +1078,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
- remaining_bytes = orig_nbytes;
- ce_data = ce_data_base;
+ remaining_bytes = nbytes;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
/* Set up to receive directly into Target(!) address */
ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
@@ -1096,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1137,12 +1136,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
remaining_bytes -= nbytes;
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
if (data_buf) {
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
@@ -1766,6 +1765,11 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+
/* Most likely the device has HTT Rx ring configured. The only way to
* prevent the device from accessing (and possible corrupting) host
* memory is to reset the chip now.
@@ -1779,14 +1783,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
*/
ath10k_pci_safe_chip_reset(ar);
- ath10k_pci_irq_disable(ar);
- ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
ath10k_pci_sleep_sync(ar);
- napi_synchronize(&ar->napi);
- napi_disable(&ar->napi);
-
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
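
The diag-write hunk above stops allocating a DMA bounce buffer as large as the whole transfer; it allocates at most `DIAG_TRANSFER_LIMIT` bytes and copies the caller's data chunk by chunk inside the loop. A sketch of that chunked-copy shape, with a placeholder limit and a stand-in for the per-chunk send:

```c
#include <stddef.h>
#include <string.h>

#define TRANSFER_LIMIT 2048u   /* placeholder for DIAG_TRANSFER_LIMIT */

/* Stand-in for pushing one bounce-buffer chunk to the device. */
static int push_chunk(const void *buf, size_t len) { (void)buf; (void)len; return 0; }

static int diag_write(const unsigned char *data, size_t nbytes)
{
    unsigned char bounce[TRANSFER_LIMIT];   /* bounded, reused each round */
    size_t remaining = nbytes;

    while (remaining) {
        size_t chunk = remaining < sizeof(bounce) ? remaining : sizeof(bounce);

        memcpy(bounce, data, chunk);        /* copy just this chunk */
        if (push_chunk(bounce, chunk))
            return -1;

        data += chunk;
        remaining -= chunk;
    }
    return 0;
}
```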
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 9852c5d51139..beeb6be06939 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -99,6 +99,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index bbfe7be214e1..af3bc06b4aed 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2384,7 +2384,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx(ar->hw, skb);
+ ieee80211_rx_ni(ar->hw, skb);
+
return 0;
}
@@ -3102,18 +3103,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_vdev_start_ev_arg arg = {};
int ret;
+ u32 status;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+ ar->last_wmi_vdev_start_status = 0;
+
ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
- return;
+ ar->last_wmi_vdev_start_status = ret;
+ goto out;
}
- if (WARN_ON(__le32_to_cpu(arg.status)))
- return;
+ status = __le32_to_cpu(arg.status);
+ if (WARN_ON_ONCE(status)) {
+ ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
+ status, (status == WMI_VDEV_START_CHAN_INVALID) ?
+ "chan-invalid" : "unknown");
+ /* Setup is done one way or another though, so we should still
+ * do the completion, so don't return here.
+ */
+ ar->last_wmi_vdev_start_status = -EINVAL;
+ }
+out:
complete(&ar->vdev_setup_done);
}
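
The WMI hunk records the vdev-start outcome in `ar->last_wmi_vdev_start_status` and then always calls `complete()`, so the waiter in `ath10k_vdev_setup_sync()` can return a real error instead of a hard-coded success. A pthread-based sketch of "publish the status, then signal the waiter"; all names are illustrative, this is only an analogue of the kernel completion:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int completed;
static int last_status;          /* analogue of last_wmi_vdev_start_status */

/* Event handler: store the outcome first, then wake the waiter. */
static void on_start_response(int status)
{
    pthread_mutex_lock(&lock);
    last_status = status;
    completed = 1;
    pthread_cond_signal(&done);
    pthread_mutex_unlock(&lock);
}

/* Waiter: returns the status the handler recorded, not a hard-coded 0. */
static int setup_sync(void)
{
    int ret;

    pthread_mutex_lock(&lock);
    while (!completed)
        pthread_cond_wait(&done, &lock);
    ret = last_status;
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    on_start_response(-22);      /* pretend the firmware reported an error */
    printf("setup_sync() = %d\n", setup_sync());
    return 0;
}
```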
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9b8562ff6698..cce028ea9b57 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6248,11 +6248,17 @@ struct wmi_ch_info_ev_arg {
__le32 rx_frame_count;
};
+/* From 10.4 firmware, not sure all have the same values. */
+enum wmi_vdev_start_status {
+ WMI_VDEV_START_OK = 0,
+ WMI_VDEV_START_CHAN_INVALID,
+};
+
struct wmi_vdev_start_ev_arg {
__le32 vdev_id;
__le32 req_id;
__le32 resp_type; /* %WMI_VDEV_RESP_ */
- __le32 status;
+ __le32 status; /* See wmi_vdev_start_status enum above */
};
struct wmi_peer_kick_ev_arg {
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b7fe0af4cb24..650d2f6446a6 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -934,7 +934,7 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
else
ssid_list[i].flag = ANY_SSID_FLAG;
- if (n_match_ssid == 0)
+ if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0)
ssid_list[i].flag |= MATCH_SSID_FLAG;
}
@@ -1088,7 +1088,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
for (i = 0; i < vif->scan_req->n_ssids; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i + 1, DISABLE_SSID_FLAG,
+ i, DISABLE_SSID_FLAG,
0, NULL);
}
}
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 9da3594fd010..fc22c5f47927 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
struct ath6kl_urb_context *urb_context = NULL;
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context =
@@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
{
unsigned long flags;
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 3fd1cc98fd2f..55609fc4e50e 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
ev = (struct wmi_pstream_timeout_event *) datap;
+ if (ev->traffic_class >= WMM_NUM_AC) {
+ ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
+ return -EINVAL;
+ }
/*
* When the pstream (fat pipe == AC) timesout, it means there were
@@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
reply = (struct wmi_cac_event *) datap;
+ if (reply->ac >= WMM_NUM_AC) {
+ ath6kl_err("invalid AC: %d\n", reply->ac);
+ return -EINVAL;
+ }
if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
(reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
@@ -2635,7 +2643,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
u16 active_tsids = 0;
int ret;
- if (traffic_class > 3) {
+ if (traffic_class >= WMM_NUM_AC) {
ath6kl_err("invalid traffic class: %d\n", traffic_class);
return -EINVAL;
}
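
Several of the ath6kl hunks validate a firmware-supplied traffic class or AC index against `WMM_NUM_AC` before it is used as an array index. The generic guard, reduced to a toy example:

```c
#include <stdio.h>

#define NUM_AC 4                       /* WMM has four access categories */

static unsigned int stats[NUM_AC];

/* Reject out-of-range indices coming from an untrusted source. */
static int account_frame(unsigned int traffic_class)
{
    if (traffic_class >= NUM_AC)
        return -1;                     /* would index past the array */
    stats[traffic_class]++;
    return 0;
}

int main(void)
{
    printf("tc=2: %d\n", account_frame(2));
    printf("tc=7: %d\n", account_frame(7));
    return 0;
}
```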
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 08607d7fdb56..7eff6f8023d8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -4115,7 +4115,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
{
- u32 data, ko, kg;
+ u32 data = 0, ko, kg;
if (!AR_SREV_9462_20_OR_LATER(ah))
return;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index eedf86b67cf5..807fbe31e930 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -411,7 +411,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new upper max 0x%X at %i\n",
- tmp_mag, i);
+ tmp_mag, fft_sample_40.upper_max_index);
} else
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 6e236a485431..71b4888b30e7 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -300,9 +300,9 @@ void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
an->ackto = ackto;
- spin_lock(&da->qlock);
+ spin_lock_bh(&da->qlock);
list_add_tail(&an->list, &da->nodes);
- spin_unlock(&da->qlock);
+ spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_init);
@@ -316,9 +316,9 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
{
struct ath_dynack *da = &ah->dynack;
- spin_lock(&da->qlock);
+ spin_lock_bh(&da->qlock);
list_del(&an->list);
- spin_unlock(&da->qlock);
+ spin_unlock_bh(&da->qlock);
}
EXPORT_SYMBOL(ath_dynack_node_deinit);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f15589c70284..b5e12be73f2b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1213,7 +1213,7 @@ err_fw:
static int send_eject_command(struct usb_interface *interface)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_host_interface *iface_desc = &interface->altsetting[0];
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index f333ef1e3e7b..52b42ecee621 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -972,6 +972,8 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
+ __be16 rs_datalen;
+ bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -981,11 +983,24 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
rxstatus = (struct ath_htc_rx_status *)skb->data;
- if (be16_to_cpu(rxstatus->rs_datalen) -
- (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
+ rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
+ if (unlikely(rs_datalen -
+ (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0)) {
ath_err(common,
"Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
- rxstatus->rs_datalen, skb->len);
+ rs_datalen, skb->len);
+ goto rx_next;
+ }
+
+ is_phyerr = rxstatus->rs_status & ATH9K_RXERR_PHY;
+ /*
+ * Discard zero-length packets and packets smaller than an ACK
+ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+ if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) {
+ ath_warn(common,
+ "Short RX data len, dropping (dlen: %d)\n",
+ rs_datalen);
goto rx_next;
}
@@ -1010,7 +1025,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
* Process PHY errors and return so that the packet
* can be dropped.
*/
- if (rx_stats.rs_status & ATH9K_RXERR_PHY) {
+ if (unlikely(is_phyerr)) {
/* TODO: Not using DFS processing now. */
if (ath_cmn_process_fft(&priv->spec_priv, hdr,
&rx_stats, rx_status->mactime)) {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 951bac2caf12..e7fca78cdd96 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -250,8 +250,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
/* Chip Revisions */
/******************/
-static void ath9k_hw_read_revisions(struct ath_hw *ah)
+static bool ath9k_hw_read_revisions(struct ath_hw *ah)
{
+ u32 srev;
u32 val;
if (ah->get_mac_revision)
@@ -267,25 +268,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
- return;
+ return true;
case AR9300_DEVID_AR9340:
ah->hw_version.macVersion = AR_SREV_VERSION_9340;
- return;
+ return true;
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
- return;
+ return true;
case AR9300_DEVID_AR953X:
ah->hw_version.macVersion = AR_SREV_VERSION_9531;
- return;
+ return true;
case AR9300_DEVID_QCA956X:
ah->hw_version.macVersion = AR_SREV_VERSION_9561;
- return;
+ return true;
}
- val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+ srev = REG_READ(ah, AR_SREV);
+
+ if (srev == -EIO) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to read SREV register");
+ return false;
+ }
+
+ val = srev & AR_SREV_ID;
if (val == 0xFF) {
- val = REG_READ(ah, AR_SREV);
+ val = srev;
ah->hw_version.macVersion =
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
@@ -304,6 +313,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
ah->is_pciexpress = true;
}
+
+ return true;
}
/************************************/
@@ -557,7 +568,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
- ath9k_hw_read_revisions(ah);
+ if (!ath9k_hw_read_revisions(ah)) {
+ ath_err(common, "Could not read hardware revisions");
+ return -EOPNOTSUPP;
+ }
switch (ah->hw_version.macVersion) {
case AR_SREV_VERSION_5416_PCI:
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b868f02ced89..58af2fe5be3c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1250,7 +1250,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_node *an = &avp->mcast_node;
mutex_lock(&sc->mutex);
-
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
@@ -1456,6 +1455,9 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
}
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ ath9k_set_txpower(sc, NULL);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 8e9480cc33e1..096902e0fdf5 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -56,11 +56,6 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
struct sk_buff *skb;
struct ath_vif *avp;
- if (!sc->tx99_vif)
- return NULL;
-
- avp = (struct ath_vif *)sc->tx99_vif->drv_priv;
-
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
@@ -77,7 +72,10 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+ if (sc->tx99_vif) {
+ avp = (struct ath_vif *) sc->tx99_vif->drv_priv;
+ hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
+ }
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 99ab20334d21..37c3cbe0ff2b 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -128,6 +128,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
};
MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+static struct usb_driver carl9170_driver;
+
static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
{
struct urb *urb;
@@ -966,32 +968,28 @@ err_out:
static void carl9170_usb_firmware_failed(struct ar9170 *ar)
{
- struct device *parent = ar->udev->dev.parent;
- struct usb_device *udev;
-
- /*
- * Store a copy of the usb_device pointer locally.
- * This is because device_release_driver initiates
- * carl9170_usb_disconnect, which in turn frees our
- * driver context (ar).
+ /* Store copies of the usb_interface and usb_device pointers locally.
+ * This is because release_driver initiates carl9170_usb_disconnect,
+ * which in turn frees our driver context (ar).
*/
- udev = ar->udev;
+ struct usb_interface *intf = ar->intf;
+ struct usb_device *udev = ar->udev;
complete(&ar->fw_load_wait);
+ /* at this point 'ar' could be already freed. Don't use it anymore */
+ ar = NULL;
/* unbind anything failed */
- if (parent)
- device_lock(parent);
-
- device_release_driver(&udev->dev);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&carl9170_driver, intf);
+ usb_unlock_device(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static void carl9170_usb_firmware_finish(struct ar9170 *ar)
{
+ struct usb_interface *intf = ar->intf;
int err;
err = carl9170_parse_firmware(ar);
@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
goto err_unrx;
complete(&ar->fw_load_wait);
- usb_put_dev(ar->udev);
+ usb_put_intf(intf);
return;
err_unrx:
@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
return PTR_ERR(ar);
udev = interface_to_usbdev(intf);
- usb_get_dev(udev);
ar->udev = udev;
ar->intf = intf;
ar->features = id->driver_info;
@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
atomic_set(&ar->rx_anch_urbs, 0);
atomic_set(&ar->rx_pool_urbs, 0);
- usb_get_dev(ar->udev);
+ usb_get_intf(intf);
carl9170_set_state(ar, CARL9170_STOPPED);
err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
&ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
if (err) {
- usb_put_dev(udev);
- usb_put_dev(udev);
+ usb_put_intf(intf);
carl9170_free(ar);
}
return err;
@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
carl9170_release_firmware(ar);
carl9170_free(ar);
- usb_put_dev(udev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 4100ffd42a43..78146607f16e 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d117240d9a73..b8eeaef17edc 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1005,6 +1005,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
u8 *buf, *dpos;
const u8 *spos;
+ if (!ies1)
+ ies1_len = 0;
+
+ if (!ies2)
+ ies2_len = 0;
+
if (ies1_len == 0 && ies2_len == 0) {
*merged_ies = NULL;
*merged_len = 0;
@@ -1014,17 +1020,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, ies1, ies1_len);
+ if (ies1)
+ memcpy(buf, ies1, ies1_len);
dpos = buf + ies1_len;
spos = ies2;
- while (spos + 1 < ies2 + ies2_len) {
+ while (spos && (spos + 1 < ies2 + ies2_len)) {
/* IE tag at offset 0, length at offset 1 */
u16 ielen = 2 + spos[1];
if (spos + ielen > ies2 + ies2_len)
break;
if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
- !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
+ (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
+ spos, ielen))) {
memcpy(dpos, spos, ielen);
dpos += ielen;
}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5e4058a4037b..cbf3958d788a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1091,7 +1091,7 @@ static const struct file_operations fops_ssid = {
};
/*---------temp------------*/
-static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+static void print_temp(struct seq_file *s, const char *prefix, s32 t)
{
switch (t) {
case 0:
@@ -1099,7 +1099,8 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
seq_printf(s, "%s N/A\n", prefix);
break;
default:
- seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ seq_printf(s, "%s %s%d.%03d\n", prefix, (t < 0 ? "-" : ""),
+ abs(t / 1000), abs(t % 1000));
break;
}
}
@@ -1107,7 +1108,7 @@ static void print_temp(struct seq_file *s, const char *prefix, u32 t)
static int wil_temp_debugfs_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
- u32 t_m, t_r;
+ s32 t_m, t_r;
int rc = wmi_get_temperature(wil, &t_m, &t_r);
if (rc) {
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 64046e0bd0a2..a37533cffc7c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -356,6 +356,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+ size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+ sizeof(struct wmi_cmd_hdr);
+
+ if (wil->mbox_ctl.rx.entry_size < min_size) {
+ wil_err(wil, "rx mbox entry too small (%d)\n",
+ wil->mbox_ctl.rx.entry_size);
+ return false;
+ }
+ if (wil->mbox_ctl.tx.entry_size < min_size) {
+ wil_err(wil, "tx mbox entry too small (%d)\n",
+ wil->mbox_ctl.tx.entry_size);
+ return false;
+ }
+
+ return true;
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -391,7 +410,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
- set_bit(wil_status_mbox_ready, wil->status);
+ if (wil_validate_mbox_regs(wil))
+ set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index f8bce58d48cc..12b4c6f00372 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -803,7 +803,7 @@ static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
{
- ulong to = msecs_to_jiffies(1000);
+ ulong to = msecs_to_jiffies(2000);
ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
if (0 == left) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4c38520d4dd2..72e8fea05e5e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -546,8 +546,8 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
v->swtail = next_tail) {
rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
if (unlikely(rc)) {
- wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
- rc, v->swtail);
+ wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
+ rc, v->swtail);
break;
}
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 61419d1b4543..3f6ac1ca0e57 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -209,7 +209,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
uint retry;
int rc = 0;
- if (sizeof(cmd) + len > r->entry_size) {
+ if (len > r->entry_size - sizeof(cmd)) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
(int)(sizeof(cmd) + len), r->entry_size);
return -ERANGE;
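
The wil6210 hunk rewrites `sizeof(cmd) + len > r->entry_size` as `len > r->entry_size - sizeof(cmd)`, keeping the untrusted length alone on one side of the comparison; the usual motivation for this style is that adding to an attacker-controlled length can wrap in a narrow type, and the subtraction form is safe provided `entry_size >= sizeof(cmd)` (which the mailbox validation added in the interrupt.c hunk is there to ensure). A toy demonstration of the difference, with 16-bit sizes chosen purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE   16u
#define ENTRY_SIZE 128u

/* The sum wraps for len close to UINT16_MAX when held in 16 bits,
 * so an oversized length can sneak past the check (returns 0). */
static int check_wrapping(uint16_t len)
{
    return (uint16_t)(HDR_SIZE + len) > ENTRY_SIZE;   /* 1 means "too big" */
}

/* Subtracting on the constant side cannot wrap, given ENTRY_SIZE >= HDR_SIZE. */
static int check_safe(uint16_t len)
{
    return len > ENTRY_SIZE - HDR_SIZE;
}

int main(void)
{
    uint16_t evil = UINT16_MAX - 4;    /* HDR_SIZE + evil wraps in 16 bits */
    printf("wrapping: %d  safe: %d\n", check_wrapping(evil), check_safe(evil));
    return 0;
}
```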
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 0e180677c7fc..09dbab464fe1 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2583,8 +2583,8 @@ static int __init at76_mod_init(void)
if (result < 0)
printk(KERN_ERR DRIVER_NAME
": usb_register failed (status %d)\n", result);
-
- led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
+ else
+ led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
return result;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 6922cbb99a04..5a0699fb4b9a 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -1834,7 +1834,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct lpphy_tx_gains gains, oldgains;
+ struct lpphy_tx_gains oldgains;
int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
lpphy_read_tx_pctl_mode_from_hardware(dev);
@@ -1848,9 +1848,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
- lpphy_papd_cal(dev, gains, 0, 1, 30);
+ lpphy_papd_cal(dev, oldgains, 0, 1, 30);
else
- lpphy_papd_cal(dev, gains, 0, 1, 65);
+ lpphy_papd_cal(dev, oldgains, 0, 1, 65);
if (old_afe_ovr)
lpphy_set_tx_gains(dev, oldgains);
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 83770d2ea057..9da8bd792702 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1304,8 +1304,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev)
+static void b43legacy_interrupt_tasklet(unsigned long data)
{
+ struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -3775,7 +3776,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
tasklet_init(&wldev->isr_tasklet,
- (void (*)(unsigned long))b43legacy_interrupt_tasklet,
+ b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
wldev->__using_pio = true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 530f52120972..64f8f404c53f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -3220,6 +3220,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
struct brcmf_pno_scanresults_le *pfn_result;
u32 result_count;
u32 status;
+ u32 datalen;
brcmf_dbg(SCAN, "Enter\n");
@@ -3245,6 +3246,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
if (result_count > 0) {
int i;
+ data += sizeof(struct brcmf_pno_scanresults_le);
+ netinfo_start = (struct brcmf_pno_net_info_le *)data;
+ datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result);
+ if (datalen < result_count * sizeof(*netinfo)) {
+ brcmf_err("insufficient event data\n");
+ goto out_err;
+ }
+
request = kzalloc(sizeof(*request), GFP_KERNEL);
ssid = kcalloc(result_count, sizeof(*ssid), GFP_KERNEL);
channel = kcalloc(result_count, sizeof(*channel), GFP_KERNEL);
@@ -3254,9 +3263,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
}
request->wiphy = wiphy;
- data += sizeof(struct brcmf_pno_scanresults_le);
- netinfo_start = (struct brcmf_pno_net_info_le *)data;
-
for (i = 0; i < result_count; i++) {
netinfo = &netinfo_start[i];
if (!netinfo) {
@@ -3266,6 +3272,8 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
goto out_err;
}
+ if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+ netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
netinfo->SSID, netinfo->channel);
memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
@@ -3571,6 +3579,8 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
data += sizeof(struct brcmf_pno_scanresults_le);
netinfo = (struct brcmf_pno_net_info_le *)data;
+ if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
+ netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
cfg->wowl.nd->n_channels = 1;
@@ -5374,6 +5384,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->req_ie =
kmemdup(cfg->extra_buf, conn_info->req_ie_len,
GFP_KERNEL);
+ if (!conn_info->req_ie)
+ conn_info->req_ie_len = 0;
} else {
conn_info->req_ie_len = 0;
conn_info->req_ie = NULL;
@@ -5390,6 +5402,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->resp_ie =
kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
GFP_KERNEL);
+ if (!conn_info->resp_ie)
+ conn_info->resp_ie_len = 0;
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f877301c9454..349cc5cb9e80 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -339,7 +339,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
} else {
/* Process special event packets */
if (handle_event)
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb,
+ BCMILCP_SUBTYPE_VENDOR_LONG);
brcmf_netif_rx(ifp, skb);
}
@@ -356,7 +357,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0);
brcmu_pkt_buf_free_skb(skb);
}
@@ -685,17 +686,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
bool rtnl_locked)
{
struct brcmf_if *ifp;
+ int ifidx;
ifp = drvr->iflist[bsscfgidx];
- drvr->iflist[bsscfgidx] = NULL;
if (!ifp) {
brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx);
return;
}
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
ifp->ifidx);
- if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
- drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
+ ifidx = ifp->ifidx;
+
if (ifp->ndev) {
if (bsscfgidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@@ -723,6 +724,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
+
+ drvr->iflist[bsscfgidx] = NULL;
+ if (drvr->if2bss[ifidx] == bsscfgidx)
+ drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
}
void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 26ff5a9648f3..9cd112ef87d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -181,7 +181,7 @@ enum brcmf_fweh_event_code {
*/
#define BRCM_OUI "\x00\x10\x18"
#define BCMILCP_BCM_SUBTYPE_EVENT 1
-
+#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
/**
* struct brcm_ethhdr - broadcom specific ether header.
@@ -302,10 +302,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
- struct sk_buff *skb)
+ struct sk_buff *skb, u16 stype)
{
struct brcmf_event *event_packet;
- u16 usr_stype;
+ u16 subtype, usr_stype;
/* only process events when protocol matches */
if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
@@ -314,8 +314,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
return;
- /* check for BRCM oui match */
event_packet = (struct brcmf_event *)skb_mac_header(skb);
+
+ /* check subtype if needed */
+ if (unlikely(stype)) {
+ subtype = get_unaligned_be16(&event_packet->hdr.subtype);
+ if (subtype != stype)
+ return;
+ }
+
+ /* check for BRCM oui match */
if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0],
sizeof(event_packet->hdr.oui)))
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 2b9a2bc429d6..ab9f136c1593 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1114,7 +1114,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0);
exit:
brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index f78d91b69287..aac9c97d2255 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -74,7 +74,7 @@
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
-#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500)
+#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
/* WiFi P2P Public Action Frame OUI Subtypes */
@@ -1139,7 +1139,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
{
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct brcmf_cfg80211_vif *pri_vif;
- unsigned long duration;
s32 retry;
brcmf_dbg(TRACE, "Enter\n");
@@ -1155,7 +1154,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
* pending action frame tx is cancelled.
*/
retry = 0;
- duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
(afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
afx_hdl->is_listen = false;
@@ -1163,7 +1161,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
retry);
/* search peer on peer's listen channel */
schedule_work(&afx_hdl->afx_work);
- wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
+ wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+ P2P_AF_FRM_SCAN_MAX_WAIT);
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status)))
@@ -1176,7 +1175,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
afx_hdl->is_listen = true;
schedule_work(&afx_hdl->afx_work);
wait_for_completion_timeout(&afx_hdl->act_frm_scan,
- duration);
+ P2P_AF_FRM_SCAN_MAX_WAIT);
}
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
@@ -1463,10 +1462,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
return 0;
if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
- if (e->status == BRCMF_E_STATUS_SUCCESS)
+ if (e->status == BRCMF_E_STATUS_SUCCESS) {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
&p2p->status);
- else {
+ if (!p2p->wait_for_offchan_complete)
+ complete(&p2p->send_af_done);
+ } else {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
/* If there is no ack, we don't need to wait for
* WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
@@ -1517,6 +1518,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
p2p->af_sent_channel = le32_to_cpu(af_params->channel);
p2p->af_tx_sent_jiffies = jiffies;
+ if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
+ p2p->af_sent_channel ==
+ ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
+ p2p->wait_for_offchan_complete = false;
+ else
+ p2p->wait_for_offchan_complete = true;
+
+ brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
+ (p2p->wait_for_offchan_complete) ?
+ "off-channel" : "on-channel");
+
timeout = wait_for_completion_timeout(&p2p->send_af_done,
P2P_AF_MAX_WAIT_TIME);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index 8ce9447533ef..fbee51148904 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -124,6 +124,7 @@ struct afx_hdl {
* @gon_req_action: about to send go negotiation request frame.
* @block_gon_req_tx: drop tx go negotiation request frame.
* @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
*/
struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
bool gon_req_action;
bool block_gon_req_tx;
bool p2pdev_dynamically;
+ bool wait_for_offchan_complete;
};
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index de52d826eb24..998a4bd6db78 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1921,6 +1921,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
+ continue;
}
bus->sdcnt.rx_readahead_cnt++;
if (rd->len != roundup(rd_new.len, 16)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 053f3b59f21e..31727f34381f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -157,7 +157,7 @@ struct brcmf_usbdev_info {
struct usb_device *usbdev;
struct device *dev;
- struct mutex dev_init_lock;
+ struct completion dev_init_done;
int ctl_in_pipe, ctl_out_pipe;
struct urb *ctl_urb; /* URB for control endpoint */
@@ -438,6 +438,7 @@ fail:
usb_free_urb(req->urb);
list_del(q->next);
}
+ kfree(reqs);
return NULL;
}
@@ -681,12 +682,18 @@ static int brcmf_usb_up(struct device *dev)
static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
{
+ int i;
+
if (devinfo->ctl_urb)
usb_kill_urb(devinfo->ctl_urb);
if (devinfo->bulk_urb)
usb_kill_urb(devinfo->bulk_urb);
- brcmf_usb_free_q(&devinfo->tx_postq, true);
- brcmf_usb_free_q(&devinfo->rx_postq, true);
+ if (devinfo->tx_reqs)
+ for (i = 0; i < devinfo->bus_pub.ntxq; i++)
+ usb_kill_urb(devinfo->tx_reqs[i].urb);
+ if (devinfo->rx_reqs)
+ for (i = 0; i < devinfo->bus_pub.nrxq; i++)
+ usb_kill_urb(devinfo->rx_reqs[i].urb);
}
static void brcmf_usb_down(struct device *dev)
@@ -1189,11 +1196,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
if (ret)
goto error;
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return;
error:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
device_release_driver(dev);
}
@@ -1239,7 +1246,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
/* we are done */
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return 0;
}
bus->chip = bus_pub->devid;
@@ -1300,11 +1307,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->usbdev = usb;
devinfo->dev = &usb->dev;
- /* Take an init lock, to protect for disconnect while still loading.
+ /* Init completion, to protect for disconnect while still loading.
* Necessary because of the asynchronous firmware load construction
*/
- mutex_init(&devinfo->dev_init_lock);
- mutex_lock(&devinfo->dev_init_lock);
+ init_completion(&devinfo->dev_init_done);
usb_set_intfdata(intf, devinfo);
@@ -1325,7 +1331,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto fail;
}
- desc = &intf->altsetting[0].desc;
+ desc = &intf->cur_altsetting->desc;
if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
(desc->bInterfaceSubClass != 2) ||
(desc->bInterfaceProtocol != 0xff)) {
@@ -1338,7 +1344,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
num_of_eps = desc->bNumEndpoints;
for (ep = 0; ep < num_of_eps; ep++) {
- endpoint = &intf->altsetting[0].endpoint[ep].desc;
+ endpoint = &intf->cur_altsetting->endpoint[ep].desc;
endpoint_num = usb_endpoint_num(endpoint);
if (!usb_endpoint_xfer_bulk(endpoint))
continue;
@@ -1382,7 +1388,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return 0;
fail:
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1397,7 +1403,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
if (devinfo) {
- mutex_lock(&devinfo->dev_init_lock);
+ wait_for_completion(&devinfo->dev_init_done);
/* Make sure that devinfo still exists. Firmware probe routines
* may have released the device and cleared the intfdata.
*/
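The brcmfmac USB hunk above replaces a mutex that was held across probe with a struct completion, so that disconnect can sleep until the asynchronous firmware-load path has finished on both its success and failure branches. A minimal sketch of that wait/signal shape, using hypothetical my_* names rather than the driver's own symbols:

#include <linux/completion.h>

struct my_devinfo {
	struct completion dev_init_done;
};

/* probe: arm the completion before kicking off the async firmware load */
static void my_probe(struct my_devinfo *di)
{
	init_completion(&di->dev_init_done);
	/* ... request_firmware_nowait() or similar async setup ... */
}

/* async callback: signal on success and on every error path */
static void my_probe_phase2(struct my_devinfo *di, int err)
{
	/* ... finish or abort setup depending on err ... */
	complete(&di->dev_init_done);
}

/* disconnect: block until the async path has signalled */
static void my_disconnect(struct my_devinfo *di)
{
	wait_for_completion(&di->dev_init_done);
	/* ... tear down ... */
}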
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index 8eff2753abad..d493021f6031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
struct brcmf_if *ifp;
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
struct sk_buff *reply;
- int ret, payload, ret_len;
+ unsigned int payload, ret_len;
void *dcmd_buf = NULL, *wr_pointer;
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+ int ret;
if (len < sizeof(*cmdhdr)) {
brcmf_err("vendor command too short: %d\n", len);
@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
brcmf_err("oversize return buffer %d\n", ret_len);
ret_len = BRCMF_DCMD_MAXLEN;
}
- payload = max(ret_len, len) + 1;
+ payload = max_t(unsigned int, ret_len, len) + 1;
dcmd_buf = vzalloc(payload);
if (NULL == dcmd_buf)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 7c2a9a9bc372..b820e80d4b4c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_bh(&wl->lock);
+ wl->wlc->vif = vif;
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct brcms_info *wl = hw->priv;
+
+ spin_lock_bh(&wl->lock);
+ wl->wlc->vif = NULL;
+ spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
status = brcms_c_aggregatable(wl->wlc, tid);
spin_unlock_bh(&wl->lock);
if (!status) {
- brcms_err(wl->wlc->hw->d11core,
- "START: tid %d is not agg\'able\n", tid);
+ brcms_dbg_ht(wl->wlc->hw->d11core,
+ "START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
spin_unlock_bh(&wl->lock);
}
+static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct brcms_info *wl = hw->priv;
+ struct sk_buff *beacon = NULL;
+ u16 tim_offset = 0;
+
+ spin_lock_bh(&wl->lock);
+ if (wl->wlc->vif)
+ beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
+ &tim_offset, NULL);
+ if (beacon)
+ brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
+ wl->wlc->vif->bss_conf.dtim_period);
+ spin_unlock_bh(&wl->lock);
+
+ return 0;
+}
+
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
.start = brcms_ops_start,
@@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
.get_tsf = brcms_ops_get_tsf,
.set_tsf = brcms_ops_set_tsf,
+ .set_tim = brcms_ops_beacon_set_tim,
};
void brcms_dpc(unsigned long data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
index c4d135cff04a..9f76b880814e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
@@ -563,6 +563,7 @@ struct brcms_c_info {
struct wiphy *wiphy;
struct scb pri_scb;
+ struct ieee80211_vif *vif;
struct sk_buff *beacon;
u16 beacon_tim_offset;
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 69b826d229c5..a8d470010f5e 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -5472,7 +5472,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
we have to add a spin lock... */
rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
- ptr += sprintf(ptr, "%pM %*s rssi = %d",
+ ptr += sprintf(ptr, "%pM %.*s rssi = %d",
BSSList_rid.bssid,
(int)BSSList_rid.ssidLen,
BSSList_rid.ssid,
@@ -7796,16 +7796,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGVLIST: ridcode = RID_APLIST; break;
case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM; break;
case AIROGSTAT: ridcode = RID_STATUS; break;
case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
case AIROGSTATSC32: ridcode = RID_STATS; break;
@@ -7819,7 +7811,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
return -EINVAL;
}
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+ if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index bfa542c8d6f1..86c84b11218d 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -3220,8 +3220,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+static void ipw2100_irq_tasklet(unsigned long data)
{
+ struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -6029,7 +6030,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+static void ipw2100_irq_tasklet(unsigned long data);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6158,7 +6159,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw2100_irq_tasklet, (unsigned long)priv);
/* NOTE: We do not start the deferred work for status checks yet */
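The ipw2100 hunk above, and the ipw2200/iwlegacy hunks below, all make the same change: instead of casting a handler with the wrong prototype at tasklet_init() time, the handler itself takes the opaque unsigned long and casts it back to the private pointer. A short sketch of that pre-5.9 tasklet idiom, again with hypothetical my_* names:

#include <linux/interrupt.h>

struct my_priv {
	struct tasklet_struct irq_tasklet;
};

/* The handler matches void (*)(unsigned long) exactly and recovers the
 * private pointer itself, so no function-pointer cast is needed. */
static void my_irq_tasklet(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* ... service the interrupt causes recorded for priv ... */
	(void)priv;
}

static void my_setup_tasklet(struct my_priv *priv)
{
	tasklet_init(&priv->irq_tasklet, my_irq_tasklet, (unsigned long)priv);
}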
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index bfd68612a535..48edb2b6eb7d 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1968,8 +1968,9 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(struct ipw_priv *priv)
+static void ipw_irq_tasklet(unsigned long data)
{
+ struct ipw_priv *priv = (struct ipw_priv *)data;
u32 inta, inta_mask, handled = 0;
unsigned long flags;
int rc = 0;
@@ -10705,7 +10706,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ tasklet_init(&priv->irq_tasklet,
ipw_irq_tasklet, (unsigned long)priv);
return ret;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 466912eb2d87..d853ccbf74cb 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1399,8 +1399,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(struct il_priv *il)
+il3945_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -3432,7 +3433,7 @@ il3945_setup_deferred_work(struct il_priv *il)
setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il3945_irq_tasklet,
+ il3945_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index a91d170a614b..6c2dcd236713 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4361,8 +4361,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(struct il_priv *il)
+il4965_irq_tasklet(unsigned long data)
{
+ struct il_priv *il = (struct il_priv *)data;
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -6260,7 +6261,7 @@ il4965_setup_deferred_work(struct il_priv *il)
setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
tasklet_init(&il->irq_tasklet,
- (void (*)(unsigned long))il4965_irq_tasklet,
+ il4965_irq_tasklet,
(unsigned long)il);
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 140b6ea8f7cc..db2373fe8ac3 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -717,7 +717,7 @@ il_eeprom_init(struct il_priv *il)
u32 gp = _il_rd(il, CSR_EEPROM_GP);
int sz;
int ret;
- u16 addr;
+ int addr;
/* allocate eeprom */
sz = il->cfg->eeprom_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 1bbd17ada974..20e16c423990 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -185,6 +185,9 @@ void iwl_leds_init(struct iwl_priv *priv)
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
+ if (!priv->led.name)
+ return;
+
priv->led.brightness_set = iwl_led_brightness_set;
priv->led.blink_set = iwl_led_blink_set;
priv->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 207d8ae1e116..19052efe53f1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -935,8 +935,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+ bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
- .configure_keys = !d0i3,
+ .configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 2ec3a91a0f6b..bba7ace1a744 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -106,12 +106,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
int i;
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
- .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
- IWL_RSS_HASH_TYPE_IPV4_UDP |
- IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
- IWL_RSS_HASH_TYPE_IPV6_TCP |
- IWL_RSS_HASH_TYPE_IPV6_UDP |
- IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
};
if (mvm->trans->num_rx_queues == 1)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
index 1e51fbe95f7c..73c351a64187 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -109,6 +109,9 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(mvm->hw->wiphy));
+ if (!mvm->led.name)
+ return -ENOMEM;
+
mvm->led.brightness_set = iwl_led_brightness_set;
mvm->led.max_brightness = 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 92557cd31a39..d91ab2b8d667 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -805,6 +805,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!ieee80211_is_action(hdr->frame_control)))
sta = NULL;
+ /* If there is no sta, and it's not offchannel - send through AP */
+ if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+ info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ /* mac80211 holds rcu read lock */
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto drop;
+ }
+ }
+
if (sta) {
if (iwl_mvm_defer_tx(mvm, sta, skb))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index b78e60eb600f..d0aa4d0a5537 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -62,6 +62,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
+#include <asm/unaligned.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "iwl-trans.h"
@@ -289,7 +290,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
len = le16_to_cpu(rx_res->byte_count);
- rx_pkt_status = le32_to_cpup((__le32 *)
+ rx_pkt_status = get_unaligned_le32((__le32 *)
(pkt->data + sizeof(*rx_res) + len));
/* Dont use dev_alloc_skb(), we'll have enough headroom once
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c2bbc8c17beb..bc06d87a0106 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -810,12 +810,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
- rx_status->ampdu_reference = mvm->ampdu_ref;
/* toggle is switched whenever new aggregation starts */
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
}
+ rx_status->ampdu_reference = mvm->ampdu_ref;
}
rcu_read_lock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index c5203568a47a..f0f205c3aadb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -736,7 +736,8 @@ static struct thermal_zone_device_ops tzone_ops = {
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
{
int i;
- char name[] = "iwlwifi";
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
if (!iwl_mvm_is_tt_in_fw(mvm)) {
mvm->tz_device.tzone = NULL;
@@ -746,6 +747,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
mvm->tz_device.tzone = thermal_zone_device_register(name,
IWL_MAX_DTS_TRIPS,
IWL_WRITABLE_TRIPS_MSK,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bd7ff562d82d..63dcea640d07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -551,6 +551,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
memcpy(&info, skb->cb, sizeof(info));
+ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
+ return -1;
+
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
return -1;
@@ -1300,6 +1303,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
break;
}
+ /*
+ * If we are freeing multiple frames, mark all the frames
+ * but the first one as acked, since they were acknowledged
+ * before
+ * */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
iwl_mvm_tx_status_check_trigger(mvm, status);
info->status.rates[0].count = tx_resp->failure_frame + 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e58a50d31d96..395bbe2c0f98 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -475,7 +475,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct list_head local_empty;
- int pending = atomic_xchg(&rba->req_pending, 0);
+ int pending = atomic_read(&rba->req_pending);
IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
@@ -530,11 +530,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
i++;
}
+ atomic_dec(&rba->req_pending);
pending--;
+
if (!pending) {
- pending = atomic_xchg(&rba->req_pending, 0);
+ pending = atomic_read(&rba->req_pending);
IWL_DEBUG_RX(trans,
- "Pending allocation requests = %d\n",
+ "Got more pending allocation requests = %d\n",
pending);
}
@@ -546,12 +548,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
spin_unlock(&rba->lock);
atomic_inc(&rba->req_ready);
+
}
spin_lock(&rba->lock);
/* return unused rbds to the allocator empty list */
list_splice_tail(&local_empty, &rba->rbd_empty);
spin_unlock(&rba->lock);
+
+ IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
}
/*
@@ -893,9 +898,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
def_rxq = trans_pcie->rxq;
- if (!rba->alloc_wq)
+ if (!rba->alloc_wq) {
rba->alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!rba->alloc_wq)
+ return -ENOMEM;
+ }
+
INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
cancel_work_sync(&rba->rx_alloc);
@@ -1220,10 +1229,15 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+ struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
+ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
+ return;
+
+ rxq = &trans_pcie->rxq[queue];
+
restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
@@ -1891,10 +1905,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
- inta_fh,
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_fh, trans_pcie->fh_mask,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+ if (inta_fh & ~trans_pcie->fh_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta_fh & ~trans_pcie->fh_mask);
+ }
+
+ inta_fh &= trans_pcie->fh_mask;
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
@@ -1933,11 +1955,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
/* After checking FH register check HW register */
- if (iwl_have_debug_level(IWL_DL_ISR))
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
IWL_DEBUG_ISR(trans,
- "ISR inta_hw 0x%08x, enabled 0x%08x\n",
- inta_hw,
+ "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
+ inta_hw, trans_pcie->hw_mask,
iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+ if (inta_hw & ~trans_pcie->hw_mask)
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt 0x%08x\n",
+ inta_hw & ~trans_pcie->hw_mask);
+ }
+
+ inta_hw &= trans_pcie->hw_mask;
/* Alive notification via Rx interrupt will do the real work */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e1bfc9522cbe..174e45d78c46 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -439,6 +439,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
DMA_TO_DEVICE);
}
+ meta->tbs = 0;
+
if (trans->cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index c995ace153ee..30171d4c4718 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -2570,7 +2570,7 @@ static int prism2_hostapd_add_sta(struct ap_data *ap,
sta->supported_rates[0] = 2;
if (sta->tx_supp_rates & WLAN_RATE_2M)
sta->supported_rates[1] = 4;
- if (sta->tx_supp_rates & WLAN_RATE_5M5)
+ if (sta->tx_supp_rates & WLAN_RATE_5M5)
sta->supported_rates[2] = 11;
if (sta->tx_supp_rates & WLAN_RATE_11M)
sta->supported_rates[3] = 22;
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index bca6935a94db..4e91c74fcfad 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1351,7 +1351,8 @@ static int ezusb_init(struct hermes *hw)
int retval;
BUG_ON(in_interrupt());
- BUG_ON(!upriv);
+ if (!upriv)
+ return -EINVAL;
upriv->reply_count = 0;
/* Write the MAGIC number on the simulated registers to keep
@@ -1601,9 +1602,9 @@ static int ezusb_probe(struct usb_interface *interface,
/* set up the endpoint information */
/* check out the endpoints */
- iface_desc = &interface->altsetting[0].desc;
+ iface_desc = &interface->cur_altsetting->desc;
for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
- ep = &interface->altsetting[0].endpoint[i].desc;
+ ep = &interface->cur_altsetting->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(ep)) {
/* we found a bulk in endpoint */
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a49068d32d..57ad56435dda 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
- return err;
+ goto err_put;
}
mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
+err_put:
pci_dev_put(pdev);
return err;
}
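The p54pci hunk above adds an err_put label so that a failed pci_enable_device() still drops the device reference taken earlier in probe. A generic sketch of that unwind-with-goto shape (my_probe is illustrative, not the p54 code):

#include <linux/pci.h>

static int my_probe(struct pci_dev *pdev)
{
	int err;

	pci_dev_get(pdev);

	err = pci_enable_device(pdev);
	if (err)
		goto err_put;

	err = pci_request_regions(pdev, "my_drv");
	if (err)
		goto err_disable;

	return 0;

	/* each label releases exactly what was acquired before the jump,
	 * in reverse order */
err_disable:
	pci_disable_device(pdev);
err_put:
	pci_dev_put(pdev);
	return err;
}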
diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
index 043bd1c23c19..4a197a32d78c 100644
--- a/drivers/net/wireless/intersil/p54/p54usb.c
+++ b/drivers/net/wireless/intersil/p54/p54usb.c
@@ -33,6 +33,8 @@ MODULE_ALIAS("prism54usb");
MODULE_FIRMWARE("isl3886usb");
MODULE_FIRMWARE("isl3887usb");
+static struct usb_driver p54u_driver;
+
/*
* Note:
*
@@ -921,9 +923,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
{
struct p54u_priv *priv = context;
struct usb_device *udev = priv->udev;
+ struct usb_interface *intf = priv->intf;
int err;
- complete(&priv->fw_wait_load);
if (firmware) {
priv->fw = firmware;
err = p54u_start_ops(priv);
@@ -932,26 +934,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
dev_err(&udev->dev, "Firmware not found.\n");
}
- if (err) {
- struct device *parent = priv->udev->dev.parent;
-
- dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
-
- if (parent)
- device_lock(parent);
+ complete(&priv->fw_wait_load);
+ /*
+ * At this point p54u_disconnect may have already freed
+ * the "priv" context. Do not use it anymore!
+ */
+ priv = NULL;
- device_release_driver(&udev->dev);
- /*
- * At this point p54u_disconnect has already freed
- * the "priv" context. Do not use it anymore!
- */
- priv = NULL;
+ if (err) {
+ dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
- if (parent)
- device_unlock(parent);
+ usb_lock_device(udev);
+ usb_driver_release_interface(&p54u_driver, intf);
+ usb_unlock_device(udev);
}
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
static int p54u_load_firmware(struct ieee80211_hw *dev,
@@ -972,14 +970,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
dev_info(&priv->udev->dev, "Loading firmware file %s\n",
p54u_fwlist[i].fw);
- usb_get_dev(udev);
+ usb_get_intf(intf);
err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
device, GFP_KERNEL, priv,
p54u_load_firmware_cb);
if (err) {
dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
"(%d)!\n", p54u_fwlist[i].fw, err);
- usb_put_dev(udev);
+ usb_put_intf(intf);
}
return err;
@@ -1011,8 +1009,6 @@ static int p54u_probe(struct usb_interface *intf,
skb_queue_head_init(&priv->rx_queue);
init_usb_anchor(&priv->submitted);
- usb_get_dev(udev);
-
/* really lazy and simple way of figuring out if we're a 3887 */
/* TODO: should just stick the identification in the device table */
i = intf->altsetting->desc.bNumEndpoints;
@@ -1053,10 +1049,8 @@ static int p54u_probe(struct usb_interface *intf,
priv->upload_fw = p54u_upload_firmware_net2280;
}
err = p54u_load_firmware(dev, intf);
- if (err) {
- usb_put_dev(udev);
+ if (err)
p54_free_common(dev);
- }
return err;
}
@@ -1072,7 +1066,6 @@ static void p54u_disconnect(struct usb_interface *intf)
wait_for_completion(&priv->fw_wait_load);
p54_unregister_common(dev);
- usb_put_dev(interface_to_usbdev(intf));
release_firmware(priv->fw);
p54_free_common(dev);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e9ec1da9935d..dd6924d21b8a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3060,9 +3060,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.no_vif = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
param.hwname = hwname;
@@ -3101,9 +3101,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
} else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ GFP_KERNEL);
if (!hwname)
return -ENOMEM;
} else
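The mac80211_hwsim hunks above copy the HWSIM_ATTR_RADIO_NAME attribute with kstrndup() bounded by nla_len(), which limits the copy to the attribute payload and guarantees a NUL-terminated result. A one-function sketch of that pattern (copy_attr_name is a hypothetical helper, not an hwsim symbol):

#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

/* Duplicate a netlink string attribute that may not be NUL-terminated:
 * copy at most nla_len() bytes and let kstrndup() append the NUL. */
static char *copy_attr_name(const struct nlattr *attr)
{
	return kstrndup(nla_data(attr), nla_len(attr), GFP_KERNEL);
}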
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 7ff2efadceca..ece6d72cf90c 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -272,6 +272,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
int hw, ap, ap_max = ie[1];
u8 hw_rate;
+ if (ap_max > MAX_RATES) {
+ lbs_deb_assoc("invalid rates\n");
+ return tlv;
+ }
/* Advance past IE header */
ie += 2;
@@ -1789,6 +1793,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
struct cmd_ds_802_11_ad_hoc_join cmd;
u8 preamble = RADIO_PREAMBLE_SHORT;
int ret = 0;
+ int hw, i;
+ u8 rates_max;
+ u8 *rates;
lbs_deb_enter(LBS_DEB_CFG80211);
@@ -1849,9 +1856,14 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
if (!rates_eid) {
lbs_add_rates(cmd.bss.rates);
} else {
- int hw, i;
- u8 rates_max = rates_eid[1];
- u8 *rates = cmd.bss.rates;
+ rates_max = rates_eid[1];
+ if (rates_max > MAX_RATES) {
+ lbs_deb_join("invalid rates");
+ rcu_read_unlock();
+ ret = -EINVAL;
+ goto out;
+ }
+ rates = cmd.bss.rates;
for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
u8 hw_rate = lbs_rates[hw].bitrate / 5;
for (i = 0; i < rates_max; i++) {
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 06a57c708992..44da911c9a1a 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1229,6 +1229,10 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!card->workqueue)) {
+ ret = -ENOMEM;
+ goto err_queue;
+ }
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1282,6 +1286,7 @@ err_activate_card:
lbs_remove_card(priv);
free:
destroy_workqueue(card->workqueue);
+err_queue:
while (card->packets) {
packet = card->packets;
card->packets = card->packets->next;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index a605d569f663..9d147b11ee51 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = {
{ MODEL_8388, "libertas/usb8388_v5.bin", NULL },
{ MODEL_8388, "libertas/usb8388.bin", NULL },
{ MODEL_8388, "usb8388.bin", NULL },
- { MODEL_8682, "libertas/usb8682.bin", NULL }
+ { MODEL_8682, "libertas/usb8682.bin", NULL },
+ { 0, NULL, NULL }
};
static struct usb_device_id if_usb_table[] = {
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index 909ac3685010..2b193f1257a5 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -69,7 +69,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
break;
}
- for (ch = priv->range.start; ch < priv->range.end; ch++)
+ for (ch = range->start; ch < range->end; ch++)
priv->channels[CHAN_TO_IDX(ch)].flags = 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 4da3541471e6..94901b0041ce 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -364,11 +364,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
struct mwifiex_power_cfg power_cfg;
int dbm = MBM_TO_DBM(mbm);
- if (type == NL80211_TX_POWER_FIXED) {
+ switch (type) {
+ case NL80211_TX_POWER_FIXED:
power_cfg.is_power_auto = 0;
+ power_cfg.is_power_fixed = 1;
power_cfg.power_level = dbm;
- } else {
+ break;
+ case NL80211_TX_POWER_LIMITED:
+ power_cfg.is_power_auto = 0;
+ power_cfg.is_power_fixed = 0;
+ power_cfg.power_level = dbm;
+ break;
+ case NL80211_TX_POWER_AUTOMATIC:
power_cfg.is_power_auto = 1;
+ break;
}
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -4018,16 +4027,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+ kfree(hostcmd);
return -EFAULT;
}
/* process hostcmd response*/
skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
- if (!skb)
+ if (!skb) {
+ kfree(hostcmd);
return -ENOMEM;
+ }
err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
hostcmd->len, hostcmd->cmd);
if (err) {
+ kfree(hostcmd);
kfree_skb(skb);
return -EMSGSIZE;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 1ff22055e54f..9ddaa767ea74 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -533,5 +533,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
rx_rate - 1 : rx_rate;
+ if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
+ rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
+
return rate_index;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index ae2b69db5994..6eacea28d7ac 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -296,15 +296,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
"total samples = %d\n",
atomic_read(&phist_data->num_samples));
- p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
- p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
- p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
- p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
+ p += sprintf(p,
+ "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n"
+ "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
- p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
- p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
- p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
+ p += sprintf(p,
+ "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n");
} else {
p += sprintf(p, "\n");
}
@@ -333,7 +331,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
value = atomic_read(&phist_data->noise_flr[i]);
if (value)
- p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
+ p += sprintf(p, "noise_flr[%02ddBm] = %d\n",
(int)(i-128), value);
}
for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 4b1894b4757f..395d6ece2cac 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1719,9 +1719,10 @@ struct mwifiex_ie_types_wmm_queue_status {
struct ieee_types_vendor_header {
u8 element_id;
u8 len;
- u8 oui[4]; /* 0~2: oui, 3: oui_type */
- u8 oui_subtype;
- u8 version;
+ struct {
+ u8 oui[3];
+ u8 oui_type;
+ } __packed oui;
} __packed;
struct ieee_types_wmm_parameter {
@@ -1735,6 +1736,9 @@ struct ieee_types_wmm_parameter {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
+
u8 qos_info_bitmap;
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
@@ -1752,6 +1756,8 @@ struct ieee_types_wmm_info {
* Version [1]
*/
struct ieee_types_vendor_header vend_hdr;
+ u8 oui_subtype;
+ u8 version;
u8 qos_info_bitmap;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index c488c3068abc..c67e08fa1aaf 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -240,6 +240,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
}
vs_ie = (struct ieee_types_header *)vendor_ie;
+ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+ IEEE_MAX_IE_SIZE)
+ return -EINVAL;
memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
vs_ie, vs_ie->len + 2);
le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
@@ -328,6 +331,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
struct ieee80211_vendor_ie *vendorhdr;
u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
int left_len, parsed_len = 0;
+ unsigned int token_len;
+ int err = 0;
if (!info->tail || !info->tail_len)
return 0;
@@ -343,6 +348,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
*/
while (left_len > sizeof(struct ieee_types_header)) {
hdr = (void *)(info->tail + parsed_len);
+ token_len = hdr->len + sizeof(struct ieee_types_header);
+ if (token_len > left_len) {
+ err = -EINVAL;
+ goto out;
+ }
+
switch (hdr->element_id) {
case WLAN_EID_SSID:
case WLAN_EID_SUPP_RATES:
@@ -356,13 +367,16 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_VENDOR_SPECIFIC:
break;
default:
- memcpy(gen_ie->ie_buffer + ie_len, hdr,
- hdr->len + sizeof(struct ieee_types_header));
- ie_len += hdr->len + sizeof(struct ieee_types_header);
+ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+ err = -EINVAL;
+ goto out;
+ }
+ memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
+ ie_len += token_len;
break;
}
- left_len -= hdr->len + sizeof(struct ieee_types_header);
- parsed_len += hdr->len + sizeof(struct ieee_types_header);
+ left_len -= token_len;
+ parsed_len += token_len;
}
/* parse only WPA vendor IE from tail, WMM IE is configured by
@@ -372,15 +386,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
WLAN_OUI_TYPE_MICROSOFT_WPA,
info->tail, info->tail_len);
if (vendorhdr) {
- memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
- vendorhdr->len + sizeof(struct ieee_types_header));
- ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
+ token_len = vendorhdr->len + sizeof(struct ieee_types_header);
+ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+ err = -EINVAL;
+ goto out;
+ }
+ memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
+ ie_len += token_len;
}
- if (!ie_len) {
- kfree(gen_ie);
- return 0;
- }
+ if (!ie_len)
+ goto out;
gen_ie->ie_index = cpu_to_le16(gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
@@ -390,13 +406,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
NULL, NULL)) {
- kfree(gen_ie);
- return -1;
+ err = -EINVAL;
+ goto out;
}
priv->gen_idx = gen_idx;
+
+ out:
kfree(gen_ie);
- return 0;
+ return err;
}
/* This function parses different IEs-head & tail IEs, beacon IEs,
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 536ab834b126..729a69f88a48 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -265,6 +265,7 @@ struct mwifiex_ds_encrypt_key {
struct mwifiex_power_cfg {
u32 is_power_auto;
+ u32 is_power_fixed;
u32 power_level;
};
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 26df28f4bfb2..d4e19efb75af 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -120,6 +120,7 @@ enum {
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define WPA_GTK_OUI_OFFSET 2
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index cb681b265b10..d0743bd25ba9 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -632,8 +632,11 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
+ kfree(card->evtbd_ring_vbase);
return -1;
+ }
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -973,8 +976,10 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ PCI_DMA_FROMDEVICE)) {
+ kfree_skb(skb);
return -1;
+ }
card->cmdrsp_buf = skb;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 78d59a67f7e1..5fde2e2f1fea 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+ WPA_GTK_OUI_OFFSET);
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
@@ -1236,6 +1237,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
}
switch (element_id) {
case WLAN_EID_SSID:
+ if (element_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
@@ -1245,6 +1248,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_SUPP_RATES:
+ if (element_len > MWIFIEX_SUPPORTED_RATES)
+ return -EINVAL;
memcpy(bss_entry->data_rates, current_ptr + 2,
element_len);
memcpy(bss_entry->supported_rates, current_ptr + 2,
@@ -1254,6 +1259,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_FH_PARAMS:
+ if (element_len + 2 < sizeof(*fh_param_set))
+ return -EINVAL;
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
memcpy(&bss_entry->phy_param_set.fh_param_set,
@@ -1262,6 +1269,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_DS_PARAMS:
+ if (element_len + 2 < sizeof(*ds_param_set))
+ return -EINVAL;
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
@@ -1273,6 +1282,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_CF_PARAMS:
+ if (element_len + 2 < sizeof(*cf_param_set))
+ return -EINVAL;
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
memcpy(&bss_entry->ss_param_set.cf_param_set,
@@ -1281,6 +1292,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_IBSS_PARAMS:
+ if (element_len + 2 < sizeof(*ibss_param_set))
+ return -EINVAL;
ibss_param_set =
(struct ieee_types_ibss_param_set *)
current_ptr;
@@ -1290,10 +1303,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_ERP_INFO:
+ if (!element_len)
+ return -EINVAL;
bss_entry->erp_flags = *(current_ptr + 2);
break;
case WLAN_EID_PWR_CONSTRAINT:
+ if (!element_len)
+ return -EINVAL;
bss_entry->local_constraint = *(current_ptr + 2);
bss_entry->sensed_11h = true;
break;
@@ -1336,15 +1353,22 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
- if (!memcmp
- (vendor_ie->vend_hdr.oui, wpa_oui,
- sizeof(wpa_oui))) {
+ /* 802.11 requires at least 3-byte OUI. */
+ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
+ return -EINVAL;
+
+ /* Not long enough for a match? Skip it. */
+ if (element_len < sizeof(wpa_oui))
+ break;
+
+ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
+ sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
+ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
@@ -1866,15 +1890,17 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
ETH_ALEN))
mwifiex_update_curr_bss_params(priv,
bss);
- cfg80211_put_bss(priv->wdev.wiphy, bss);
- }
- if ((chan->flags & IEEE80211_CHAN_RADAR) ||
- (chan->flags & IEEE80211_CHAN_NO_IR)) {
- mwifiex_dbg(adapter, INFO,
- "radar or passive channel %d\n",
- channel);
- mwifiex_save_hidden_ssid_channels(priv, bss);
+ if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
+ mwifiex_dbg(adapter, INFO,
+ "radar or passive channel %d\n",
+ channel);
+ mwifiex_save_hidden_ssid_channels(priv,
+ bss);
+ }
+
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
}
}
} else {
@@ -2852,6 +2878,13 @@ mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv,
vs_param_set->header.len =
cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
& 0x00FF) + 2);
+ if (le16_to_cpu(vs_param_set->header.len) >
+ MWIFIEX_MAX_VSIE_LEN) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Invalid param length!\n");
+ break;
+ }
+
memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
le16_to_cpu(vs_param_set->header.len));
*buffer += le16_to_cpu(vs_param_set->header.len) +
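The mwifiex scan.c hunks above add per-element length checks before every copy out of a beacon's information elements. The same defensive shape in standalone C — a bounds-checked walk over 1-byte-id / 1-byte-length TLVs (walk_ies and its callback are illustrative, not mwifiex symbols):

#include <stddef.h>
#include <stdint.h>

/* Walk a buffer of 802.11 information elements (1-byte id, 1-byte length,
 * then "length" bytes of payload), refusing to read past "len" bytes.
 * Returns 0 on success, -1 if an element claims more bytes than remain. */
static int walk_ies(const uint8_t *buf, size_t len,
		    void (*cb)(uint8_t id, const uint8_t *data, uint8_t elen))
{
	size_t off = 0;

	while (len - off >= 2) {
		uint8_t id = buf[off];
		uint8_t elen = buf[off + 1];

		if (elen > len - off - 2)
			return -1;	/* truncated or malformed element */
		cb(id, buf + off + 2, elen);
		off += 2 + (size_t)elen;
	}
	return 0;
}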
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 1532ac9cee0b..f2d10ba19920 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -271,6 +271,15 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
"11D: skip setting domain info in FW\n");
return 0;
}
+
+ if (country_ie_len >
+ (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+ rcu_read_unlock();
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: country_ie_len overflow!, deauth AP\n");
+ return -EINVAL;
+ }
+
memcpy(priv->adapter->country_code, &country_ie[2], 2);
domain_info->country_code[0] = country_ie[2];
@@ -314,8 +323,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
priv->scan_block = false;
if (bss) {
- if (adapter->region_code == 0x00)
- mwifiex_process_country_ie(priv, bss);
+ if (adapter->region_code == 0x00 &&
+ mwifiex_process_country_ie(priv, bss))
+ return -EINVAL;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
@@ -728,6 +738,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
if (!power_cfg->is_power_auto) {
+ u16 dbm_min = power_cfg->is_power_fixed ?
+ dbm : priv->min_tx_power_level;
+
txp_cfg->mode = cpu_to_le32(1);
pg_tlv = (struct mwifiex_types_power_group *)
(buf + sizeof(struct host_cmd_ds_txpwr_cfg));
@@ -742,7 +755,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x03;
pg->modulation_class = MOD_CLASS_HR_DSSS;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class OFDM */
@@ -750,7 +763,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x07;
pg->modulation_class = MOD_CLASS_OFDM;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg++;
/* Power group for modulation class HTBW20 */
@@ -758,7 +771,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_20;
pg++;
@@ -767,7 +780,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
pg->last_rate_code = 0x20;
pg->modulation_class = MOD_CLASS_HT;
pg->power_step = 0;
- pg->power_min = (s8) dbm;
+ pg->power_min = (s8) dbm_min;
pg->power_max = (s8) dbm;
pg->ht_bandwidth = HT_BW_40;
}
@@ -1374,7 +1387,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Test to see if it is a WPA IE, if not, then it is a
* gen IE
*/
- if (!memcmp(pvendor_ie->oui, wpa_oui,
+ if (!memcmp(&pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
find_wpa_ie = 1;
break;
@@ -1383,7 +1396,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
/* Test to see if it is a WPS IE, if so, enable
* wps session flag
*/
- if (!memcmp(pvendor_ie->oui, wps_oui,
+ if (!memcmp(&pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
priv->wps.session_enable = true;
mwifiex_dbg(priv->adapter, MSG,
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index df9704de0715..c6fc09d17462 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -917,59 +917,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
switch (*pos) {
case WLAN_EID_SUPP_RATES:
+ if (pos[1] > 32)
+ return;
sta_ptr->tdls_cap.rates_len = pos[1];
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[i] = pos[i + 2];
break;
case WLAN_EID_EXT_SUPP_RATES:
+ if (pos[1] > 32)
+ return;
basic = sta_ptr->tdls_cap.rates_len;
+ if (pos[1] > 32 - basic)
+ return;
for (i = 0; i < pos[1]; i++)
sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
sta_ptr->tdls_cap.rates_len += pos[1];
break;
case WLAN_EID_HT_CAPABILITY:
- memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_ht_cap))
+ return;
+ /* copy the ie's value into ht_capb*/
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
sizeof(struct ieee80211_ht_cap));
sta_ptr->is_11n_enabled = 1;
break;
case WLAN_EID_HT_OPERATION:
- memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ if (pos > end -
+ sizeof(struct ieee80211_ht_operation) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_ht_operation))
+ return;
+ /* copy the ie's value into ht_oper*/
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
sizeof(struct ieee80211_ht_operation));
break;
case WLAN_EID_BSS_COEX_2040:
+ if (pos > end - 3)
+ return;
+ if (pos[1] != 1)
+ return;
sta_ptr->tdls_cap.coex_2040 = pos[2];
break;
case WLAN_EID_EXT_CAPABILITY:
+ if (pos > end - sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] < sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] > 8)
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], 8));
break;
case WLAN_EID_RSN:
+ if (pos > end - sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] < sizeof(struct ieee_types_header))
+ return;
+ if (pos[1] > IEEE_MAX_IE_SIZE -
+ sizeof(struct ieee_types_header))
+ return;
memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
sizeof(struct ieee_types_header) +
min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
sizeof(struct ieee_types_header)));
break;
case WLAN_EID_QOS_CAPA:
+ if (pos > end - 3)
+ return;
+ if (pos[1] != 1)
+ return;
sta_ptr->tdls_cap.qos_info = pos[2];
break;
case WLAN_EID_VHT_OPERATION:
- if (priv->adapter->is_hw_11ac_capable)
- memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (pos > end -
+ sizeof(struct ieee80211_vht_operation) - 2)
+ return;
+ if (pos[1] !=
+ sizeof(struct ieee80211_vht_operation))
+ return;
+ /* copy the ie's value into vhtoper*/
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
sizeof(struct ieee80211_vht_operation));
+ }
break;
case WLAN_EID_VHT_CAPABILITY:
if (priv->adapter->is_hw_11ac_capable) {
- memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ if (pos > end -
+ sizeof(struct ieee80211_vht_cap) - 2)
+ return;
+ if (pos[1] != sizeof(struct ieee80211_vht_cap))
+ return;
+ /* copy the ie's value into vhtcap*/
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
sizeof(struct ieee80211_vht_cap));
sta_ptr->is_11ac_enabled = 1;
}
break;
case WLAN_EID_AID:
- if (priv->adapter->is_hw_11ac_capable)
+ if (priv->adapter->is_hw_11ac_capable) {
+ if (pos > end - 4)
+ return;
+ if (pos[1] != 2)
+ return;
sta_ptr->tdls_cap.aid =
le16_to_cpu(*(__le16 *)(pos + 2));
+ }
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index a7e9f544f219..f2ef1464e20c 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -287,6 +287,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+ return;
memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
rate_len = rate_ie->len;
}
@@ -294,8 +296,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
params->beacon.tail,
params->beacon.tail_len);
- if (rate_ie)
+ if (rate_ie) {
+ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+ return;
memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+ }
return;
}
@@ -413,6 +418,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
params->beacon.tail_len);
if (vendor_ie) {
wmm_ie = (struct ieee_types_header *)vendor_ie;
+ if (*(vendor_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+ return;
memcpy(&bss_cfg->wmm_info, wmm_ie + 1,
sizeof(bss_cfg->wmm_info));
priv->wmm_enabled = 1;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index dea2fe671dfe..c93fcafbcc7a 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
@@ -980,6 +980,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
"WMM Parameter Set Count: %d\n",
wmm_param_ie->qos_info_bitmap & mask);
+ if (wmm_param_ie->vend_hdr.len + 2 >
+ sizeof(struct ieee_types_wmm_parameter))
+ break;
+
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
wmm_param_ie->vend_hdr.len + 2);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index a8bc064bc14f..56cad16e70ca 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb)
struct mt7601u_rx_queue *q = &dev->rx_q;
unsigned long flags;
- spin_lock_irqsave(&dev->rx_lock, flags);
+ /* do no schedule rx tasklet if urb has been unlinked
+ * or the device has been removed
+ */
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->rx_lock, flags);
if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
goto out;
@@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb)
struct sk_buff *skb;
unsigned long flags;
- spin_lock_irqsave(&dev->tx_lock, flags);
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -ENOENT:
+ return;
+ default:
+ dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
+ urb->status);
+ /* fall through */
+ case 0:
+ break;
+ }
- if (mt7601u_urb_has_error(urb))
- dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+ spin_lock_irqsave(&dev->tx_lock, flags);
if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
goto out;
skb = q->e[q->start].skb;
+ q->e[q->start].skb = NULL;
trace_mt_tx_dma_done(dev, skb);
__skb_queue_tail(&dev->tx_skb_done, skb);
@@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rx_lock, flags);
-
- for (i = 0; i < dev->rx_q.entries; i++) {
- int next = dev->rx_q.end;
- spin_unlock_irqrestore(&dev->rx_lock, flags);
- usb_poison_urb(dev->rx_q.e[next].urb);
- spin_lock_irqsave(&dev->rx_lock, flags);
- }
-
- spin_unlock_irqrestore(&dev->rx_lock, flags);
+ for (i = 0; i < dev->rx_q.entries; i++)
+ usb_poison_urb(dev->rx_q.e[i].urb);
}
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
@@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
int i;
- WARN_ON(q->used);
-
for (i = 0; i < q->entries; i++) {
usb_poison_urb(q->e[i].urb);
+ if (q->e[i].skb)
+ mt7601u_tx_status(q->dev, q->e[i].skb);
usb_free_urb(q->e[i].urb);
}
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
index 662d12703b69..57b503ae63f1 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -17,7 +17,7 @@
struct mt7601u_dev;
-#define MT7601U_EE_MAX_VER 0x0c
+#define MT7601U_EE_MAX_VER 0x0d
#define MT7601U_EEPROM_SIZE 256
#define MT7601U_DEFAULT_TX_POWER 6
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index ca09a5d4305e..71a47459bf8a 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -221,7 +221,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
do {
val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
- if (val && ~val)
+ if (val && val != 0xff)
break;
} while (--i);
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index ad77bec1ba0f..2cb1883c0d33 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
- spin_lock(&dev->mac_lock);
+ spin_lock_bh(&dev->mac_lock);
ieee80211_tx_status(dev->hw, skb);
- spin_unlock(&dev->mac_lock);
+ spin_unlock_bh(&dev->mac_lock);
}
static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index f68d492129c6..822833a52dd3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -669,7 +669,6 @@ enum rt2x00_state_flags {
CONFIG_CHANNEL_HT40,
CONFIG_POWERSAVING,
CONFIG_HT_DISABLED,
- CONFIG_QOS_DISABLED,
CONFIG_MONITORING,
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 987c7c4f43cd..55036ce5465c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -666,19 +666,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
rt2x00dev->intf_associated--;
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
}
/*
- * Check for access point which do not support 802.11e . We have to
- * generate data frames sequence number in S/W for such AP, because
- * of H/W bug.
- */
- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
- /*
* When the erp information has changed, we should perform
* additional configuration steps. For all other changes we are done.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 68b620b2462f..9a15a69b96a6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
/*
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
- * seqno on retransmited data (non-QOS) frames. To workaround
- * the problem let's generate seqno in software if QOS is
- * disabled.
+ * seqno on retransmitted data (non-QOS) and management frames.
+ * To workaround the problem let's generate seqno in software.
+ * Except for beacons which are transmitted periodically by H/W
+ * hence hardware has to assign seqno for them.
*/
- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- else
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
/* H/W will generate sequence number */
return;
+ }
+
+ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
/*
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 6113624ccec3..17e3d5e83062 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -446,12 +446,13 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
skb_queue_tail(&priv->rx_queue, skb);
usb_anchor_urb(entry, &priv->anchored);
ret = usb_submit_urb(entry, GFP_KERNEL);
- usb_put_urb(entry);
if (ret) {
skb_unlink(skb, &priv->rx_queue);
usb_unanchor_urb(entry);
+ usb_put_urb(entry);
goto err;
}
+ usb_put_urb(entry);
}
return ret;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
index c2d5b495c179..c089540116fa 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
@@ -146,7 +146,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
led->dev = dev;
led->ledpin = ledpin;
led->is_radio = is_radio;
- strncpy(led->name, name, sizeof(led->name));
+ strlcpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 08d587a342d3..9143b173935d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1348,6 +1348,7 @@ struct rtl8xxxu_fileops {
u8 has_s0s1:1;
u8 has_tx_report:1;
u8 gen2_thermal_meter:1;
+ u8 needs_full_init:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 02b8ddd98a95..f51ee88d692b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1673,6 +1673,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.has_s0s1 = 1,
.has_tx_report = 1,
.gen2_thermal_meter = 1,
+ .needs_full_init = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 4e725d165aa6..18d5984b78da 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -3905,6 +3905,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
else
macpower = true;
+ if (fops->needs_full_init)
+ macpower = false;
+
ret = fops->power_on(priv);
if (ret < 0) {
dev_warn(dev, "%s: Failed power on\n", __func__);
@@ -5419,6 +5422,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
+ usb_free_urb(urb);
goto error;
}
@@ -5660,6 +5664,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -5885,7 +5890,7 @@ static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
u8 dir, xtype, num;
int ret = 0;
- host_interface = &interface->altsetting[0];
+ host_interface = interface->cur_altsetting;
interface_desc = &host_interface->desc;
endpoints = interface_desc->bNumEndpoints;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 4ac928bf1f8e..7de18ed10db8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -466,6 +466,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (unlikely(!rtlpriv->works.rtl_wq)) {
+ pr_err("Failed to allocate work queue\n");
+ return;
+ }
+
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
(void *)rtl_watchdog_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index e15b462d096b..21b7cb845bf4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1095,13 +1095,15 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_irq_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1223,10 +1225,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*task */
tasklet_init(&rtlpriv->works.irq_tasklet,
- (void (*)(unsigned long))_rtl_pci_irq_tasklet,
+ _rtl_pci_irq_tasklet,
(unsigned long)hw);
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index d0ffc4d508cf..e8963dd2977c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -770,6 +770,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -864,6 +867,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
} else {
noa_num = (noa_len - 2) / 13;
+ if (noa_num > P2P_MAX_NOA_NUM)
+ noa_num = P2P_MAX_NOA_NUM;
+
}
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 6ee6bf8e7eaf..ab53cf42cf42 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -427,7 +427,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
struct wiphy *wiphy = hw->wiphy;
struct country_code_to_enum_rd *country = NULL;
- if (wiphy == NULL || &rtlpriv->regd == NULL)
+ if (!wiphy)
return -EINVAL;
/* init country_code from efuse channel plan */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index ae8f055483fa..39a6bd314ca3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1576,6 +1576,8 @@ static bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
* This is maybe necessary:
* rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
*/
+ dev_kfree_skb(skb);
+
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 8de29cc3ced0..a24644f34e65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -234,7 +234,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index d91f8bbfe7a0..2c23c9edab5c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1209,6 +1209,7 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ rtlpci->irq_enabled = true;
}
void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
@@ -1218,7 +1219,7 @@ void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- synchronize_irq(rtlpci->pdev->irq);
+ rtlpci->irq_enabled = false;
}
static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1389,7 +1390,7 @@ void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
bcn_interval = mac->beacon_interval;
atim_window = 2;
- /*rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
@@ -1409,9 +1410,9 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
"beacon_interval:%d\n", bcn_interval);
- /* rtl92de_disable_interrupt(hw); */
+ rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
- /* rtl92de_enable_interrupt(hw); */
+ rtl92de_enable_interrupt(hw);
}
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 1ebfee18882f..63cad2f875b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -243,6 +243,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.led_control = rtl92de_led_control,
.set_desc = rtl92de_set_desc,
.get_desc = rtl92de_get_desc,
+ .is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
.enable_hw_sec = rtl92de_enable_hw_security_config,
.set_key = rtl92de_set_key,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index e998e98d74cb..bddf57cb47f0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -843,13 +843,15 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
break;
}
} else {
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = GET_RX_DESC_OWN(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = GET_RX_DESC_PKT_LEN(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = GET_RX_DESC_BUFF_ADDR(p_desc);
break;
default:
RT_ASSERT(false, "ERR rxdesc :%d not process\n",
@@ -860,6 +862,23 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
return ret;
}
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+ u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+ u8 own = (u8)rtl92de_get_desc(entry, true, HW_DESC_OWN);
+
+ /* a beacon packet will only use the first
+ * descriptor by default, and the own bit may not
+ * be cleared by the hardware
+ */
+ if (own)
+ return false;
+ return true;
+}
+
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 194d99f8bacf..d061f33b9f68 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -740,6 +740,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
+ u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f8be0bd7e326..0741280b65a4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1703,6 +1703,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtlhal->oem_id = RT_CID_819X_LENOVO;
break;
}
+ break;
case 0x1025:
rtlhal->oem_id = RT_CID_819X_ACER;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index ae0c48f3c2bc..1f02461de261 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1088,8 +1088,10 @@ int rtl_usb_probe(struct usb_interface *intf,
rtlpriv->hw = hw;
rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
GFP_KERNEL);
- if (!rtlpriv->usb_data)
+ if (!rtlpriv->usb_data) {
+ ieee80211_free_hw(hw);
return -ENOMEM;
+ }
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
@@ -1152,6 +1154,7 @@ error_out:
_rtl_usb_io_handler_release(hw);
usb_put_dev(udev);
complete(&rtlpriv->firmware_loading_complete);
+ kfree(rtlpriv->usb_data);
return -ENODEV;
}
EXPORT_SYMBOL(rtl_usb_probe);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index dbb23899ddcb..891295ebd875 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -199,6 +199,7 @@ void rsi_mac80211_detach(struct rsi_hw *adapter)
ieee80211_stop_queues(hw);
ieee80211_unregister_hw(hw);
ieee80211_free_hw(hw);
+ adapter->hw = NULL;
}
rsi_remove_dbgfs(adapter);
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index ef5d394f185b..974387ad1e8c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -103,7 +103,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
__le16 buffer_size;
int ii, bep_found = 0;
- iface_desc = &(interface->altsetting[0]);
+ iface_desc = interface->cur_altsetting;
for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
endpoint = &(iface_desc->endpoint[ii].desc);
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index d3fbe33d2324..a13f08fd8690 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
atomic_inc(&handle->thread_done);
rsi_set_event(&handle->event);
- wait_for_completion(&handle->completion);
return kthread_stop(handle->task);
}
diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c
index 30e7646d04af..16be7fa82a23 100644
--- a/drivers/net/wireless/st/cw1200/fwio.c
+++ b/drivers/net/wireless/st/cw1200/fwio.c
@@ -323,12 +323,12 @@ int cw1200_load_firmware(struct cw1200_common *priv)
goto out;
}
- priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
- if (priv->hw_type < 0) {
+ ret = cw1200_get_hw_type(val32, &major_revision);
+ if (ret < 0) {
pr_err("Can't deduce hardware type.\n");
- ret = -ENOTSUPP;
goto out;
}
+ priv->hw_type = ret;
/* Set DPLL Reg value, and read back to confirm writes work */
ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index dc478cedbde0..84624c812a15 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
mutex_init(&priv->wsm_cmd_mux);
mutex_init(&priv->conf_mutex);
priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+ if (!priv->workqueue) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
sema_init(&priv->scan.lock, 1);
INIT_WORK(&priv->scan.work, cw1200_scan_work);
INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index c5492d792f43..b35f470b40ff 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
- if (!frame.skb)
+ if (!frame.skb) {
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
return -ENOMEM;
+ }
if (req->ie_len)
memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 5438975c7ff2..17d32ce5d16b 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1058,8 +1058,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
goto out;
ret = wl12xx_fetch_firmware(wl, plt);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ kfree(wl->tx_res_if);
+ }
out:
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index fd4e9ba176c9..332a3a5c1c90 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -66,7 +66,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 01ca1d57b3d9..213e5022ab93 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1272,7 +1272,7 @@ static void print_id(struct usb_device *udev)
static int eject_installer(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
- struct usb_host_interface *iface_desc = &intf->altsetting[0];
+ struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
unsigned char *cmd;
u8 bulk_out_ep;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index cae691486105..46008f284550 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -171,7 +171,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
-static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
@@ -706,7 +707,6 @@ err_unmap:
xenvif_unmap_frontend_data_rings(queue);
netif_napi_del(&queue->napi);
err:
- module_put(THIS_MODULE);
return err;
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f57815befc90..a469fbe1abaf 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -927,6 +927,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
@@ -942,6 +943,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
kfree_skb(nskb);
break;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 14ceeaaa7fe5..6d391a268469 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -888,9 +888,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return 0;
}
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
- struct sk_buff *skb,
- struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
{
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@@ -907,9 +907,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
- queue->rx.rsp_cons = ++cons;
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
kfree_skb(nskb);
- return ~0U;
+ return -ENOENT;
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -920,7 +920,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}
- return cons;
+ queue->rx.rsp_cons = cons;
+
+ return 0;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1046,8 +1048,7 @@ err:
skb->data_len = rx->status;
skb->len += rx->status;
- i = xennet_fill_frags(queue, skb, &tmpq);
- if (unlikely(i == ~0U))
+ if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1057,7 +1058,7 @@ err:
__skb_queue_tail(&rxq, skb);
- queue->rx.rsp_cons = ++i;
+ i = ++queue->rx.rsp_cons;
work_done++;
}
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 7c1eaea3b685..d2fe134052f0 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -192,7 +192,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
const struct firmware *fw;
struct sk_buff *skb;
unsigned long len;
- u8 max_size, payload_size;
+ int max_size, payload_size;
int rc = 0;
if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
@@ -215,8 +215,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
while (len) {
- payload_size = min_t(unsigned long, (unsigned long) max_size,
- len);
+ payload_size = min_t(unsigned long, max_size, len);
skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
GFP_KERNEL);
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 712936f5d2d6..f1addfd7b31a 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -268,7 +268,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
*fw_vsc_cfg, len);
if (r) {
- devm_kfree(dev, fw_vsc_cfg);
+ devm_kfree(dev, *fw_vsc_cfg);
goto vsc_read_err;
}
} else {
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 06a157c63416..7eab97585f22 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -238,8 +238,10 @@ static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
if (r == -EREMOTEIO) {
phy->hard_fault = r;
- skb = NULL;
- } else if (r < 0) {
+ if (info->mode == NXP_NCI_MODE_FW)
+ nxp_nci_fw_recv_frame(phy->ndev, NULL);
+ }
+ if (r < 0) {
nfc_err(&client->dev, "Read failed with error %d\n", r);
goto exit_irq_handled;
}
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index f837c39a8017..0f6905123b0a 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -240,6 +240,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
out:
gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
+ usleep_range(10000, 15000);
}
static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 12e819ddf17a..3afc53ff7369 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -704,7 +704,7 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
target->nfcid1_len != 10)
return -EOPNOTSUPP;
- return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
+ return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
PN544_RF_READER_CMD_ACTIVATE_NEXT,
target->nfcid1, target->nfcid1_len, NULL);
} else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK |
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 073e4a478c89..151b220381f9 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -573,7 +573,7 @@ static void port100_tx_update_payload_len(void *_frame, int len)
{
struct port100_frame *frame = _frame;
- frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+ le16_add_cpu(&frame->datalen, len);
}
static bool port100_rx_frame_is_valid(void *_frame)
@@ -791,7 +791,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
- usb_unlink_urb(dev->out_urb);
+ usb_kill_urb(dev->out_urb);
exit:
mutex_unlock(&dev->out_urb_lock);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 56f2112e0cd8..85df2e009310 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index dacb9166081b..2f08e16ba566 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -719,6 +719,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
NFC_PROTO_FELICA_MASK;
} else {
kfree_skb(nfcid_skb);
+ nfcid_skb = NULL;
/* P2P in type A */
r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
ST21NFCA_RF_READER_F_NFCID1,
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 3a98563d4a12..eac608a457f0 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -326,6 +326,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index c2840e412962..850e75571c8e 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1074,6 +1074,12 @@ static const struct spi_device_id st95hf_id[] = {
};
MODULE_DEVICE_TABLE(spi, st95hf_id);
+static const struct of_device_id st95hf_spi_of_match[] = {
+ { .compatible = "st,st95hf" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
+
static int st95hf_probe(struct spi_device *nfc_spi_dev)
{
int ret;
@@ -1260,6 +1266,7 @@ static struct spi_driver st95hf_driver = {
.driver = {
.name = "st95hf",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(st95hf_spi_of_match),
},
.id_table = st95hf_id,
.probe = st95hf_probe,
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 7310a261c858..e175cbeba266 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -330,7 +330,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
return 0;
}
-static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
u64 shift, mask;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 97dd2925ed6e..5d2c76682848 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -190,14 +190,15 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
nd_btt->id = ida_simple_get(&nd_region->btt_ida, 0, 0, GFP_KERNEL);
- if (nd_btt->id < 0) {
- kfree(nd_btt);
- return NULL;
- }
+ if (nd_btt->id < 0)
+ goto out_nd_btt;
nd_btt->lbasize = lbasize;
- if (uuid)
+ if (uuid) {
uuid = kmemdup(uuid, 16, GFP_KERNEL);
+ if (!uuid)
+ goto out_put_id;
+ }
nd_btt->uuid = uuid;
dev = &nd_btt->dev;
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
@@ -212,6 +213,13 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
return NULL;
}
return dev;
+
+out_put_id:
+ ida_simple_remove(&nd_region->btt_ida, nd_btt->id);
+
+out_nd_btt:
+ kfree(nd_btt);
+ return NULL;
}
struct device *nd_btt_create(struct nd_region *nd_region)
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5768a4749564..65ac1d3870f9 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -851,8 +851,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
return -EFAULT;
}
- if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &cmd_mask))
+ if (!desc ||
+ (desc->out_num + desc->in_num == 0) ||
+ cmd > ND_CMD_CALL ||
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 45fa82cae87c..da504665b1c7 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -118,7 +118,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!dax_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, DAX_SIG);
dev_dbg(dev, "%s: dax: %s\n", __func__,
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 66a089d561cf..9108004a0d9b 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -490,15 +490,26 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- (unsigned long) to_namespace_index(ndd, 0);
}
+static void reap_victim(struct nd_mapping *nd_mapping,
+ struct nd_label_ent *victim)
+{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ u32 slot = to_slot(ndd, victim->label);
+
+ dev_dbg(ndd->dev, "free: %d\n", slot);
+ nd_label_free_slot(ndd, slot);
+ victim->label = NULL;
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
{
u64 cookie = nd_region_interleave_set_cookie(nd_region);
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
+ struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
@@ -551,18 +562,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (memcmp(nspm->uuid, label_ent->label->uuid,
- NSLABEL_UUID_LEN) != 0)
- continue;
- victim = label_ent;
- list_move_tail(&victim->list, &nd_mapping->labels);
- break;
- }
- if (victim) {
- dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
- slot = to_slot(ndd, victim->label);
- nd_label_free_slot(ndd, slot);
- victim->label = NULL;
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
+ || memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) == 0)
+ reap_victim(nd_mapping, label_ent);
}
/* update index */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 9bc5f555ee68..e83453e1b308 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1210,12 +1210,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
+
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+ struct nd_label_id label_id;
+
+ if (!nd_label)
+ continue;
+ nd_label_gen_id(&label_id, nd_label->uuid,
+ __le32_to_cpu(nd_label->flags));
+ if (strcmp(old_label_id.id, label_id.id) == 0)
+ set_bit(ND_LABEL_REAP, &label_ent->flags);
+ }
+ mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:
@@ -2028,9 +2043,12 @@ struct device *create_namespace_blk(struct nd_region *nd_region,
if (!nsblk->uuid)
goto blk_err;
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
- if (name[0])
+ if (name[0]) {
nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
GFP_KERNEL);
+ if (!nsblk->alt_name)
+ goto blk_err;
+ }
res = nsblk_add_resource(nd_region, ndd, nsblk,
__le64_to_cpu(nd_label->dpa));
if (!res)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d869236b474f..bd29e598bac1 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -113,8 +113,12 @@ struct nd_percpu_lane {
spinlock_t lock;
};
+enum nd_label_flags {
+ ND_LABEL_REAP,
+};
struct nd_label_ent {
struct list_head list;
+ unsigned long flags;
struct nd_namespace_label *label;
};
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index dde9853453d3..e901e3a3b04c 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -36,6 +36,7 @@ struct nd_pfn_sb {
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
__le32 align;
+ /* minor-version-3 guarantees the padding and flags are zero */
u8 padding[4000];
__le64 checksum;
};
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index ba9aa8475e6d..f40c9c626861 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -349,6 +349,15 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
return dev;
}
+/**
+ * nd_pfn_validate - read and validate info-block
+ * @nd_pfn: fsdax namespace runtime state / properties
+ * @sig: 'devdax' or 'fsdax' signature
+ *
+ * Upon return the info-block buffer contents (->pfn_sb) are
+ * indeterminate when validation fails, and a coherent info-block
+ * otherwise.
+ */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -486,7 +495,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
nvdimm_bus_unlock(&ndns->dev);
if (!pfn_dev)
return -ENOMEM;
- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
rc = nd_pfn_validate(nd_pfn, PFN_SIG);
@@ -584,7 +593,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
u64 checksum;
int rc;
- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
if (!pfn_sb)
return -ENOMEM;
@@ -593,11 +602,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = DAX_SIG;
else
sig = PFN_SIG;
+
rc = nd_pfn_validate(nd_pfn, sig);
if (rc != -ENODEV)
return rc;
/* no info block, do init */;
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
@@ -673,7 +685,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(2);
+ pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->start_pad = cpu_to_le32(start_pad);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 979c6ecc6446..9561a247d0dc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1043,7 +1043,7 @@ static int nvme_pr_reserve(struct block_device *bdev, u64 key,
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
enum pr_type type, bool abort)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
+ u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}
@@ -1055,7 +1055,7 @@ static int nvme_pr_clear(struct block_device *bdev, u64 key)
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
@@ -1765,7 +1765,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
+ unsigned i, j, nsid, prev = 0;
+ unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
int ret = 0;
ns_list = kzalloc(0x1000, GFP_KERNEL);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d31d8e252578..253dcc96dfb2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1863,7 +1863,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
- *val = readq(to_nvme_dev(ctrl)->bar + off);
+ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
return 0;
}
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index cdb7752dcbb7..446f61ba018d 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -47,9 +47,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
}
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+ sectors[READ]), 1000);
host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+ data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
+ sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -75,11 +77,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
rcu_read_lock();
list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
- data_units_read +=
- part_stat_read(ns->bdev->bd_part, sectors[READ]);
+ data_units_read += DIV_ROUND_UP(
+ part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
- data_units_written +=
- part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
+ data_units_written += DIV_ROUND_UP(
+ part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
}
rcu_read_unlock();
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f12753eb3216..96ea6c76be6e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -709,6 +709,15 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -747,6 +756,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -849,21 +859,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
-static void nvmet_fatal_error_handler(struct work_struct *work)
-{
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, fatal_err_work);
-
- pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
- ctrl->ops->delete_ctrl(ctrl);
-}
-
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
schedule_work(&ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 824e282cd80e..2a0c5f3b0e50 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -401,10 +401,17 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_root_nvmem;
+ if (nvmem->read_only) {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_ro_nvmem;
+ } else {
+ if (config->root_only)
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_nvmem;
+ }
nvmem->eeprom.attr.name = "eeprom";
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -602,7 +609,7 @@ static struct nvmem_device *nvmem_find(const char *name)
d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
if (!d)
- return NULL;
+ return ERR_PTR(-ENOENT);
return to_nvmem_device(d);
}
@@ -934,7 +941,7 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
void *buf)
{
u8 *p, *b;
- int i, bit_offset = cell->bit_offset;
+ int i, extra, bit_offset = cell->bit_offset;
p = b = buf;
if (bit_offset) {
@@ -949,11 +956,16 @@ static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
p = b;
*b++ >>= bit_offset;
}
-
- /* result fits in less bytes */
- if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
- *p-- = 0;
+ } else {
+ /* point to the msb */
+ p += cell->bytes - 1;
}
+
+ /* result fits in less bytes */
+ extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+ while (--extra >= 0)
+ *p-- = 0;
+
/* clear msb bits if any leftover in the last byte */
*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ba7b034b2b91..6b8646db110c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,8 @@ config OF_OVERLAY
config OF_NUMA
bool
+config OF_DMA_DEFAULT_COHERENT
+ # arches should select this if DMA is coherent by default for OF devices
+ bool
+
endif # OF
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 72914cdfce2a..37619bb2c97a 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -896,12 +896,16 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
* @np: device node
*
* It returns true if "dma-coherent" property was found
- * for this device in DT.
+ * for this device in the DT, or if DMA is coherent by
+ * default for OF devices on the current platform.
*/
bool of_dma_is_coherent(struct device_node *np)
{
struct device_node *node = of_node_get(np);
+ if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
+ return true;
+
while (node) {
if (of_property_read_bool(node, "dma-coherent")) {
of_node_put(node);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c5139764d72d..60d242fa9924 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -170,9 +170,6 @@ int __of_attach_node_sysfs(struct device_node *np)
struct property *pp;
int rc;
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
if (!of_kset)
return 0;
@@ -2311,7 +2308,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
*/
- if (!strcmp(np->type, "cpu"))
+ if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
for_each_child_of_node(np, child)
if (!strcmp(child->type, "cache"))
return child;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 262281bd68fa..1e70851b1530 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -353,7 +353,7 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
struct phy_device *phy;
iface = of_get_phy_mode(np);
- if (iface < 0)
+ if ((int)iface < 0)
return NULL;
phy_np = of_parse_phandle(np, "phy-handle", 0);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 0a1ebbbd3f16..aeb6d3009ae9 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -821,10 +821,13 @@ static void __init of_unittest_platform_populate(void)
of_platform_populate(np, match, NULL, &test_bus->dev);
for_each_child_of_node(np, child) {
- for_each_child_of_node(child, grandchild)
- unittest(of_find_device_by_node(grandchild),
+ for_each_child_of_node(child, grandchild) {
+ pdev = of_find_device_by_node(grandchild);
+ unittest(pdev,
"Could not create device for node '%s'\n",
grandchild->name);
+ of_dev_put(pdev);
+ }
}
of_platform_depopulate(&test_bus->dev);
@@ -933,6 +936,7 @@ static int __init unittest_data_add(void)
of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
+ kfree(unittest_data);
return -ENODATA;
}
of_node_set_flag(unittest_data_node, OF_DETACHED);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index c4953eca907d..7a5306bf56c8 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -563,8 +563,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
/* We currently only support kernel addresses */
BUG_ON(sid != KERNEL_SPACE);
- mtsp(sid,1);
-
/*
** WORD 1 - low order word
** "hints" parm includes the VALID bit!
@@ -595,7 +593,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index ed92c1254cff..d842ae5310f7 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -160,6 +160,15 @@ struct dino_device
(struct dino_device *)__pdata; })
+/* Check if PCI device is behind a Card-mode Dino. */
+static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
+{
+ struct dino_device *dino_dev;
+
+ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
+ return is_card_dino(&dino_dev->hba.dev->id);
+}
+
/*
* Dino Configuration Space Accessor Functions
*/
@@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
+#ifdef CONFIG_TULIP
+static void pci_fixup_tulip(struct pci_dev *dev)
+{
+ if (!pci_dev_is_behind_card_dino(dev))
+ return;
+ if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
+ return;
+ pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
+ pci_name(dev));
+ /* Disable this card by zeroing the PCI resources */
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+ memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
+#endif /* CONFIG_TULIP */
static void __init
dino_bios_init(void)
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index b48243131993..3a91e06926ad 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -568,6 +568,9 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
break;
case DISPLAY_MODEL_LASI:
+ /* Skip LED registration when running on QEMU */
+ if (running_on_qemu)
+ return 1;
LED_DATA_REG = data_reg;
led_func_ptr = led_LASI_driver;
printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 56918d1c0ed3..188fab57d170 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -573,8 +573,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
pa = virt_to_phys(vba);
pa &= IOVP_MASK;
- mtsp(sid,1);
- asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 4399de34054a..a7ceed7182ac 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Iterates through all the devices connected to the bus and returns 1
+ * if the device is a parallel port.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -282,6 +294,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+ * check if the bus has any parallel port registered; if
+ * none is found, load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
@@ -895,6 +916,7 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
if (ret) {
+ kfree(par_dev->state);
put_device(&par_dev->dev);
goto err_put_port;
}
@@ -912,6 +934,7 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
+ kfree(par_dev->state);
device_unregister(&par_dev->dev);
goto err_put_port;
}
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index b4d8ccfd9f7c..a597619f25d6 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -1575,6 +1575,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
static void hv_eject_device_work(struct work_struct *work)
{
struct pci_eject_response *ejct_pkt;
+ struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
struct pci_dev *pdev;
unsigned long flags;
@@ -1585,6 +1586,7 @@ static void hv_eject_device_work(struct work_struct *work)
} ctxt;
hpdev = container_of(work, struct hv_pci_dev, wrk);
+ hbus = hpdev->hbus;
if (hpdev->state != hv_pcichild_ejecting) {
put_pcichild(hpdev, hv_pcidev_ref_pnp);
@@ -1598,8 +1600,7 @@ static void hv_eject_device_work(struct work_struct *work)
* because hbus->pci_bus may not exist yet.
*/
wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
- wslot);
+ pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
if (pdev) {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
@@ -1607,21 +1608,24 @@ static void hv_eject_device_work(struct work_struct *work)
pci_unlock_rescan_remove();
}
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
VM_PKT_DATA_INBAND, 0);
- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
- list_del(&hpdev->list_entry);
- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
-
put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev, hv_pcidev_ref_initial);
put_pcichild(hpdev, hv_pcidev_ref_pnp);
- put_hvpcibus(hpdev->hbus);
+
+ /* hpdev has been freed. Do not use it any more. */
+ put_hvpcibus(hbus);
}
/**
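
The reordering in hv_eject_device_work() above follows a common lifetime
rule: anything still needed after the final reference drop has to be copied
out first, because the last put may free the object. A sketch of the rule
with hypothetical child/parent types and helpers:

    static void child_eject(struct child *c)
    {
            struct parent *p = c->parent;      /* cache before c can be freed */

            kref_put(&c->ref, child_release);  /* last put may free c */
            parent_notify(p);                  /* c is not dereferenced again */
    }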
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 9397c4667106..f011a8780ff5 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -502,7 +502,7 @@ void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
/* Disable Link training */
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+ ks_dw_app_writel(ks_pcie, CMD_STATUS, val);
/* Initiate Link Training */
val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index eac0a1238e9d..c690299d5c4a 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -43,6 +43,7 @@
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
@@ -57,6 +58,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8dfccf733241..90be00c1bab9 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -603,12 +603,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
-/* Tegra PCIE requires relaxed ordering */
+/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
@@ -1898,14 +1901,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
err = of_pci_get_devfn(port);
if (err < 0) {
dev_err(dev, "failed to parse address: %d\n", err);
- return err;
+ goto err_node_put;
}
index = PCI_SLOT(err);
if (index < 1 || index > soc->num_ports) {
dev_err(dev, "invalid port number: %d\n", index);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_node_put;
}
index--;
@@ -1914,12 +1918,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (err < 0) {
dev_err(dev, "failed to parse # of lanes: %d\n",
err);
- return err;
+ goto err_node_put;
}
if (value > 16) {
dev_err(dev, "invalid # of lanes: %u\n", value);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_node_put;
}
lanes |= value << (index << 3);
@@ -1933,13 +1938,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp)
- return -ENOMEM;
+ if (!rp) {
+ err = -ENOMEM;
+ goto err_node_put;
+ }
err = of_address_to_resource(port, 0, &rp->regs);
if (err < 0) {
dev_err(dev, "failed to parse address: %d\n", err);
- return err;
+ goto err_node_put;
}
INIT_LIST_HEAD(&rp->list);
@@ -1966,6 +1973,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
return err;
return 0;
+
+err_node_put:
+ of_node_put(port);
+ return err;
}
/*
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index d6196f7b1d58..7f6b454bca65 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -847,7 +847,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
struct rcar_msi *msi = &pcie->msi;
- unsigned long base;
+ phys_addr_t base;
int err, i;
mutex_init(&msi->lock);
@@ -886,10 +886,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
/* setup MSI data target */
msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!msi->pages) {
+ err = -ENOMEM;
+ goto err;
+ }
base = virt_to_phys((void *)msi->pages);
- rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
+ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
/* enable all MSI interrupts */
rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
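
The R-Car MSI fix above keeps the target address in phys_addr_t and splits it
explicitly, since an unsigned long would truncate it on 32-bit builds. A
sketch of the split; msi_page, ENABLE_BIT and the REG_ADDR_* offsets are
illustrative names:

    phys_addr_t target = virt_to_phys(msi_page);

    writel(lower_32_bits(target) | ENABLE_BIT, base + REG_ADDR_LO);
    writel(upper_32_bits(target), base + REG_ADDR_HI);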
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 94fdd295aae2..3bba87af0b6b 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -456,15 +456,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int i;
mutex_lock(&msi->lock);
- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
- nr_irqs, 0);
- if (bit >= INT_PCI_MSI_NR) {
+ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
mutex_unlock(&msi->lock);
return -ENOSPC;
}
- bitmap_set(msi->bitmap, bit, nr_irqs);
-
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
domain->host_data, handle_simple_irq,
@@ -482,7 +480,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct nwl_msi *msi = &pcie->msi;
mutex_lock(&msi->lock);
- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
mutex_unlock(&msi->lock);
}
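
The xilinx-nwl change above matters for multi-MSI, which needs a naturally
aligned, power-of-two sized block of vectors; bitmap_find_free_region()
guarantees that, while the old bitmap_find_next_zero_area()/bitmap_set()
pair did not. A sketch of the allocate/release pairing:

    int order = get_count_order(nr_irqs);   /* e.g. nr_irqs = 3 -> order = 2 */
    int hwirq = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, order);

    if (hwirq < 0)
            return -ENOSPC;
    /* vectors hwirq .. hwirq + (1 << order) - 1 are now reserved */
    bitmap_release_region(msi->bitmap, hwirq, order);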
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 61332f4d51c3..c3964fca57b0 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -337,14 +337,19 @@ static const struct irq_domain_ops msi_domain_ops = {
* xilinx_pcie_enable_msi - Enable MSI support
* @port: PCIe port information
*/
-static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
phys_addr_t msg_addr;
port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ if (!port->msi_pages)
+ return -ENOMEM;
+
msg_addr = virt_to_phys((void *)port->msi_pages);
pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+
+ return 0;
}
/* INTx Functions */
@@ -516,6 +521,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
+ int ret;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
@@ -544,7 +550,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- xilinx_pcie_enable_msi(port);
+ ret = xilinx_pcie_enable_msi(port);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index c614ff7c3bc3..d3562df64456 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -55,6 +55,7 @@ static struct device_node *find_vio_slot_node(char *drc_name)
if ((rc == 0) && (!strcmp(drc_name, name)))
break;
}
+ of_node_put(parent);
return dn;
}
@@ -78,6 +79,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
return np;
}
+/* Returns a device_node with its reference count incremented */
static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
{
struct device_node *dn;
@@ -313,6 +315,7 @@ int dlpar_add_slot(char *drc_name)
rc = dlpar_add_phb(drc_name, dn);
break;
}
+ of_node_put(dn);
printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
exit:
@@ -446,6 +449,7 @@ int dlpar_remove_slot(char *drc_name)
rc = dlpar_remove_pci_slot(drc_name, dn);
break;
}
+ of_node_put(dn);
vm_unmap_aliases();
printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
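
The of_node_put() additions in rpadlpar_core.c (and in several pinctrl and
phy hunks later in this diff) all enforce the same rule: a node returned by
an of_find_*()/of_get_*() lookup, or still held when a
for_each_child_of_node() walk is left early, carries a reference the caller
must drop. A minimal sketch; do_something_with() is a placeholder:

    struct device_node *dn = of_find_node_by_name(NULL, "slot");

    if (!dn)
            return -ENODEV;
    do_something_with(dn);
    of_node_put(dn);        /* balance the reference taken by the lookup */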
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1d32fe2d97aa..9ec3cb628b0b 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -181,6 +181,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
failed2:
sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
+ pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
mutex_lock(&iov->dev->sriov->lock);
pci_stop_and_remove_bus_device(virtfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 37f393f27efc..55ca14fbdd2a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -230,7 +230,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- if (flag)
+ if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c7f3408e3148..54b3f9bc5ad8 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -71,8 +71,8 @@ static struct pci_platform_pm_ops mid_pci_platform_pm = {
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_PENWELL),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
{}
};
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index e5d8e2e2bd30..717540161223 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -371,7 +371,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
return count;
}
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
(S_IWUSR|S_IWGRP),
NULL, remove_store);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 37f2053c31de..b3bbbee95d1d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -754,19 +754,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- if (platform_pci_power_manageable(dev))
- platform_pci_set_power_state(dev, PCI_D0);
-
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
-}
-
-/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -942,6 +929,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
EXPORT_SYMBOL(pci_set_power_state);
/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ __pci_start_power_transition(dev, PCI_D0);
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
* pci_choose_state - Choose the power state of a PCI device
* @dev: PCI device to be suspended
* @state: target sleep state for the whole system. This is the value
@@ -1786,6 +1784,13 @@ static void pci_pme_list_scan(struct work_struct *work)
*/
if (bridge && bridge->current_state != PCI_D0)
continue;
+ /*
+ * If the device is in D3cold it should not be
+ * polled either.
+ */
+ if (pme_dev->dev->current_state == PCI_D3cold)
+ continue;
+
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 80b417e697da..8d4a219a99f1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -210,6 +210,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ unsigned long start_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ if (parent->clear_retrain_link) {
+ /*
+ * Due to an erratum in some devices the Retrain Link bit
+ * needs to be cleared again manually to allow the link
+ * training to succeed.
+ */
+ reg16 &= ~PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Wait for link training end. Break out after waiting for timeout */
+ start_jiffies = jiffies;
+ for (;;) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+ break;
+ msleep(1);
+ }
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -219,7 +251,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
- unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -259,21 +290,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
- /* Retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end. Break out after waiting for timeout */
- start_jiffies = jiffies;
- for (;;) {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
- break;
- msleep(1);
- }
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ if (pcie_retrain_link(link))
return;
/* Training failed. Restore common clock configurations */
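
pcie_retrain_link() above factors out a jiffies-bounded poll; the same shape,
with condition_met() and the one-second budget as illustrative placeholders,
looks like this:

    unsigned long deadline = jiffies + msecs_to_jiffies(1000);

    for (;;) {
            if (condition_met())
                    break;                  /* success */
            if (time_after(jiffies, deadline))
                    break;                  /* give up after the timeout */
            msleep(1);
    }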
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index bab8ac63c4f3..3008bba360f3 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -29,7 +29,7 @@ static void pci_ptm_info(struct pci_dev *dev)
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
- snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index dedb12083d86..496296bc3581 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2046,6 +2046,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+ dev->clear_retrain_link = 1;
+ pci_info(dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
@@ -3326,6 +3343,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
static void quirk_no_pm_reset(struct pci_dev *dev)
{
@@ -3866,6 +3884,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
quirk_dma_func1_alias);
@@ -4426,7 +4446,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f30ca75b5b6c..e631636a0aa5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1833,12 +1833,18 @@ again:
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
@@ -1904,12 +1910,18 @@ again:
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
+ int idx;
res->start = fail_res->start;
res->end = fail_res->end;
res->flags = fail_res->flags;
- if (fail_res->dev->subordinate)
- res->flags = 0;
+
+ if (pci_is_bridge(fail_res->dev)) {
+ idx = res - &fail_res->dev->resource[0];
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
}
free_list(&fail_head);
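
Both hunks above recover the resource index by pointer arithmetic so that
only bridge windows (indices PCI_BRIDGE_RESOURCES through
PCI_BRIDGE_RESOURCE_END of pci_dev.resource[]) are released for the retry
pass, leaving regular BARs untouched. Condensed, the check reads:

    int idx = res - &fail_res->dev->resource[0];

    if (pci_is_bridge(fail_res->dev) &&
        idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END)
            res->flags = 0;         /* window gets re-sized on retry */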
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index af82edc7fa5c..9b899af86cd5 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -804,8 +804,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 97d4dd6ea924..aa02b19b7e0e 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -288,6 +288,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
error = of_property_read_u32(np, "reg", &channel_num);
if (error || channel_num > 2) {
dev_err(dev, "Invalid \"reg\" property\n");
+ of_node_put(np);
return error;
}
channel->select_mask = select_mask[channel_num];
@@ -303,6 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
&rcar_gen2_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PHY\n");
+ of_node_put(np);
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 547ca7b3f098..ddb530ee2255 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -144,6 +144,7 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
+static irqreturn_t twl4030_usb_irq(int irq, void *_twl);
/*
* If VBUS is valid or ID is ground, then we know a
* cable is present and we need to be runtime-enabled
@@ -392,6 +393,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
+static int __maybe_unused twl4030_usb_suspend(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ /*
+ * Runtime PM needs to be enabled again on resume, so disable the IRQ
+ * here to avoid handling it too early.
+ * Note: wakeup on USB plug works independently of this.
+ */
+ dev_dbg(twl->dev, "%s\n", __func__);
+ disable_irq(twl->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl4030_usb_resume(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ enable_irq(twl->irq);
+ /* check whether cable status changed */
+ twl4030_usb_irq(0, twl);
+
+ return 0;
+}
+
static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
@@ -652,6 +680,7 @@ static const struct phy_ops ops = {
static const struct dev_pm_ops twl4030_usb_pm_ops = {
SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
twl4030_usb_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume)
};
static int twl4030_usb_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 13a4c2774157..6adfb379ac7e 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
const struct ns2_pin_function *func;
const struct ns2_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index fc5b18d3db20..1e945aa77734 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -204,7 +204,6 @@ struct byt_gpio {
struct platform_device *pdev;
struct pinctrl_dev *pctl_dev;
struct pinctrl_desc pctl_desc;
- raw_spinlock_t lock;
const struct byt_pinctrl_soc_data *soc_data;
struct byt_community *communities_copy;
struct byt_gpio_pin_context *saved_context;
@@ -715,6 +714,8 @@ static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
NULL,
};
+static DEFINE_RAW_SPINLOCK(byt_lock);
+
static struct byt_community *byt_get_community(struct byt_gpio *vg,
unsigned int pin)
{
@@ -856,7 +857,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -876,7 +877,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static void byt_set_group_mixed_mux(struct byt_gpio *vg,
@@ -886,7 +887,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@@ -906,7 +907,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@@ -955,11 +956,17 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
+ /* Do not clear direct-irq enabled IRQs (from gpio_disable_free) */
+ if (value & BYT_DIRECT_IRQ_EN)
+ /* nothing to do */ ;
+ else
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@@ -971,7 +978,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
u32 value, gpio_mux;
unsigned long flags;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
/*
* In most cases, func pin mux 000 means GPIO function.
@@ -993,7 +1000,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
"pin %u forcibly re-configured as GPIO\n", offset);
}
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
pm_runtime_get(&vg->pdev->dev);
@@ -1021,7 +1028,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned long flags;
u32 value;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(val_reg);
value &= ~BYT_DIR_MASK;
@@ -1038,7 +1045,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
"Potential Error: Setting GPIO with direct_irq_en to output");
writel(value, val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1107,11 +1114,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
u32 conf, pull, val, debounce;
u16 arg = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
pull = conf & BYT_PULL_ASSIGN_MASK;
val = readl(val_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@@ -1138,9 +1145,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
debounce = readl(db_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@@ -1192,7 +1199,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
u32 conf, val, debounce;
int i, ret = 0;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
val = readl(val_reg);
@@ -1300,7 +1307,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (!ret)
writel(conf, conf_reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return ret;
}
@@ -1325,9 +1332,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return !!(val & BYT_LEVEL);
}
@@ -1342,13 +1349,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -1361,9 +1368,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
return GPIOF_DIR_OUT;
@@ -1406,14 +1413,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
const char *label;
unsigned int pin;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
pin = vg->soc_data->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i conf0 reg\n",
pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
conf0 = readl(reg);
@@ -1422,11 +1429,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i val reg\n", pin);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
val = readl(reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
comm = byt_get_community(vg, pin);
if (!comm) {
@@ -1510,9 +1517,9 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
writel(BIT(offset % 32), reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
}
static void byt_irq_mask(struct irq_data *d)
@@ -1536,7 +1543,7 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@@ -1557,7 +1564,7 @@ static void byt_irq_unmask(struct irq_data *d)
writel(value, reg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_irq_type(struct irq_data *d, unsigned int type)
@@ -1571,7 +1578,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
if (!reg || offset >= vg->chip.ngpio)
return -EINVAL;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@@ -1593,7 +1600,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1629,9 +1636,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
- raw_spin_lock(&vg->lock);
+ raw_spin_lock(&byt_lock);
pending = readl(reg);
- raw_spin_unlock(&vg->lock);
+ raw_spin_unlock(&byt_lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
@@ -1833,8 +1840,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
- raw_spin_lock_init(&vg->lock);
-
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1852,8 +1857,11 @@ static int byt_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1874,6 +1882,7 @@ static int byt_gpio_suspend(struct device *dev)
vg->saved_context[i].val = value;
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@@ -1881,8 +1890,11 @@ static int byt_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
+ unsigned long flags;
int i;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@@ -1920,6 +1932,7 @@ static int byt_gpio_resume(struct device *dev)
}
}
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
#endif
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 88ba9c50cc8e..b596f45426ea 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -479,7 +479,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
unsigned num_pins, num_configs, reserve;
unsigned long *configs;
struct property *pins;
- bool has_config;
u32 pinfunc;
int ret, i;
@@ -495,9 +494,6 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
return ret;
}
- if (num_configs)
- has_config = true;
-
num_pins = pins->length / sizeof(u32);
if (!num_pins) {
dev_err(pctldev->dev, "no pins found in node %s\n",
@@ -511,7 +507,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
* map for each pin.
*/
reserve = 1;
- if (has_config && num_pins >= 1)
+ if (num_configs)
reserve++;
reserve *= num_pins;
ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps,
@@ -534,7 +530,7 @@ static int atmel_pctl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps,
group, func);
- if (has_config) {
+ if (num_configs) {
ret = pinctrl_utils_add_map_configs(pctldev, map,
reserved_maps, num_maps, group,
configs, num_configs,
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 9f0904185909..9401a0630e80 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1545,16 +1545,6 @@ void at91_pinctrl_gpio_resume(void)
#define gpio_irq_set_wake NULL
#endif /* CONFIG_PM */
-static struct irq_chip gpio_irqchip = {
- .name = "GPIO",
- .irq_ack = gpio_irq_ack,
- .irq_disable = gpio_irq_mask,
- .irq_mask = gpio_irq_mask,
- .irq_unmask = gpio_irq_unmask,
- /* .irq_set_type is set dynamically */
- .irq_set_wake = gpio_irq_set_wake,
-};
-
static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1595,12 +1585,22 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
+ struct irq_chip *gpio_irqchip;
int ret, i;
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ if (!gpio_irqchip)
+ return -ENOMEM;
+
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
- /* Setup proper .irq_set_type function */
- gpio_irqchip.irq_set_type = at91_gpio->ops->irq_type;
+ gpio_irqchip->name = "GPIO";
+ gpio_irqchip->irq_ack = gpio_irq_ack;
+ gpio_irqchip->irq_disable = gpio_irq_mask;
+ gpio_irqchip->irq_mask = gpio_irq_mask;
+ gpio_irqchip->irq_unmask = gpio_irq_unmask;
+ gpio_irqchip->irq_set_wake = gpio_irq_set_wake,
+ gpio_irqchip->irq_set_type = at91_gpio->ops->irq_type;
/* Disable irqs of this PIO controller */
writel_relaxed(~0, at91_gpio->regbase + PIO_IDR);
@@ -1611,7 +1611,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* interrupt.
*/
ret = gpiochip_irqchip_add(&at91_gpio->chip,
- &gpio_irqchip,
+ gpio_irqchip,
0,
handle_edge_irq,
IRQ_TYPE_NONE);
@@ -1629,7 +1629,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
if (!gpiochip_prev) {
/* Then register the chain on the parent IRQ */
gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
+ gpio_irqchip,
at91_gpio->pioc_virq,
gpio_irq_handler);
return 0;
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index e053f1fa5512..ab2a451f3156 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = {
LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA),
};
-/**
- * enum lpc18xx_pin_config_param - possible pin configuration parameters
- * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt
- * controller.
- */
-enum lpc18xx_pin_config_param {
- PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1,
-};
+/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */
+#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1)
static const struct pinconf_generic_params lpc18xx_params[] = {
{"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0},
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 55375b1b3cc8..b2b7e238bda9 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1368,6 +1368,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
if (!of_find_property(child, "gpio-controller", NULL)) {
dev_err(pctl->dev,
"No gpio-controller property for bank %u\n", i);
+ of_node_put(child);
ret = -ENODEV;
goto err;
}
@@ -1375,6 +1376,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
irq = irq_of_parse_and_map(child, 0);
if (irq < 0) {
dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+ of_node_put(child);
ret = irq;
goto err;
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index f826793e972c..417cd3bd7e0c 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2208,6 +2208,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
base,
&rockchip_regmap_config);
}
+ of_node_put(node);
}
bank->irq = irq_of_parse_and_map(bank->of_node, 0);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index dd85ad1807f5..8ea1c6e2a6b2 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1748,14 +1748,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
}
xway_pctrl_desc.pins = xway_info.pads;
- /* register the gpio chip */
- xway_chip.parent = &pdev->dev;
- ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register gpio chip\n");
- return ret;
- }
-
/* setup the data needed by pinctrl */
xway_pctrl_desc.name = dev_name(&pdev->dev);
xway_pctrl_desc.npins = xway_chip.ngpio;
@@ -1777,10 +1769,33 @@ static int pinmux_xway_probe(struct platform_device *pdev)
return ret;
}
- /* finish with registering the gpio range in pinctrl */
- xway_gpio_range.npins = xway_chip.ngpio;
- xway_gpio_range.base = xway_chip.base;
- pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range);
+ /* register the gpio chip */
+ xway_chip.parent = &pdev->dev;
+ xway_chip.owner = THIS_MODULE;
+ xway_chip.of_node = pdev->dev.of_node;
+ ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register gpio chip\n");
+ return ret;
+ }
+
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+ * In order to remain compatible with older, existing DeviceTree
+ * files which don't set the "gpio-ranges" property, or with systems
+ * that use ACPI, the driver has to call gpiochip_add_pin_range().
+ */
+ if (!of_property_read_bool(pdev->dev.of_node, "gpio-ranges")) {
+ /* finish with registering the gpio range in pinctrl */
+ xway_gpio_range.npins = xway_chip.ngpio;
+ xway_gpio_range.base = xway_chip.base;
+ pinctrl_add_gpio_range(xway_info.pctrl, &xway_gpio_range);
+ }
+
dev_info(&pdev->dev, "Init done\n");
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index e0ecffcbe11f..f8b54cfc90c7 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -967,15 +967,12 @@ enum zynq_io_standards {
zynq_iostd_max
};
-/**
- * enum zynq_pin_config_param - possible pin configuration parameters
- * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
+/*
+ * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
* this parameter (on a custom format) tells the driver which alternative
* IO standard to use.
*/
-enum zynq_pin_config_param {
- PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
-};
+#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1)
static const struct pinconf_generic_params zynq_dt_params[] = {
{"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18},
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 8093afd17aa4..69641c9e7d17 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -790,10 +790,23 @@ static int pmic_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
- if (ret) {
- dev_err(dev, "failed to add pin range\n");
- goto err_range;
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+ * In order to remain compatible with older, existing DeviceTree
+ * files which don't set the "gpio-ranges" property, or with systems
+ * that use ACPI, the driver has to call gpiochip_add_pin_range().
+ */
+ if (!of_property_read_bool(dev->of_node, "gpio-ranges")) {
+ ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0,
+ npins);
+ if (ret) {
+ dev_err(dev, "failed to add pin range\n");
+ goto err_range;
+ }
}
return 0;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index e86c4de2f6db..92855f45bc53 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -762,12 +762,23 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_add_pin_range(&pctrl->chip,
- dev_name(pctrl->dev),
- 0, 0, pctrl->chip.ngpio);
- if (ret) {
- dev_err(pctrl->dev, "failed to add pin range\n");
- goto unregister_gpiochip;
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+ * In order to remain compatible with older, existing DeviceTree
+ * files which don't set the "gpio-ranges" property, or with systems
+ * that use ACPI, the driver has to call gpiochip_add_pin_range().
+ */
+ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
+ ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+ 0, 0, pctrl->chip.ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "failed to add pin range\n");
+ goto unregister_gpiochip;
+ }
}
platform_set_drvdata(pdev, pctrl);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 3d92f827da7a..0839b70a30ee 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -495,8 +495,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data)
+ if (!eint_data) {
+ of_node_put(eint_np);
return -ENOMEM;
+ }
eint_data->drvdata = d;
@@ -508,12 +510,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint_np);
return -ENXIO;
}
eint_data->parents[i] = irq;
irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
}
+ of_node_put(eint_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 43407ab248f5..0cd9f3a7bb11 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -713,6 +713,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(dev, "could not allocate memory for wkup eint data\n");
+ of_node_put(eint0_np);
return -ENOMEM;
}
data->drvdata = d;
@@ -723,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint0_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint0_np);
return -ENXIO;
}
@@ -730,6 +732,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
s3c64xx_eint0_handlers[i],
data);
}
+ of_node_put(eint0_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 620727fabe64..6b77a1bb5b20 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -281,6 +281,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
return ret;
}
}
@@ -770,8 +771,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
@@ -782,8 +785,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(func_np);
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
index 1cbbe04d7df6..eafd8edbcbe9 100644
--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
+++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
@@ -1263,6 +1263,14 @@ static const char * const dtv_groups[] = {
"dtv_b",
};
+static const char * const err_rst_reqb_groups[] = {
+ "err_rst_reqb",
+};
+
+static const char * const ext_clki_groups[] = {
+ "ext_clki",
+};
+
static const char * const iic0_groups[] = {
"iic0",
};
@@ -1285,6 +1293,10 @@ static const char * const lcd_groups[] = {
"yuv3",
};
+static const char * const lowpwr_groups[] = {
+ "lowpwr",
+};
+
static const char * const ntsc_groups[] = {
"ntsc_clk",
"ntsc_data",
@@ -1298,6 +1310,10 @@ static const char * const pwm1_groups[] = {
"pwm1",
};
+static const char * const ref_clko_groups[] = {
+ "ref_clko",
+};
+
static const char * const sd_groups[] = {
"sd_cki",
};
@@ -1391,13 +1407,17 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(cam),
SH_PFC_FUNCTION(cf),
SH_PFC_FUNCTION(dtv),
+ SH_PFC_FUNCTION(err_rst_reqb),
+ SH_PFC_FUNCTION(ext_clki),
SH_PFC_FUNCTION(iic0),
SH_PFC_FUNCTION(iic1),
SH_PFC_FUNCTION(jtag),
SH_PFC_FUNCTION(lcd),
+ SH_PFC_FUNCTION(lowpwr),
SH_PFC_FUNCTION(ntsc),
SH_PFC_FUNCTION(pwm0),
SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(ref_clko),
SH_PFC_FUNCTION(sd),
SH_PFC_FUNCTION(sdi0),
SH_PFC_FUNCTION(sdi1),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 35f436bcb849..e9739dbcb356 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -1982,7 +1982,7 @@ static const unsigned int gether_gmii_pins[] = {
*/
185, 186, 187, 188, 189, 190, 191, 192, 174, 161, 204,
171, 170, 169, 168, 167, 166, 173, 172, 176, 184, 183, 203,
- 205, 163, 206, 207,
+ 205, 163, 206, 207, 158,
};
static const unsigned int gether_gmii_mux[] = {
ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK,
@@ -2154,6 +2154,7 @@ static const unsigned int lcd0_data24_1_mux[] = {
LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
+ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT163_MARK,
LCD0_D19_PORT162_MARK, LCD0_D20_PORT161_MARK, LCD0_D21_PORT158_MARK,
LCD0_D22_PORT160_MARK, LCD0_D23_PORT159_MARK,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 18ef7042b3d1..771689a41dbf 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2324,7 +2324,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATAG0_A, 0, FN_REMOCON_B, 0,
/* IP0_11_8 [4] */
FN_SD1_DAT2_A, FN_MMC_D2, 0, FN_BS,
- FN_ATADIR0_A, 0, FN_SDSELF_B, 0,
+ FN_ATADIR0_A, 0, FN_SDSELF_A, 0,
FN_PWM4_B, 0, 0, 0,
0, 0, 0, 0,
/* IP0_7_5 [3] */
@@ -2366,7 +2366,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_TS_SDAT0_A, 0, 0, 0,
0, 0, 0, 0,
/* IP1_10_8 [3] */
- FN_SD1_CLK_B, FN_MMC_D6, 0, FN_A24,
+ FN_SD1_CD_A, FN_MMC_D6, 0, FN_A24,
FN_DREQ1_A, 0, FN_HRX0_B, FN_TS_SPSYNC0_A,
/* IP1_7_5 [3] */
FN_A23, FN_HTX0_B, FN_TX2_B, FN_DACK2_A,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index baa98d7fe947..dd350e296142 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -3136,8 +3136,7 @@ static const unsigned int qspi_data4_b_pins[] = {
RCAR_GP_PIN(6, 4),
};
static const unsigned int qspi_data4_b_mux[] = {
- SPCLK_B_MARK, MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
- IO2_B_MARK, IO3_B_MARK, SSL_B_MARK,
+ MOSI_IO0_B_MARK, MISO_IO1_B_MARK, IO2_B_MARK, IO3_B_MARK,
};
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
@@ -4265,17 +4264,14 @@ static const unsigned int vin1_b_data18_pins[] = {
};
static const unsigned int vin1_b_data18_mux[] = {
/* B */
- VI1_DATA0_B_MARK, VI1_DATA1_B_MARK,
VI1_DATA2_B_MARK, VI1_DATA3_B_MARK,
VI1_DATA4_B_MARK, VI1_DATA5_B_MARK,
VI1_DATA6_B_MARK, VI1_DATA7_B_MARK,
/* G */
- VI1_G0_B_MARK, VI1_G1_B_MARK,
VI1_G2_B_MARK, VI1_G3_B_MARK,
VI1_G4_B_MARK, VI1_G5_B_MARK,
VI1_G6_B_MARK, VI1_G7_B_MARK,
/* R */
- VI1_R0_B_MARK, VI1_R1_B_MARK,
VI1_R2_B_MARK, VI1_R3_B_MARK,
VI1_R4_B_MARK, VI1_R5_B_MARK,
VI1_R6_B_MARK, VI1_R7_B_MARK,
@@ -5082,7 +5078,7 @@ static const char * const scifb2_groups[] = {
"scifb2_data_b",
"scifb2_clk_b",
"scifb2_ctrl_b",
- "scifb0_data_c",
+ "scifb2_data_c",
"scifb2_clk_c",
"scifb2_data_d",
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
index 21badb6166b9..97998929db93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -1863,6 +1863,7 @@ static const char * const vin1_groups[] = {
"vin1_data8",
"vin1_data24_b",
"vin1_data20_b",
+ "vin1_data18_b",
"vin1_data16_b",
"vin1_sync",
"vin1_field",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index ef093ac0cf2f..fe8c9c24634d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -4826,7 +4826,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
- 1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3) {
+ 1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
/* IP9_31 [1] */
0, 0,
/* IP9_30_28 [3] */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 8070765311db..3ddb9565ed80 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -500,17 +500,15 @@ enum {
SD_WP_MARK, SD_CLK_MARK, SD_CMD_MARK,
CRX0_MARK, CRX1_MARK,
CTX0_MARK, CTX1_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
PWM1A_MARK, PWM1B_MARK, PWM1C_MARK, PWM1D_MARK,
PWM1E_MARK, PWM1F_MARK, PWM1G_MARK, PWM1H_MARK,
PWM2A_MARK, PWM2B_MARK, PWM2C_MARK, PWM2D_MARK,
PWM2E_MARK, PWM2F_MARK, PWM2G_MARK, PWM2H_MARK,
IERXD_MARK, IETXD_MARK,
- CRX0_CRX1_MARK,
WDTOVF_MARK,
- CRX0X1_MARK,
-
/* DMAC */
TEND0_MARK, DACK0_MARK, DREQ0_MARK,
TEND1_MARK, DACK1_MARK, DREQ1_MARK,
@@ -998,12 +996,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(PJ3_DATA, PJ3MD_00),
PINMUX_DATA(CRX1_MARK, PJ3MD_01),
- PINMUX_DATA(CRX0X1_MARK, PJ3MD_10),
+ PINMUX_DATA(CRX0_CRX1_MARK, PJ3MD_10),
PINMUX_DATA(IRQ1_PJ_MARK, PJ3MD_11),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(CTX1_MARK, PJ2MD_001),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ2MD_010),
+ PINMUX_DATA(CTX0_CTX1_MARK, PJ2MD_010),
PINMUX_DATA(CS2_MARK, PJ2MD_011),
PINMUX_DATA(SCK0_MARK, PJ2MD_100),
PINMUX_DATA(LCD_M_DISP_MARK, PJ2MD_101),
@@ -1248,6 +1246,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0),
GPIO_FN(CRX0_CRX1),
@@ -1716,6 +1715,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4) {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PF12MD_000, PF12MD_001, 0, PF12MD_011,
PF12MD_100, PF12MD_101, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 }
@@ -1759,8 +1761,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
PF1MD_000, PF1MD_001, PF1MD_010, PF1MD_011,
PF1MD_100, PF1MD_101, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
- }
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ PF0MD_000, PF0MD_001, PF0MD_010, PF0MD_011,
+ PF0MD_100, PF0MD_101, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PFIOR0", 0xfffe38b2, 16, 1) {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index a50d22bef1f4..3df0c0d139d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -740,13 +740,12 @@ enum {
CRX0_MARK, CTX0_MARK,
CRX1_MARK, CTX1_MARK,
CRX2_MARK, CTX2_MARK,
- CRX0_CRX1_MARK,
- CRX0_CRX1_CRX2_MARK,
- CTX0CTX1CTX2_MARK,
+ CRX0_CRX1_MARK, CTX0_CTX1_MARK,
+ CRX0_CRX1_CRX2_MARK, CTX0_CTX1_CTX2_MARK,
CRX1_PJ22_MARK, CTX1_PJ23_MARK,
CRX2_PJ20_MARK, CTX2_PJ21_MARK,
- CRX0CRX1_PJ22_MARK,
- CRX0CRX1CRX2_PJ20_MARK,
+ CRX0_CRX1_PJ22_MARK, CTX0_CTX1_PJ23_MARK,
+ CRX0_CRX1_CRX2_PJ20_MARK, CTX0_CTX1_CTX2_PJ21_MARK,
/* VDC */
DV_CLK_MARK,
@@ -824,6 +823,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CS3_MARK, PC8MD_001),
PINMUX_DATA(TXD7_MARK, PC8MD_010),
PINMUX_DATA(CTX1_MARK, PC8MD_011),
+ PINMUX_DATA(CTX0_CTX1_MARK, PC8MD_100),
PINMUX_DATA(PC7_DATA, PC7MD_000),
PINMUX_DATA(CKE_MARK, PC7MD_001),
@@ -836,11 +836,12 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(CAS_MARK, PC6MD_001),
PINMUX_DATA(SCK7_MARK, PC6MD_010),
PINMUX_DATA(CTX0_MARK, PC6MD_011),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC6MD_100),
PINMUX_DATA(PC5_DATA, PC5MD_000),
PINMUX_DATA(RAS_MARK, PC5MD_001),
PINMUX_DATA(CRX0_MARK, PC5MD_011),
- PINMUX_DATA(CTX0CTX1CTX2_MARK, PC5MD_100),
+ PINMUX_DATA(CTX0_CTX1_CTX2_MARK, PC5MD_100),
PINMUX_DATA(IRQ0_PC_MARK, PC5MD_101),
PINMUX_DATA(PC4_DATA, PC4MD_00),
@@ -1292,30 +1293,32 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
- PINMUX_DATA(CTX1_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX1_PJ23_MARK, PJ23MD_101),
+ PINMUX_DATA(CTX0_CTX1_PJ23_MARK, PJ23MD_110),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
- PINMUX_DATA(CRX1_MARK, PJ22MD_101),
- PINMUX_DATA(CRX0_CRX1_MARK, PJ22MD_110),
+ PINMUX_DATA(CRX1_PJ22_MARK, PJ22MD_101),
+ PINMUX_DATA(CRX0_CRX1_PJ22_MARK, PJ22MD_110),
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
- PINMUX_DATA(CTX2_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX2_PJ21_MARK, PJ21MD_101),
+ PINMUX_DATA(CTX0_CTX1_CTX2_PJ21_MARK, PJ21MD_110),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
- PINMUX_DATA(CRX2_MARK, PJ20MD_101),
- PINMUX_DATA(CRX0CRX1CRX2_PJ20_MARK, PJ20MD_110),
+ PINMUX_DATA(CRX2_PJ20_MARK, PJ20MD_101),
+ PINMUX_DATA(CRX0_CRX1_CRX2_PJ20_MARK, PJ20MD_110),
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
@@ -1666,12 +1669,24 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(WDTOVF),
/* CAN */
+ GPIO_FN(CTX2),
+ GPIO_FN(CRX2),
GPIO_FN(CTX1),
GPIO_FN(CRX1),
GPIO_FN(CTX0),
GPIO_FN(CRX0),
+ GPIO_FN(CTX0_CTX1),
GPIO_FN(CRX0_CRX1),
+ GPIO_FN(CTX0_CTX1_CTX2),
GPIO_FN(CRX0_CRX1_CRX2),
+ GPIO_FN(CTX2_PJ21),
+ GPIO_FN(CRX2_PJ20),
+ GPIO_FN(CTX1_PJ23),
+ GPIO_FN(CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_PJ23),
+ GPIO_FN(CRX0_CRX1_PJ22),
+ GPIO_FN(CTX0_CTX1_CTX2_PJ21),
+ GPIO_FN(CRX0_CRX1_CRX2_PJ20),
/* DMAC */
GPIO_FN(TEND0),
@@ -2119,7 +2134,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PC8_IN, PC8_OUT,
PC7_IN, PC7_OUT,
PC6_IN, PC6_OUT,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d25e6f674d0a..6dca760f9f28 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3086,6 +3086,7 @@ static const unsigned int tpu4_to2_mux[] = {
};
static const unsigned int tpu4_to3_pins[] = {
/* TO */
+ PIN_NUMBER(6, 26),
};
static const unsigned int tpu4_to3_mux[] = {
TPU4TO3_MARK,
@@ -3366,7 +3367,8 @@ static const char * const fsic_groups[] = {
"fsic_sclk_out",
"fsic_data_in",
"fsic_data_out",
- "fsic_spdif",
+ "fsic_spdif_0",
+ "fsic_spdif_1",
};
static const char * const fsid_groups[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 6502e676d368..c691e5e9d9de 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1453,7 +1453,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
@@ -2213,31 +2213,31 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP10_22 [1] */
FN_CAN_CLK_A, FN_RX4_D,
/* IP10_21_19 [3] */
- FN_AUDIO_CLKOUT, FN_TX1_E, FN_HRTS0_C, FN_FSE_B,
- FN_LCD_M_DISP_B, 0, 0, 0,
+ FN_AUDIO_CLKOUT, FN_TX1_E, 0, FN_HRTS0_C, FN_FSE_B,
+ FN_LCD_M_DISP_B, 0, 0,
/* IP10_18_16 [3] */
- FN_AUDIO_CLKC, FN_SCK1_E, FN_HCTS0_C, FN_FRB_B,
- FN_LCD_VEPWC_B, 0, 0, 0,
+ FN_AUDIO_CLKC, FN_SCK1_E, 0, FN_HCTS0_C, FN_FRB_B,
+ FN_LCD_VEPWC_B, 0, 0,
/* IP10_15 [1] */
FN_AUDIO_CLKB_A, FN_LCD_CLK_B,
/* IP10_14_12 [3] */
FN_AUDIO_CLKA_A, FN_VI1_CLK_B, FN_SCK1_D, FN_IECLK_B,
FN_LCD_FLM_B, 0, 0, 0,
/* IP10_11_9 [3] */
- FN_SSI_SDATA3, FN_VI1_7_B, FN_HTX0_C, FN_FWE_B,
- FN_LCD_CL2_B, 0, 0, 0,
+ FN_SSI_SDATA3, FN_VI1_7_B, 0, FN_HTX0_C, FN_FWE_B,
+ FN_LCD_CL2_B, 0, 0,
/* IP10_8_6 [3] */
- FN_SSI_SDATA2, FN_VI1_6_B, FN_HRX0_C, FN_FRE_B,
- FN_LCD_CL1_B, 0, 0, 0,
+ FN_SSI_SDATA2, FN_VI1_6_B, 0, FN_HRX0_C, FN_FRE_B,
+ FN_LCD_CL1_B, 0, 0,
/* IP10_5_3 [3] */
FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B,
- FN_LCD_DON_B, 0, 0, 0,
+ FN_LCD_DON_B, 0, 0,
/* IP10_2_0 [3] */
FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B,
FN_LCD_DATA15_B, 0, 0, 0 }
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
- 3, 1, 2, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
+ 3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
/* IP11_31_29 [3] */
0, 0, 0, 0, 0, 0, 0, 0,
/* IP11_28 [1] */
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 277622b4b6fb..1d9f63e954c7 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -52,7 +52,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
{
- writel(val, pmx->regs[bank] + reg);
+ writel_relaxed(val, pmx->regs[bank] + reg);
+ /* make sure pinmux register write completed */
+ pmx_readl(pmx, bank, reg);
}
static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
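The pinctrl-tegra.c hunk above switches pmx_writel() to writel_relaxed() followed by a read-back of the same register, so the posted write is known to have reached the pinmux block before the caller continues. A minimal user-space sketch of that write-then-read-back shape, with a plain volatile array standing in for the register bank (fake_regs, pmx_read() and pmx_write() are invented for illustration, not driver API):
/* Stand-alone sketch of the write-then-read-back pattern used in
 * pmx_writel() above.  The "register bank" here is just a volatile
 * array; in the real driver the read-back forces the posted MMIO
 * write to complete before pinmuxing continues.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_regs[16];		/* pretend MMIO window */

static uint32_t pmx_read(unsigned int reg)
{
	return fake_regs[reg];
}

static void pmx_write(uint32_t val, unsigned int reg)
{
	fake_regs[reg] = val;			/* relaxed write */
	(void)pmx_read(reg);			/* read back: order/flush the write */
}

int main(void)
{
	pmx_write(0x40, 3);
	printf("reg 3 = 0x%x\n", pmx_read(3));
	return 0;
}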
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index cfa3e850c49f..d225a835a64c 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -67,6 +67,17 @@ static int send_command(struct cros_ec_device *ec_dev,
else
xfer_fxn = ec_dev->cmd_xfer;
+ if (!xfer_fxn) {
+ /*
+ * This error can happen if a communication error happened and
+ * the EC is trying to use protocol v2, on an underlying
+ * communication mechanism that does not support v2.
+ */
+ dev_err_once(ec_dev->dev,
+ "missing EC transfer API, cannot send command\n");
+ return -EIO;
+ }
+
ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
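The cros_ec_proto.c hunk above adds a guard so send_command() returns -EIO instead of jumping through a NULL transfer hook when the protocol fell back to v2 on a transport that never registered cmd_xfer. A hedged stand-alone sketch of the same NULL-callback guard; struct fake_ec and its members are invented:
/* Check the function pointer before the indirect call, as added to
 * send_command() above.  Everything here is illustrative.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct fake_ec {
	int (*pkt_xfer)(struct fake_ec *ec, const char *msg);
	int (*cmd_xfer)(struct fake_ec *ec, const char *msg);
	int proto_version;
};

static int send_command(struct fake_ec *ec, const char *msg)
{
	int (*xfer)(struct fake_ec *, const char *);

	xfer = (ec->proto_version > 2) ? ec->pkt_xfer : ec->cmd_xfer;
	if (!xfer) {
		fprintf(stderr, "missing transfer hook, cannot send command\n");
		return -EIO;		/* fail cleanly instead of crashing */
	}
	return xfer(ec, msg);
}

int main(void)
{
	struct fake_ec ec = { .proto_version = 2 };	/* no hooks registered */

	printf("send_command() = %d\n", send_command(&ec, "hello"));
	return 0;
}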
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 4300a558d0f3..d02214a3f8e3 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -155,7 +155,7 @@ static int __init loongson_hwmon_init(void)
cpu_hwmon_dev = hwmon_device_register(NULL);
if (IS_ERR(cpu_hwmon_dev)) {
- ret = -ENOMEM;
+ ret = PTR_ERR(cpu_hwmon_dev);
pr_err("hwmon_device_register fail!\n");
goto fail_hwmon_device_register;
}
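In cpu_hwmon.c the probe path now propagates PTR_ERR(cpu_hwmon_dev) instead of overwriting every failure with -ENOMEM. The sketch below uses simplified user-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers (the real ones live in include/linux/err.h) just to show why the encoded error is worth preserving:
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
static inline void *ERR_PTR(long err)		{ return (void *)err; }
static inline long PTR_ERR(const void *p)	{ return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *register_device(void)
{
	return ERR_PTR(-EBUSY);		/* pretend registration failed with -EBUSY */
}

int main(void)
{
	void *dev = register_device();
	long ret = 0;

	if (IS_ERR(dev))
		ret = PTR_ERR(dev);	/* keep -EBUSY instead of forcing -ENOMEM */

	printf("ret = %ld\n", ret);
	return 0;
}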
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index fe419935041c..bee2115ecf10 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -570,7 +570,7 @@ static ssize_t show_hdmi_source(struct device *dev,
return scnprintf(buf, PAGE_SIZE,
"input [gpu] unknown\n");
}
- pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 69ffbd7b76f7..0fd7e40b86a0 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -78,10 +78,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_q500a = {
.i8042_filter = asus_q500a_i8042_filter,
+ .wmi_backlight_set_devstate = true,
};
/*
@@ -92,15 +94,18 @@ static struct quirk_entry quirk_asus_q500a = {
static struct quirk_entry quirk_asus_x55u = {
.wapf = 4,
.wmi_backlight_power = true,
+ .wmi_backlight_set_devstate = true,
.no_display_toggle = true,
};
static struct quirk_entry quirk_asus_wapf4 = {
.wapf = 4,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_no_rfkill = {
@@ -114,13 +119,16 @@ static struct quirk_entry quirk_no_rfkill_wapf4 = {
static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
+ .wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x550lb = {
+ .wmi_backlight_set_devstate = true,
.xusb2pr = 0x01D9,
};
-static struct quirk_entry quirk_asus_ux330uak = {
+static struct quirk_entry quirk_asus_forceals = {
+ .wmi_backlight_set_devstate = true,
.wmi_force_als_set = true,
};
@@ -431,7 +439,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
},
- .driver_data = &quirk_asus_ux330uak,
+ .driver_data = &quirk_asus_forceals,
},
{
.callback = dmi_matched,
@@ -442,6 +450,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_x550lb,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. UX430UQ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"),
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
{},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 10bd13b30178..4ae758c3d8ba 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -461,13 +461,7 @@ static void kbd_led_update(struct work_struct *work)
asus = container_of(work, struct asus_wmi, kbd_led_work);
- /*
- * bits 0-2: level
- * bit 7: light on/off
- */
- if (asus->kbd_led_wk > 0)
- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
-
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
}
@@ -2154,7 +2148,7 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
- } else
+ } else if (asus->driver->quirks->wmi_backlight_set_devstate)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
status = wmi_install_notify_handler(asus->driver->event_guid,
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 5db052d1de1e..53bab79780e2 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -45,6 +45,7 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
bool wmi_backlight_native;
+ bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
int wapf;
/*
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 454cb2ee3cee..f065529390fe 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -90,7 +90,7 @@ struct bios_args {
u32 command;
u32 commandtype;
u32 datasize;
- u32 data;
+ u8 data[128];
};
struct bios_return {
@@ -198,7 +198,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
.command = write ? 0x2 : 0x1,
.commandtype = query,
.datasize = insize,
- .data = 0,
+ .data = { 0 },
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -206,7 +206,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
- memcpy(&args.data, buffer, insize);
+ memcpy(&args.data[0], buffer, insize);
wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output);
@@ -308,7 +308,7 @@ static int __init hp_wmi_bios_2008_later(void)
static int __init hp_wmi_bios_2009_later(void)
{
- int state = 0;
+ u8 state[128];
int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
sizeof(state), sizeof(state));
if (!ret)
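The hp-wmi.c change widens bios_args.data to a fixed 128-byte array, so query payloads larger than four bytes fit, and the existing size check in front of the memcpy() keeps oversized input out. A small illustrative sketch of the same bounded copy; fill_args() and the field layout here are stand-ins, not the driver's real interface:
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bios_args {
	uint32_t command;
	uint32_t commandtype;
	uint32_t datasize;
	uint8_t data[128];
};

static int fill_args(struct bios_args *args, const void *buf, size_t insize)
{
	if (insize > sizeof(args->data))
		return -EINVAL;			/* reject oversized payloads up front */

	memset(args->data, 0, sizeof(args->data));
	memcpy(args->data, buf, insize);
	args->datasize = (uint32_t)insize;
	return 0;
}

int main(void)
{
	struct bios_args args = { .command = 0x1 };
	const char payload[] = "query";

	printf("fill_args() = %d\n", fill_args(&args, payload, sizeof(payload)));
	return 0;
}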
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 0bf51d574fa9..f2b9dd82128f 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -620,13 +620,17 @@ static int ipc_create_pmc_devices(void)
if (ret) {
dev_err(ipcdev.dev, "Failed to add punit platform device\n");
platform_device_unregister(ipcdev.tco_dev);
+ return ret;
}
if (!ipcdev.telem_res_inval) {
ret = ipc_create_telemetry_device();
- if (ret)
+ if (ret) {
dev_warn(ipcdev.dev,
"Failed to add telemetry platform device\n");
+ platform_device_unregister(ipcdev.punit_dev);
+ platform_device_unregister(ipcdev.tco_dev);
+ }
}
return ret;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index c890a49587e4..f65558638bc8 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4422,14 +4422,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
return AE_OK;
}
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
resource->type);
+ return AE_CTRL_TERMINATE;
- case ACPI_RESOURCE_TYPE_END_TAG:
- return AE_OK;
}
- return AE_CTRL_TERMINATE;
}
static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index fa0f19b975a6..bb7b817cca59 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -994,8 +994,7 @@ static int omap_sr_remove(struct platform_device *pdev)
if (sr_info->autocomp_active)
sr_stop_vddautocomp(sr_info);
- if (sr_info->dbg_dir)
- debugfs_remove_recursive(sr_info->dbg_dir);
+ debugfs_remove_recursive(sr_info->dbg_dir);
pm_runtime_disable(&pdev->dev);
list_del(&sr_info->node);
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 90b0b5a70ce5..04ca990e8f6c 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -246,6 +246,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
+ if (at91_shdwc)
+ return -EBUSY;
+
at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL);
if (!at91_shdwc)
return -ENOMEM;
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f673118c..ea8c26a108f0 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2437,17 +2437,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_full;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_full);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
-
- if (!ret) {
- di->bat_cap.max_mah = (int) charge_full;
- ret = count;
- }
- return ret;
+ di->bat_cap.max_mah = (int) charge_full;
+ return count;
}
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
@@ -2459,20 +2456,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_now;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_now);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
- ret, charge_now, di->bat_cap.prev_mah);
-
- if (!ret) {
- di->bat_cap.user_mah = (int) charge_now;
- di->flags.user_cap = true;
- ret = count;
- queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
- }
- return ret;
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return count;
}
static struct ab8500_fg_sysfs_entry charge_full_attr =
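Both ab8500_fg store callbacks above are reshaped into "parse the input, return the error immediately, only then touch driver state". A user-space sketch of that shape; kstrtoul() is kernel-only, so parse_charge() below wraps strtoul() and every name in it is invented:
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_charge(const char *buf, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end && *end != '\n'))
		return -EINVAL;
	return 0;
}

static long store_charge_full(int *max_mah, const char *buf, size_t count)
{
	unsigned long charge_full;
	int ret;

	ret = parse_charge(buf, &charge_full);
	if (ret)
		return ret;		/* propagate the parse error, change nothing */

	*max_mah = (int)charge_full;
	return (long)count;		/* sysfs stores return the consumed length */
}

int main(void)
{
	int max_mah = 0;

	printf("ok: %ld, max_mah=%d\n", store_charge_full(&max_mah, "2600\n", 5), max_mah);
	printf("bad input: %ld\n", store_charge_full(&max_mah, "abc\n", 4));
	return 0;
}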
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 75b8e0c7402b..8a0a8fb915d6 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -899,6 +899,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
+ if (pirq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+ return pirq;
+ }
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index bccb3f595ff3..247be9155694 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1031,7 +1031,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg);
if (IS_ERR(di->bat)) {
- dev_err(di->dev, "failed to register battery\n");
+ if (PTR_ERR(di->bat) == -EPROBE_DEFER)
+ dev_dbg(di->dev, "failed to register battery, deferring probe\n");
+ else
+ dev_err(di->dev, "failed to register battery\n");
return PTR_ERR(di->bat);
}
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 4adf2ba021ce..043de9d039d5 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -364,7 +364,7 @@ static int ltc294x_i2c_remove(struct i2c_client *client)
{
struct ltc294x_info *info = i2c_get_clientdata(client);
- cancel_delayed_work(&info->work);
+ cancel_delayed_work_sync(&info->work);
power_supply_unregister(info->supply);
return 0;
}
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index b64cf0f14142..66438029bdd0 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -85,7 +85,7 @@ static const struct power_supply_desc max8998_battery_desc = {
static int max8998_battery_probe(struct platform_device *pdev)
{
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
struct max8998_battery_data *max8998;
struct i2c_client *i2c;
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 9e05ae0430a9..cb0b3d3d132f 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -764,14 +764,14 @@ __power_supply_register(struct device *parent,
}
spin_lock_init(&psy->changed_lock);
- rc = device_init_wakeup(dev, ws);
- if (rc)
- goto wakeup_init_failed;
-
rc = device_add(dev);
if (rc)
goto device_add_failed;
+ rc = device_init_wakeup(dev, ws);
+ if (rc)
+ goto wakeup_init_failed;
+
rc = psy_register_thermal(psy);
if (rc)
goto register_thermal_failed;
@@ -808,8 +808,8 @@ register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
device_del(dev);
-device_add_failed:
wakeup_init_failed:
+device_add_failed:
check_supplies_failed:
dev_set_name_failed:
put_device(dev);
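The power_supply_core.c hunk moves device_init_wakeup() after device_add() and swaps the wakeup_init_failed/device_add_failed labels so the unwind path keeps mirroring the setup order. A generic sketch of that "error labels mirror setup order" pattern; step_a/step_b/step_c and their undo helpers are invented:
#include <errno.h>
#include <stdio.h>

static int step_a(void) { puts("a: done"); return 0; }
static int step_b(void) { puts("b: done"); return 0; }
static int step_c(void) { puts("c: failed"); return -EIO; }

static void undo_a(void) { puts("a: undone"); }
static void undo_b(void) { puts("b: undone"); }

static int setup(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto err_a;
	rc = step_b();
	if (rc)
		goto err_b;
	rc = step_c();
	if (rc)
		goto err_c;
	return 0;

err_c:			/* c failed: undo b, then a - reverse of setup order */
	undo_b();
err_b:
	undo_a();
err_a:
	return rc;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}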
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index bcde8d13476a..85cb5b9c1b6f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -84,7 +84,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
- dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ dev_err_ratelimited(dev,
+ "driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
return ret;
}
@@ -278,15 +279,11 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
char *prop_buf;
char *attrname;
- dev_dbg(dev, "uevent\n");
-
if (!psy || !psy->desc) {
dev_dbg(dev, "No power supply yet\n");
return ret;
}
- dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->desc->name);
-
ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->desc->name);
if (ret)
return ret;
@@ -322,8 +319,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
goto out;
}
- dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
-
ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
kfree(attrname);
if (ret)
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index bcd4dc304f27..5b1f147b11cb 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -449,7 +449,8 @@ static void twl4030_current_worker(struct work_struct *data)
if (v < USB_MIN_VOLT) {
/* Back up and stop adjusting. */
- bci->usb_cur -= USB_CUR_STEP;
+ if (bci->usb_cur >= USB_CUR_STEP)
+ bci->usb_cur -= USB_CUR_STEP;
bci->usb_cur_target = bci->usb_cur;
} else if (bci->usb_cur >= bci->usb_cur_target ||
bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
@@ -468,6 +469,7 @@ static void twl4030_current_worker(struct work_struct *data)
static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
+ u32 reg;
if (bci->usb_mode == CHARGE_OFF)
enable = false;
@@ -481,14 +483,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
bci->usb_enabled = 1;
}
- if (bci->usb_mode == CHARGE_AUTO)
+ if (bci->usb_mode == CHARGE_AUTO) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC |
+ TWL4030_TBATOR2 | TWL4030_TBATOR1 |
+ TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ }
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
if (bci->usb_mode == CHARGE_LINEAR) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
/* Watch dog key: WOVF acknowledge */
ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
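In twl4030_current_worker() the back-off is now guarded so an unsigned usb_cur smaller than USB_CUR_STEP cannot wrap around to a huge current value. A minimal sketch of the guarded decrement; the 50 mA step below is only illustrative:
#include <stdio.h>

#define USB_CUR_STEP	50000U		/* illustrative step size */

static unsigned int back_off(unsigned int usb_cur)
{
	if (usb_cur >= USB_CUR_STEP)	/* guard against unsigned wrap-around */
		usb_cur -= USB_CUR_STEP;
	return usb_cur;
}

int main(void)
{
	printf("from 100000 -> %u\n", back_off(100000U));
	printf("from  20000 -> %u (no wrap-around)\n", back_off(20000U));
	return 0;
}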
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 3c71f608b444..8809c1a20bed 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1175,12 +1175,12 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT, rapl_defaults_byt),
RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,rapl_defaults_tng),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
{}
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2f07cd615665..76ae38450aea 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -129,6 +129,14 @@ static long pps_cdev_ioctl(struct file *file,
pps->params.mode |= PPS_CANWAIT;
pps->params.api_version = PPS_API_VERS;
+ /*
+ * Clear unused fields of pps_kparams to avoid leaking
+ * uninitialized data of the PPS_SETPARAMS caller via
+ * PPS_GETPARAMS
+ */
+ pps->params.assert_off_tu.flags = 0;
+ pps->params.clear_off_tu.flags = 0;
+
spin_unlock_irq(&pps->lock);
break;
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 172ef8245811..cc12032ee60d 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -302,10 +302,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_add(chip);
- pwmchip_sysfs_export(chip);
-
out:
mutex_unlock(&pwm_lock);
+
+ if (!ret)
+ pwmchip_sysfs_export(chip);
+
return ret;
}
EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
@@ -339,7 +341,7 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
- pwmchip_sysfs_unexport_children(chip);
+ pwmchip_sysfs_unexport(chip);
mutex_lock(&pwm_lock);
@@ -359,8 +361,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
- pwmchip_sysfs_unexport(chip);
-
out:
mutex_unlock(&pwm_lock);
return ret;
@@ -858,6 +858,7 @@ void pwm_put(struct pwm_device *pwm)
if (pwm->chip->ops->free)
pwm->chip->ops->free(pwm->chip, pwm);
+ pwm_set_chip_data(pwm, NULL);
pwm->label = NULL;
module_put(pwm->chip->ops->owner);
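pwmchip_add_with_polarity() above now exports the chip to sysfs only after registration succeeded and pwm_lock has been dropped, and pwmchip_remove() unexports before taking the lock. A rough user-space sketch of "finish the locked registration first, then do the export outside the lock, and only on success"; registry_lock, add_chip() and export_chip() are invented, with a pthread mutex standing in for the kernel mutex:
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static int nchips;

static void export_chip(int id)
{
	/* may sleep or take other locks; runs outside registry_lock */
	printf("exported chip %d\n", id);
}

static int add_chip(int fail)
{
	int ret = 0, id = -1;

	pthread_mutex_lock(&registry_lock);
	if (fail)
		ret = -1;
	else
		id = nchips++;
	pthread_mutex_unlock(&registry_lock);

	if (!ret)			/* export only after the lock is dropped */
		export_chip(id);
	return ret;
}

int main(void)
{
	printf("add_chip(0) = %d\n", add_chip(0));
	printf("add_chip(1) = %d\n", add_chip(1));
	return 0;
}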
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index d961a8207b1c..31b01035d0ab 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -187,6 +187,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops iproc_pwm_ops = {
.apply = iproc_pwmc_apply,
.get_state = iproc_pwmc_get_state,
+ .owner = THIS_MODULE,
};
static int iproc_pwmc_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 01339c152ab0..64d9bb1ac272 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -78,7 +78,6 @@ static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
- pwm_set_chip_data(pwm, NULL);
kfree(channel);
}
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 26ec24e457b1..7e16b7def0dc 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -48,7 +48,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
{
/* Duty cycle 0..15 max */
- return DIV_ROUND_CLOSEST(v * 0xf, pwm_get_period(pwm));
+ return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period);
}
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -71,7 +71,7 @@ static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct clps711x_chip *priv = to_clps711x_chip(chip);
unsigned int duty;
- if (period_ns != pwm_get_period(pwm))
+ if (period_ns != pwm->args.period)
return -EINVAL;
duty = clps711x_get_duty(pwm, duty_ns);
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 5208b3f80ad8..239003807c08 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -205,6 +205,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
{
+ int i;
+
+ for (i = 0; i < lpwm->info->npwm; i++) {
+ if (pwm_is_enabled(&lpwm->chip.pwms[i]))
+ pm_runtime_put(lpwm->chip.dev);
+ }
return pwmchip_remove(&lpwm->chip);
}
EXPORT_SYMBOL_GPL(pwm_lpss_remove);
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 9d5bd7d5c610..a196439ee14c 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -110,6 +110,10 @@ struct meson_pwm {
const struct meson_pwm_data *data;
void __iomem *base;
u8 inverter_mask;
+ /*
+ * Protects register (write) access to the REG_MISC_AB register
+ * that is shared between the two PWMs.
+ */
spinlock_t lock;
};
@@ -230,6 +234,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
{
u32 value, clk_shift, clk_enable, enable;
unsigned int offset;
+ unsigned long flags;
switch (id) {
case 0:
@@ -250,6 +255,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~(MISC_CLK_DIV_MASK << clk_shift);
value |= channel->pre_div << clk_shift;
@@ -262,11 +269,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
value = readl(meson->base + REG_MISC_AB);
value |= enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
{
u32 value, enable;
+ unsigned long flags;
switch (id) {
case 0:
@@ -281,9 +291,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -291,29 +305,21 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
- unsigned long flags;
int err = 0;
if (!state)
return -EINVAL;
- spin_lock_irqsave(&meson->lock, flags);
-
if (!state->enabled) {
meson_pwm_disable(meson, pwm->hwpwm);
channel->state.enabled = false;
- goto unlock;
+ return 0;
}
if (state->period != channel->state.period ||
state->duty_cycle != channel->state.duty_cycle ||
state->polarity != channel->state.polarity) {
- if (channel->state.enabled) {
- meson_pwm_disable(meson, pwm->hwpwm);
- channel->state.enabled = false;
- }
-
if (state->polarity != channel->state.polarity) {
if (state->polarity == PWM_POLARITY_NORMAL)
meson->inverter_mask |= BIT(pwm->hwpwm);
@@ -324,7 +330,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = meson_pwm_calc(meson, channel, pwm->hwpwm,
state->duty_cycle, state->period);
if (err < 0)
- goto unlock;
+ return err;
channel->state.polarity = state->polarity;
channel->state.period = state->period;
@@ -336,9 +342,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->state.enabled = true;
}
-unlock:
- spin_unlock_irqrestore(&meson->lock, flags);
- return err;
+ return 0;
}
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
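The pwm-meson.c changes narrow the locking so each read-modify-write of the shared REG_MISC_AB register is serialized inside meson_pwm_enable()/disable(), rather than holding the spinlock across the whole apply path. A user-space sketch of a locked read-modify-write on a register shared by two channels; misc_ab, the EN() bit layout and the pthread mutex are stand-ins, not the driver's real code:
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t misc_ab;				/* shared "register" */
static pthread_mutex_t misc_lock = PTHREAD_MUTEX_INITIALIZER;

#define EN(ch)	(1u << (ch))

static void channel_enable(unsigned int ch)
{
	pthread_mutex_lock(&misc_lock);
	misc_ab |= EN(ch);				/* locked read-modify-write */
	pthread_mutex_unlock(&misc_lock);
}

static void channel_disable(unsigned int ch)
{
	pthread_mutex_lock(&misc_lock);
	misc_ab &= ~EN(ch);
	pthread_mutex_unlock(&misc_lock);
}

int main(void)
{
	channel_enable(0);
	channel_enable(1);
	channel_disable(0);
	printf("misc_ab = 0x%x\n", misc_ab);		/* 0x2: only channel 1 left on */
	return 0;
}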
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 5ad42f33e70c..2e15acf13893 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -337,6 +337,11 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&omap->chip);
+ if (ret)
+ return ret;
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
@@ -345,7 +350,7 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
mutex_destroy(&omap->mutex);
- return pwmchip_remove(&omap->chip);
+ return 0;
}
static const struct of_device_id pwm_omap_dmtimer_of_match[] = {
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index f113cda47032..219757087995 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -235,7 +235,6 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
devm_kfree(chip->dev, pwm_get_chip_data(pwm));
- pwm_set_chip_data(pwm, NULL);
}
static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index c0e06f0c19d1..9a232ebbbf96 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -383,6 +383,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
/* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_ZRO);
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index a813239300c3..0850b11dfd83 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
struct device *parent;
-
- parent = class_find_device(&pwm_class, NULL, chip,
- pwmchip_sysfs_match);
- if (parent) {
- /* for class_find_device() */
- put_device(parent);
- device_unregister(parent);
- }
-}
-
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
- struct device *parent;
unsigned int i;
parent = class_find_device(&pwm_class, NULL, chip,
@@ -427,6 +414,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
}
put_device(parent);
+ device_unregister(parent);
}
static int __init pwm_sysfs_init(void)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index f32fc704cb7e..28c45db45aba 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1743,6 +1743,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
+ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);
@@ -1874,6 +1875,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
+ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
mport = priv->md->mport;
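Both rio_mport_cdev.c hunks force a trailing '\0' on dev_info.name right after copy_from_user(), so later "%s" formatting cannot run past the fixed-size field. A small sketch of that termination step; struct rio_dev_info below is an invented stand-in, not the real uapi structure:
#include <stdio.h>
#include <string.h>

struct rio_dev_info {
	char name[8];
};

static void import_dev_info(struct rio_dev_info *info, const char *src, size_t len)
{
	memcpy(info->name, src, len < sizeof(info->name) ? len : sizeof(info->name));
	info->name[sizeof(info->name) - 1] = '\0';	/* never trust the source to terminate */
}

int main(void)
{
	struct rio_dev_info info;
	const char unterminated[8] = { 'r', 'i', 'o', 'd', 'e', 'v', '0', '1' };

	import_dev_info(&info, unterminated, sizeof(unterminated));
	printf("name: %s\n", info.name);		/* safe: prints "riodev0" */
	return 0;
}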
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index bad0e0ea4f30..b29fc258eeba 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -1215,7 +1215,9 @@ static int riocm_ch_listen(u16 ch_id)
riocm_debug(CHOP, "(ch_%d)", ch_id);
ch = riocm_get_channel(ch_id);
- if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
+ if (!ch)
+ return -EINVAL;
+ if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
ret = -EINVAL;
riocm_put_channel(ch);
return ret;
@@ -2145,6 +2147,14 @@ static int riocm_add_mport(struct device *dev,
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ riocm_error("failed to allocate IBMBOX_%d on %s",
+ cmbox, mport->name);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 0f97514e3474..c9f20e1394e3 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -1099,23 +1099,6 @@ static struct ab8500_regulator_info
.update_val_idle = 0x82,
.update_val_normal = 0x02,
},
- [AB8505_LDO_USB] = {
- .desc = {
- .name = "LDO-USB",
- .ops = &ab8500_regulator_mode_ops,
- .type = REGULATOR_VOLTAGE,
- .id = AB8505_LDO_USB,
- .owner = THIS_MODULE,
- .n_voltages = 1,
- .volt_table = fixed_3300000_voltage,
- },
- .update_bank = 0x03,
- .update_reg = 0x82,
- .update_mask = 0x03,
- .update_val = 0x01,
- .update_val_idle = 0x03,
- .update_val_normal = 0x01,
- },
[AB8505_LDO_AUDIO] = {
.desc = {
.name = "LDO-AUDIO",
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 7652477e6a9d..39e8d60df060 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -131,7 +131,7 @@
* ACT8865 voltage number
*/
#define ACT8865_VOLTAGE_NUM 64
-#define ACT8600_SUDCDC_VOLTAGE_NUM 255
+#define ACT8600_SUDCDC_VOLTAGE_NUM 256
struct act8865 {
struct regmap *regmap;
@@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
- REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
+ REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
+ REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
};
static struct regulator_ops act8865_ops = {
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index f53e63301a20..e71117d7217b 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -33,7 +33,7 @@
/* LM3632 */
#define LM3632_BOOST_VSEL_MAX 0x26
-#define LM3632_LDO_VSEL_MAX 0x29
+#define LM3632_LDO_VSEL_MAX 0x28
#define LM3632_VBOOST_MIN 4500000
#define LM3632_VLDO_MIN 4000000
diff --git a/drivers/regulator/max8907-regulator.c b/drivers/regulator/max8907-regulator.c
index 5e941db5ccaf..c7e70cfb581f 100644
--- a/drivers/regulator/max8907-regulator.c
+++ b/drivers/regulator/max8907-regulator.c
@@ -299,7 +299,10 @@ static int max8907_regulator_probe(struct platform_device *pdev)
memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
/* Backwards compatibility with MAX8907B; SD1 uses different voltages */
- regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ ret = regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_II2RR_VERSION_MASK) ==
MAX8907_II2RR_VERSION_REV_B) {
pmic->desc[MAX8907_SD1].min_uV = 637500;
@@ -336,14 +339,20 @@ static int max8907_regulator_probe(struct platform_device *pdev)
}
if (pmic->desc[i].ops == &max8907_ldo_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & MAX8907_MASK_LDO_SEQ) !=
MAX8907_MASK_LDO_SEQ)
pmic->desc[i].ops = &max8907_ldo_hwctl_ops;
} else if (pmic->desc[i].ops == &max8907_out5v_ops) {
- regmap_read(config.regmap, pmic->desc[i].enable_reg,
+ ret = regmap_read(config.regmap, pmic->desc[i].enable_reg,
&val);
+ if (ret)
+ return ret;
+
if ((val & (MAX8907_MASK_OUT5V_VINEN |
MAX8907_MASK_OUT5V_ENSRC)) !=
MAX8907_MASK_OUT5V_ENSRC)
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index f11d41dad9c1..1eb605b6f049 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -435,13 +435,16 @@ static int palmas_ldo_write(struct palmas *palmas, unsigned int reg,
static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
{
int id = rdev_get_id(dev);
+ int ret;
struct palmas_pmic *pmic = rdev_get_drvdata(dev);
struct palmas_pmic_driver_data *ddata = pmic->palmas->pmic_ddata;
struct palmas_regs_info *rinfo = &ddata->palmas_regs_info[id];
unsigned int reg;
bool rail_enable = true;
- palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, &reg);
+ ret = palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, &reg);
+ if (ret)
+ return ret;
reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 92bf0c952583..f4f622ee3556 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -635,7 +635,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
/* SW2~SW4 high bit check and modify the voltage value table */
if (i >= sw_check_start && i <= sw_check_end) {
- regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+ ret = regmap_read(pfuze_chip->regmap,
+ desc->vsel_reg, &val);
+ if (ret) {
+ dev_err(&client->dev, "Fails to read from the register.\n");
+ return ret;
+ }
+
if (val & sw_hi) {
if (pfuze_chip->chip_id == PFUZE3000) {
desc->volt_table = pfuze3000_sw2hi;
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 6c4afc73ecac..d229245d2b5e 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -135,7 +135,7 @@ static int pv88060_set_current_limit(struct regulator_dev *rdev, int min,
int i;
/* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
+ for (i = info->n_current_limits - 1; i >= 0; i--) {
if (min <= info->current_limits[i]
&& max >= info->current_limits[i]) {
return regmap_update_bits(rdev->regmap,
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 954a20eeb26f..a40ecfb77210 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -279,7 +279,7 @@ static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
int i;
/* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
+ for (i = info->n_current_limits - 1; i >= 0; i--) {
if (min <= info->current_limits[i]
&& max >= info->current_limits[i]) {
return regmap_update_bits(rdev->regmap,
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 421641175352..789652c9b014 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -157,7 +157,7 @@ static int pv88090_set_current_limit(struct regulator_dev *rdev, int min,
int i;
/* search for closest to maximum */
- for (i = info->n_current_limits; i >= 0; i--) {
+ for (i = info->n_current_limits - 1; i >= 0; i--) {
if (min <= info->current_limits[i]
&& max >= info->current_limits[i]) {
return regmap_update_bits(rdev->regmap,
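The pv88060/pv88080/pv88090 fixes all start the descending search at n_current_limits - 1, so the first loop iteration no longer reads one entry past the limit table. A stand-alone sketch of the corrected loop; the table contents and pick_current_limit() are invented:
#include <stdio.h>

static const int current_limits[] = { 1000000, 2000000, 3000000, 4000000 };

static int pick_current_limit(int min, int max)
{
	int n = sizeof(current_limits) / sizeof(current_limits[0]);
	int i;

	for (i = n - 1; i >= 0; i--) {		/* starting at i = n would index past the table */
		if (min <= current_limits[i] && max >= current_limits[i])
			return i;		/* closest value to max */
	}
	return -1;
}

int main(void)
{
	printf("selector for [1.5A, 3.5A] = %d\n", pick_current_limit(1500000, 3500000));
	return 0;
}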
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index dfa8d50a5d74..28646e4cf3ba 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -589,7 +589,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
}
if (!pdata->dvs_gpio[i]) {
- dev_warn(dev, "there is no dvs%d gpio\n", i);
+ dev_info(dev, "there is no dvs%d gpio\n", i);
continue;
}
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index 9c930eb68cda..ffc34e1ee35d 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -127,6 +127,7 @@ static struct platform_driver rn5t618_regulator_driver = {
module_platform_driver(rn5t618_regulator_driver);
+MODULE_ALIAS("platform:rn5t618-regulator");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 regulator driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 1fe1c18cc27b..179f3c61a321 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -386,8 +386,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index d2f994298753..6d17357b3a24 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
while (timeout++ <= abb->settling_time) {
status = ti_abb_check_txdone(abb);
if (status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
status = ti_abb_check_txdone(abb);
if (!status)
- break;
+ return 0;
udelay(1);
}
- if (timeout > abb->settling_time) {
- dev_warn_ratelimited(dev,
- "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
- __func__, timeout, readl(abb->int_base));
- return -ETIMEDOUT;
- }
-
- return 0;
+ dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+ __func__, timeout, readl(abb->int_base));
+ return -ETIMEDOUT;
}
/**
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 6dbf3cf3951e..12d26261394f 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -89,8 +89,8 @@ static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
- REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xA, 100000),
- REGULATOR_LINEAR_RANGE(2700000, 0xB, 0xD, 150000),
+ REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
+ REGULATOR_LINEAR_RANGE(2850000, 0xC, 0xD, 150000),
REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
};
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 696116ebdf50..9cde7b075701 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1102,8 +1102,10 @@ static int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
/* Give control of all register to control port */
- tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ err = tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
+ if (err < 0)
+ return err;
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 5a5bc4bb08d2..df591435d12a 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -327,8 +327,8 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
}
/* Current limit options */
-static u16 wm831x_dcdc_ilim[] = {
- 125, 250, 375, 500, 625, 750, 875, 1000
+static const unsigned int wm831x_dcdc_ilim[] = {
+ 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000
};
static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev,
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index c6bfb3496684..b99780574044 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1488,7 +1488,7 @@ static int __init remoteproc_init(void)
return 0;
}
-module_init(remoteproc_init);
+subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 188205a55261..d0ebca301afc 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -324,28 +324,29 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
break;
}
}
- of_node_put(args.np);
if (!rcdev) {
- mutex_unlock(&reset_list_mutex);
- return ERR_PTR(-EPROBE_DEFER);
+ rstc = ERR_PTR(-EPROBE_DEFER);
+ goto out;
}
if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
- mutex_unlock(&reset_list_mutex);
- return ERR_PTR(-EINVAL);
+ rstc = ERR_PTR(-EINVAL);
+ goto out;
}
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
- mutex_unlock(&reset_list_mutex);
- return ERR_PTR(rstc_id);
+ rstc = ERR_PTR(rstc_id);
+ goto out;
}
/* reset_list_mutex also protects the rcdev's reset_control list */
rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
+out:
mutex_unlock(&reset_list_mutex);
+ of_node_put(args.np);
return rstc;
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6c107828ad77..57718cbcc7d9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -304,6 +304,7 @@ config RTC_DRV_MAX6900
config RTC_DRV_MAX8907
tristate "Maxim MAX8907"
depends on MFD_MAX8907
+ select REGMAP_IRQ
help
If you say yes here you will get support for the
RTC of Maxim MAX8907 PMIC.
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 466bf7f9a285..7da2a1fb50f8 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -116,12 +116,14 @@ static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[4];
unsigned long ticks, base, data;
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -144,7 +146,8 @@ static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
base = ticks - data;
dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -165,11 +168,13 @@ static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
int ret;
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -192,12 +197,14 @@ static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0);
regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
/* load 32-bit read-only counter */
regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
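The rtc-88pm80x.c hunks (and the matching ones in rtc-88pm860x.c and rtc-ds1672.c below) cast the top byte to unsigned long before shifting it by 24, so a byte with bit 7 set is not pushed into the sign bit of a 32-bit int when the counter is reassembled. A short demonstration of the difference:
#include <stdio.h>

int main(void)
{
	unsigned char buf[4] = { 0x78, 0x56, 0x34, 0xff };
	unsigned long bad, good;

	/* buf[3] promotes to a signed int; 0xff << 24 lands in the sign bit */
	bad  = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
	good = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
	       (buf[1] << 8) | buf[0];

	printf("bad  = 0x%lx\n", bad);	/* sign-extended on LP64: 0xffffffffff345678 */
	printf("good = 0x%lx\n", good);	/* 0xff345678 */
	return 0;
}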
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 19e53b3b8e00..7d3e5168fcef 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -115,11 +115,13 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
+ (buf[5] << 8) | buf[7];
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -145,7 +147,8 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
base = ticks - data;
dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -170,10 +173,12 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
+ (buf[5] << 8) | buf[7];
pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -198,11 +203,13 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
+ (buf[5] << 8) | buf[7];
/* load 32-bit read-only counter */
pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
ticks = base + data;
dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
base, data, ticks);
@@ -414,7 +421,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
#ifdef VRTC_CALIBRATION
- flush_scheduled_work();
+ cancel_delayed_work_sync(&info->calib_work);
/* disable measurement */
pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
#endif /* VRTC_CALIBRATION */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c554e529fc4e..b962dbe51750 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -730,7 +730,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = cmos_interrupt;
retval = request_irq(rtc_irq, rtc_cmos_int_handler,
- IRQF_SHARED, dev_name(&cmos_rtc.rtc->dev),
+ 0, dev_name(&cmos_rtc.rtc->dev),
cmos_rtc.rtc);
if (retval < 0) {
dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index f85cae240f12..7e92e491c2e7 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
+ /*
+ * TODO: some models have alarms on a minute boundary but still support
+ * real hardware interrupts. Add this once the core supports it.
+ */
+ if (config->rtc_data_start != RTC_SEC)
+ rtc->rtc_dev->uie_unsupported = 1;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 5c18ac7394c4..c911f2db0af5 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -58,7 +58,8 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
"%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
__func__, buf[0], buf[1], buf[2], buf[3]);
- time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ time = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
+ (buf[1] << 8) | buf[0];
rtc_time_to_tm(time, tm);
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index e5ad527cb75e..a8c2d38b2411 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -105,7 +105,7 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!hym8563->valid) {
dev_warn(&client->dev, "no valid clock/calendar values available\n");
- return -EPERM;
+ return -EINVAL;
}
ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf);
diff --git a/drivers/rtc/rtc-max8997.c b/drivers/rtc/rtc-max8997.c
index db984d4bf952..4cce5bd448f6 100644
--- a/drivers/rtc/rtc-max8997.c
+++ b/drivers/rtc/rtc-max8997.c
@@ -221,7 +221,7 @@ static int max8997_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
out:
mutex_unlock(&info->lock);
- return 0;
+ return ret;
}
static int max8997_rtc_stop_alarm(struct max8997_rtc_info *info)
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2f1772a358ca..18a6f15e313d 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -82,7 +82,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
time->tm_year += real_year - 72;
#endif
- if (century)
+ if (century > 20)
time->tm_year += (century - 19) * 100;
/*
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index c1c5c4e3b3b4..c981301efbe5 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -132,7 +132,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
- tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
+ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
+ MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1a61fa56f3ad..494a7fbd512b 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -55,6 +55,14 @@
#define RTC_AL_SEC 0x0018
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
@@ -111,7 +119,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
irqen = irqsta & ~RTC_IRQ_EN_AL;
mutex_lock(&rtc->lock);
if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
- irqen) < 0)
+ irqen) == 0)
mtk_rtc_write_trigger(rtc);
mutex_unlock(&rtc->lock);
@@ -233,12 +241,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
mutex_unlock(&rtc->lock);
- tm->tm_sec = data[RTC_OFFSET_SEC];
- tm->tm_min = data[RTC_OFFSET_MIN];
- tm->tm_hour = data[RTC_OFFSET_HOUR];
- tm->tm_mday = data[RTC_OFFSET_DOM];
- tm->tm_mon = data[RTC_OFFSET_MTH];
- tm->tm_year = data[RTC_OFFSET_YEAR];
+ tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
+ tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
+ tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
+ tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
+ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
+ tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
tm->tm_year += RTC_MIN_YEAR_OFFSET;
tm->tm_mon--;
@@ -259,14 +267,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
tm->tm_year -= RTC_MIN_YEAR_OFFSET;
tm->tm_mon++;
- data[RTC_OFFSET_SEC] = tm->tm_sec;
- data[RTC_OFFSET_MIN] = tm->tm_min;
- data[RTC_OFFSET_HOUR] = tm->tm_hour;
- data[RTC_OFFSET_DOM] = tm->tm_mday;
- data[RTC_OFFSET_MTH] = tm->tm_mon;
- data[RTC_OFFSET_YEAR] = tm->tm_year;
-
mutex_lock(&rtc->lock);
+ ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
+ data, RTC_OFFSET_COUNT);
+ if (ret < 0)
+ goto exit;
+
+ data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
+ (tm->tm_sec & RTC_AL_SEC_MASK));
+ data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
+ (tm->tm_min & RTC_AL_MIN_MASK));
+ data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
+ (tm->tm_hour & RTC_AL_HOU_MASK));
+ data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
+ (tm->tm_mday & RTC_AL_DOM_MASK));
+ data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
+ (tm->tm_mon & RTC_AL_MTH_MASK));
+ data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
+ (tm->tm_year & RTC_AL_YEA_MASK));
+
if (alm->enabled) {
ret = regmap_bulk_write(rtc->regmap,
rtc->addr_base + RTC_AL_SEC,
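The mt6397 alarm path now does a read-modify-write so that bits outside the documented alarm fields survive the update. The masking itself reduces to this helper, shown standalone (field width taken from the RTC_AL_*_MASK values introduced above):

#include <stdint.h>
#include <stdio.h>

#define SEC_MASK	0x003f

/* Merge a new field value into a register word without disturbing the
 * bits outside the field (reserved or owned by other logic). */
static uint16_t field_update(uint16_t reg, uint16_t val, uint16_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint16_t reg = 0xa515;			/* bits outside the field set */

	reg = field_update(reg, 42, SEC_MASK);	/* program 42 seconds */
	printf("0x%04x\n", reg);		/* 0xa52a: upper bits intact */
	return 0;
}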
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bd5ca548c265..dd9751687760 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -559,9 +559,7 @@ static const struct pinctrl_ops rtc_pinctrl_ops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
-enum rtc_pin_config_param {
- PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
-};
+#define PIN_CONFIG_ACTIVE_HIGH (PIN_CONFIG_END + 1)
static const struct pinconf_generic_params rtc_params[] = {
{"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 28c48b3c1946..a06792966ea9 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -82,23 +82,46 @@ static int pcf8523_write(struct i2c_client *client, u8 reg, u8 value)
return 0;
}
-static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
+static int pcf8523_voltage_low(struct i2c_client *client)
{
u8 value;
int err;
+ err = pcf8523_read(client, REG_CONTROL3, &value);
+ if (err < 0)
+ return err;
+
+ return !!(value & REG_CONTROL3_BLF);
+}
+
+static int pcf8523_load_capacitance(struct i2c_client *client)
+{
+ u32 load;
+ u8 value;
+ int err;
+
err = pcf8523_read(client, REG_CONTROL1, &value);
if (err < 0)
return err;
- if (!high)
- value &= ~REG_CONTROL1_CAP_SEL;
- else
+ load = 12500;
+ of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads",
+ &load);
+
+ switch (load) {
+ default:
+ dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
+ load);
+ /* fall through */
+ case 12500:
value |= REG_CONTROL1_CAP_SEL;
+ break;
+ case 7000:
+ value &= ~REG_CONTROL1_CAP_SEL;
+ break;
+ }
err = pcf8523_write(client, REG_CONTROL1, value);
- if (err < 0)
- return err;
return err;
}
@@ -164,6 +187,14 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
struct i2c_msg msgs[2];
int err;
+ err = pcf8523_voltage_low(client);
+ if (err < 0) {
+ return err;
+ } else if (err > 0) {
+ dev_err(dev, "low voltage detected, time is unreliable\n");
+ return -EINVAL;
+ }
+
msgs[0].addr = client->addr;
msgs[0].flags = 0;
msgs[0].len = 1;
@@ -248,17 +279,13 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
unsigned long arg)
{
struct i2c_client *client = to_i2c_client(dev);
- u8 value;
- int ret = 0, err;
+ int ret;
switch (cmd) {
case RTC_VL_READ:
- err = pcf8523_read(client, REG_CONTROL3, &value);
- if (err < 0)
- return err;
-
- if (value & REG_CONTROL3_BLF)
- ret = 1;
+ ret = pcf8523_voltage_low(client);
+ if (ret < 0)
+ return ret;
if (copy_to_user((void __user *)arg, &ret, sizeof(int)))
return -EFAULT;
@@ -291,9 +318,10 @@ static int pcf8523_probe(struct i2c_client *client,
if (!pcf)
return -ENOMEM;
- err = pcf8523_select_capacitance(client, true);
+ err = pcf8523_load_capacitance(client);
if (err < 0)
- return err;
+ dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
+ err);
err = pcf8523_set_pm(client, 0);
if (err < 0)
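The pcf8523 probe path now reads an optional quartz-load-femtofarads property and falls back to 12500 when it is absent or unknown. A sketch of that shape, with lookup_u32() standing in for of_property_read_u32(), which leaves its output untouched on failure:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for of_property_read_u32(): returns 0 and fills *out only
 * when the property exists, otherwise leaves *out alone. */
static int lookup_u32(const char *prop, uint32_t *out)
{
	(void)prop;
	(void)out;
	return -1;		/* pretend the property is absent */
}

int main(void)
{
	uint32_t load = 12500;	/* default survives a failed lookup */
	uint8_t ctrl = 0;

	lookup_u32("quartz-load-femtofarads", &load);

	switch (load) {
	default:
		fprintf(stderr, "unknown load %u, assuming 12500\n", load);
		/* fall through */
	case 12500:
		ctrl |= 0x01;	/* CAP_SEL: 12.5 pF crystal */
		break;
	case 7000:
		ctrl &= ~0x01;	/* 7 pF crystal */
		break;
	}

	printf("ctrl = 0x%02x\n", ctrl);
	return 0;
}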
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index a4b8b603c807..533fb7027f0c 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -563,7 +563,6 @@ static int pcf8563_probe(struct i2c_client *client,
struct pcf8563 *pcf8563;
int err;
unsigned char buf;
- unsigned char alm_pending;
dev_dbg(&client->dev, "%s\n", __func__);
@@ -587,13 +586,13 @@ static int pcf8563_probe(struct i2c_client *client,
return err;
}
- err = pcf8563_get_alarm_mode(client, NULL, &alm_pending);
- if (err) {
- dev_err(&client->dev, "%s: read error\n", __func__);
+ /* Clear flags and disable interrupts */
+ buf = 0;
+ err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf);
+ if (err < 0) {
+ dev_err(&client->dev, "%s: write error\n", __func__);
return err;
}
- if (alm_pending)
- pcf8563_set_alarm_mode(client, 0);
pcf8563->rtc = devm_rtc_device_register(&client->dev,
pcf8563_driver.driver.name,
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index fac835530671..3b619b7b2c53 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -74,16 +74,18 @@ struct pm8xxx_rtc {
/*
* Steps to write the RTC registers.
* 1. Disable alarm if enabled.
- * 2. Write 0x00 to LSB.
- * 3. Write Byte[1], Byte[2], Byte[3] then Byte[0].
- * 4. Enable alarm if disabled in step 1.
+ * 2. Disable rtc if enabled.
+ * 3. Write 0x00 to LSB.
+ * 4. Write Byte[1], Byte[2], Byte[3] then Byte[0].
+ * 5. Enable rtc if disabled in step 2.
+ * 6. Enable alarm if disabled in step 1.
*/
static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
int rc, i;
unsigned long secs, irq_flags;
- u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
- unsigned int ctrl_reg;
+ u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, rtc_disabled = 0;
+ unsigned int ctrl_reg, rtc_ctrl_reg;
struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
@@ -92,23 +94,38 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_tm_to_time(tm, &secs);
+ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
value[i] = secs & 0xFF;
secs >>= 8;
}
- dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
-
spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
- rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
+ rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
if (rc)
goto rtc_rw_fail;
if (ctrl_reg & regs->alarm_en) {
alarm_enabled = 1;
ctrl_reg &= ~regs->alarm_en;
- rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
+ rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
+ if (rc) {
+ dev_err(dev, "Write to RTC Alarm control register failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
+ /* Disable RTC H/w before writing on RTC register */
+ rc = regmap_read(rtc_dd->regmap, regs->ctrl, &rtc_ctrl_reg);
+ if (rc)
+ goto rtc_rw_fail;
+
+ if (rtc_ctrl_reg & PM8xxx_RTC_ENABLE) {
+ rtc_disabled = 1;
+ rtc_ctrl_reg &= ~PM8xxx_RTC_ENABLE;
+ rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg);
if (rc) {
dev_err(dev, "Write to RTC control register failed\n");
goto rtc_rw_fail;
@@ -137,11 +154,21 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
goto rtc_rw_fail;
}
+ /* Enable RTC H/w after writing on RTC register */
+ if (rtc_disabled) {
+ rtc_ctrl_reg |= PM8xxx_RTC_ENABLE;
+ rc = regmap_write(rtc_dd->regmap, regs->ctrl, rtc_ctrl_reg);
+ if (rc) {
+ dev_err(dev, "Write to RTC control register failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
if (alarm_enabled) {
ctrl_reg |= regs->alarm_en;
- rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
+ rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
if (rc) {
- dev_err(dev, "Write to RTC control register failed\n");
+ dev_err(dev, "Write to RTC Alarm control register failed\n");
goto rtc_rw_fail;
}
}
@@ -186,7 +213,8 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
}
- secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+ secs = value[0] | (value[1] << 8) | (value[2] << 16) |
+ ((unsigned long)value[3] << 24);
rtc_time_to_tm(secs, tm);
@@ -267,7 +295,8 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return rc;
}
- secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+ secs = value[0] | (value[1] << 8) | (value[2] << 16) |
+ ((unsigned long)value[3] << 24);
rtc_time_to_tm(secs, &alarm->time);
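The pm8xxx set_time path now halts the counter (after first disabling the alarm) around the four-byte write so the hardware cannot roll over between bytes. A standalone sketch of the stop-write-restart sequence against a fake register file (register layout invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CTRL_REG	0
#define CTRL_ENABLE	0x80
#define DATA_REG(i)	(1 + (i))

static uint8_t regs[8];

static uint8_t rd(unsigned int r)         { return regs[r]; }
static void wr(unsigned int r, uint8_t v) { regs[r] = v; }

/* Halt the counter, write all four bytes, then restore the enable bit,
 * so no byte can roll over while the value is half written. */
static void set_counter(const uint8_t val[4])
{
	uint8_t ctrl = rd(CTRL_REG);
	int was_running = ctrl & CTRL_ENABLE;
	int i;

	if (was_running)
		wr(CTRL_REG, ctrl & ~CTRL_ENABLE);

	for (i = 0; i < 4; i++)
		wr(DATA_REG(i), val[i]);

	if (was_running)
		wr(CTRL_REG, ctrl);
}

int main(void)
{
	const uint8_t t[4] = { 0x12, 0x34, 0x56, 0x78 };

	regs[CTRL_REG] = CTRL_ENABLE;
	set_counter(t);
	printf("ctrl=0x%02x byte0=0x%02x\n", rd(CTRL_REG), rd(DATA_REG(0)));
	return 0;
}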
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 5dab4665ca3b..3e0eea3aa876 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -106,7 +106,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
*/
static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf;
+ u8 buf;
int ret;
unsigned initcount = 0;
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 17b6235d67a5..600fb7f93939 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -454,7 +454,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
- int value = 0xff; /* return 0xff for ignored values */
+ int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e453d2a7d7f9..f40d606f86c9 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -382,6 +382,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format;
char msg_no;
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+ * will be optained from sleep_on to indicate that no
+ * will be obtained from sleep_on to indicate that no
+ * IO operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
@@ -446,12 +460,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
- do {
- rc = dasd_sleep_on(cqr);
- if (rc && suborder_not_supported(cqr))
- return -EOPNOTSUPP;
- } while (rc && (cqr->retries > 0));
- if (rc) {
+ rc = dasd_sleep_on(cqr);
+ if (rc && !suborder_not_supported(cqr)) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 11c6335b1951..9d772201e334 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2054,14 +2054,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
- block->blocks = (private->real_cyl *
+ block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
dev_info(&device->cdev->dev,
- "DASD with %d KB/block, %d KB total size, %d KB/track, "
+ "DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
- ((private->real_cyl *
+ (((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 285b4006f44b..5d5e78afde88 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -628,7 +628,7 @@ con3270_init(void)
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
- raw3270_add_view(&condev->view, &con3270_fn, 1);
+ raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 85eca1cef063..04a6810a4298 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -462,7 +462,8 @@ fs3270_open(struct inode *inode, struct file *filp)
init_waitqueue_head(&fp->wait);
fp->fs_pid = get_pid(task_pid(current));
- rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
+ rc = raw3270_add_view(&fp->view, &fs3270_fn, minor,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
fs3270_free_view(&fp->view);
goto out;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index a2da898ce90f..1ebf632e327b 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -919,7 +919,7 @@ raw3270_deactivate_view(struct raw3270_view *view)
* Add view to device with minor "minor".
*/
int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
+raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
@@ -941,6 +941,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
view->cols = rp->cols;
view->ascebc = rp->ascebc;
spin_lock_init(&view->lock);
+ lockdep_set_subclass(&view->lock, subclass);
list_add(&view->list, &rp->view_list);
rc = 0;
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 56519cbb165c..7577d7d0ad48 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -149,6 +149,8 @@ struct raw3270_fn {
struct raw3270_view {
struct list_head list;
spinlock_t lock;
+#define RAW3270_VIEW_LOCK_IRQ 0
+#define RAW3270_VIEW_LOCK_BH 1
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
@@ -157,7 +159,7 @@ struct raw3270_view {
unsigned char *ascebc; /* ascii -> ebcdic table */
};
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
+int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
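The raw3270 change passes a lockdep subclass into raw3270_add_view() because the same view->lock class is taken from IRQ context by the console view and from bottom-half context by fs3270/tty3270. A minimal kernel-style sketch of the annotation (struct and function names are illustrative):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define DEMO_LOCK_IRQ	0
#define DEMO_LOCK_BH	1

struct demo_view {
	spinlock_t lock;
};

/* Same lock class, different acquisition context per instance: a
 * distinct lockdep subclass keeps the dependency chains apart so
 * lockdep does not report false inversions between them. */
static void demo_view_init(struct demo_view *v, int subclass)
{
	spin_lock_init(&v->lock);
	lockdep_set_subclass(&v->lock, subclass);
}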
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 272cb6cd1b2a..6dd6f9ff7de5 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -978,7 +978,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return PTR_ERR(tp);
rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ tty->index + RAW3270_FIRSTMINOR,
+ RAW3270_VIEW_LOCK_BH);
if (rc) {
tty3270_free_view(tp);
return rc;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 9082476b51db..4e9f794176d3 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -302,8 +302,10 @@ static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
struct ccwdev_iter *iter;
+ loff_t p = *offset;
- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ (*offset)++;
+ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
iter = it;
if (iter->devno == __MAX_SUBCHANNEL) {
@@ -313,7 +315,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
return NULL;
} else
iter->devno++;
- (*offset)++;
return iter;
}
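The cio_ignore fix matters because a seq_file ->next() callback has to advance *pos on every call, including the final one that returns NULL; bailing out without bumping the position can make seq_read() spin on the same entry. A kernel-style sketch of the required shape (the iterator state itself is elided):

#include <linux/seq_file.h>

#define DEMO_LAST	16

/* Capture the old position, then advance unconditionally, exactly as
 * the blacklist fix above does; only then decide whether iteration
 * is finished. */
static void *demo_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	loff_t p = (*pos)++;

	if (p >= DEMO_LAST)
		return NULL;
	return v;	/* reuse the cursor object for the next record */
}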
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e443b0d0b236..0d59c128f734 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -369,7 +369,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
goto error;
}
/* Check for trailing stuff. */
- if (i == num_devices && strlen(buf) > 0) {
+ if (i == num_devices && buf && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index f0e57aefb5f2..d167652a6a23 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -114,7 +114,7 @@ struct subchannel {
struct schib_config config;
} __attribute__ ((aligned(8)));
-DECLARE_PER_CPU(struct irb, cio_irb);
+DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
#define to_subchannel(n) container_of(n, struct subchannel, dev)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3d2b20ee613f..39a2b0cde9e4 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1120,6 +1120,8 @@ device_initcall(cio_settle_init);
int sch_is_pseudo_sch(struct subchannel *sch)
{
+ if (!sch->dev.parent)
+ return 0;
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 18ab84e9c6b2..b65cab444802 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -758,6 +758,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
switch (state) {
case SLSB_P_OUTPUT_EMPTY:
+ case SLSB_P_OUTPUT_PENDING:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
@@ -1575,13 +1576,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
} else {
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q, 0);
- else
- qperf_inc(q, fast_requeue);
+ rc = qdio_kick_outbound_q(q, 0);
}
/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 35286907c636..d0090c5c88e7 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
return -ENOMEM;
}
irq_ptr_qs[i] = q;
+ INIT_LIST_HEAD(&q->entry);
}
return 0;
}
@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
+ INIT_LIST_HEAD(&q->entry);
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 30e9fbbff051..debe69adfc70 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -80,7 +80,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
- xchg(irq_ptr->dsci, 1 << 7);
}
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -88,14 +87,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
struct qdio_q *q;
q = irq_ptr->input_qs[0];
- /* if establish triggered an error */
- if (!q || !q->entry.prev || !q->entry.next)
+ if (!q)
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&q->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
+ INIT_LIST_HEAD(&q->entry);
}
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ad17fc5883f6..e22b9ac3e564 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
if (priv->channel[direction] == NULL) {
if (direction == CTCM_WRITE)
channel_free(priv->channel[CTCM_READ]);
+ result = -ENODEV;
goto out_dev;
}
priv->channel[direction]->netdev = dev;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 58404e69aa4b..51152681aba6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -991,7 +991,10 @@ static int __qeth_l2_open(struct net_device *dev)
if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
napi_enable(&card->napi);
+ local_bh_disable();
napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
} else
rc = -EIO;
return rc;
@@ -2124,7 +2127,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid)
+ if (l2entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&l2entry->nit,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e6ba1baf9c4..b40a61d9ad9e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3005,7 +3005,10 @@ static int __qeth_l3_open(struct net_device *dev)
if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
napi_enable(&card->napi);
+ local_bh_disable();
napi_schedule(&card->napi);
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
} else
rc = -EIO;
return rc;
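Both qeth hunks wrap napi_schedule() in a bh-disabled section because the open callback runs in process context: napi_schedule() only raises NET_RX_SOFTIRQ, and without a bh-enable transition nothing would run the poll loop until the next interrupt. A kernel-style sketch of the pattern:

#include <linux/netdevice.h>
#include <linux/bottom_half.h>

/* Kick NAPI from process context: the pending softirq is executed on
 * the local_bh_enable() transition instead of waiting for the next
 * hardware interrupt. */
static void demo_kick_napi(struct napi_struct *napi)
{
	napi_enable(napi);
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
}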
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index b6caad0fee24..c53ea0ac5f46 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -93,11 +93,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (req->fsf_command != FSF_QTCB_FCP_CMND) {
- rec->pl_len = q_head->log_length;
- zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
- rec->pl_len, "fsf_res", req->req_id);
- }
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 2abcd331b05d..d8aee54f6c26 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
+#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
@@ -177,9 +178,6 @@ static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}
break;
- default:
- need = 0;
- break;
}
return need;
@@ -244,6 +242,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
+ if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+ need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+ return NULL;
+
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
@@ -652,6 +656,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *id)
{
@@ -729,7 +747,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter)
adapter->peer_d_id);
if (IS_ERR(port)) /* error or port already attached */
return;
- _zfcp_erp_port_reopen(port, 0, "ereptp1");
+ zfcp_erp_port_reopen(port, 0, "ereptp1");
}
static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
@@ -1306,6 +1324,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b326f05c7f89..aeb93478482f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -68,6 +68,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
@@ -159,6 +161,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
extern int zfcp_unit_add(struct zfcp_port *, u64);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688af179b..f7630cf581cd 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -238,10 +238,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
list_for_each_entry(port, &adapter->port_list, list) {
if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
zfcp_fc_test_link(port);
- if (!port->d_id)
- zfcp_erp_port_reopen(port,
- ZFCP_STATUS_COMMON_ERP_FAILED,
- "fcrscn1");
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
@@ -249,6 +245,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+ struct zfcp_adapter *adapter = fsf_req->adapter;
struct fc_els_rscn *head;
struct fc_els_rscn_page *page;
u16 i;
@@ -261,6 +258,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
/* see FC-FS */
no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
+ if (no_entries > 1) {
+ /* handle failed ports */
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ read_lock_irqsave(&adapter->port_list_lock, flags);
+ list_for_each_entry(port, &adapter->port_list, list) {
+ if (port->d_id)
+ continue;
+ zfcp_erp_port_reopen(port,
+ ZFCP_STATUS_COMMON_ERP_FAILED,
+ "fcrscn1");
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ }
+
for (i = 1; i < no_entries; i++) {
/* skip head and start with 1st element */
page++;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 1964391db904..a3aaef4c53a3 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -20,6 +20,11 @@
struct kmem_cache *zfcp_fsf_qtcb_cache;
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -231,10 +236,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
case FSF_STATUS_READ_SENSE_DATA_AVAIL:
break;
case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
- dev_warn(&adapter->ccw_device->dev,
- "The error threshold for checksum statistics "
- "has been exceeded\n");
zfcp_dbf_hba_bit_err("fssrh_3", req);
+ if (ber_stop) {
+ dev_warn(&adapter->ccw_device->dev,
+ "All paths over this FCP device are disused because of excessive bit errors\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+ } else {
+ dev_warn(&adapter->ccw_device->dev,
+ "The error threshold for checksum statistics has been exceeded\n");
+ }
break;
case FSF_STATUS_READ_LINK_DOWN:
zfcp_fsf_status_read_link_down(req);
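The new ber_stop switch is a plain module parameter; the boilerplate for such a runtime-writable policy knob looks like this (names here are placeholders, not zfcp's):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* A bool parameter with mode 0600 shows up under
 * /sys/module/<module>/parameters/ and can be flipped by root at
 * runtime; the driver just tests the variable when the event fires. */
static bool demo_strict = true;
module_param(demo_strict, bool, 0600);
MODULE_PARM_DESC(demo_strict,
		 "Shut the device down on excessive bit errors (default on)");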
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 3afb200b2829..68146b398603 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -124,6 +124,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
zfcp_sdev->erp_action.port = port;
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (zfcp_sysfs_port_is_removing(port)) {
+ /* port is already gone */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
+ return -ENXIO;
+ }
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
@@ -326,6 +335,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 96a0be13e841..5df597d1b978 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -237,6 +237,53 @@ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+static void zfcp_sysfs_port_set_removing(struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ atomic_set(&port->units, -1);
+}
+
+bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port)
+{
+ lockdep_assert_held(&zfcp_sysfs_port_units_mutex);
+ return atomic_read(&port->units) == -1;
+}
+
+static bool zfcp_sysfs_port_in_use(struct zfcp_port *const port)
+{
+ struct zfcp_adapter *const adapter = port->adapter;
+ unsigned long flags;
+ struct scsi_device *sdev;
+ bool in_use = true;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0)
+ goto unlock_port_units_mutex; /* zfcp_unit(s) under port */
+
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
+ const struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
+ if (zsdev->port != port)
+ continue;
+ /* alive scsi_device under port of interest */
+ goto unlock_host_lock;
+ }
+
+ /* port is about to be removed, so no more unit_add or slave_alloc */
+ zfcp_sysfs_port_set_removing(port);
+ in_use = false;
+
+unlock_host_lock:
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+unlock_port_units_mutex:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return in_use;
+}
+
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -259,15 +306,11 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;
- mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) > 0) {
+ if (zfcp_sysfs_port_in_use(port)) {
retval = -EBUSY;
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
goto out;
}
- /* port is about to be removed, so no more unit_add */
- atomic_set(&port->units, -1);
- mutex_unlock(&zfcp_sysfs_port_units_mutex);
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 9310a547b89f..1435029ade55 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -123,7 +123,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
int retval = 0;
mutex_lock(&zfcp_sysfs_port_units_mutex);
- if (atomic_read(&port->units) == -1) {
+ if (zfcp_sysfs_port_is_removing(port)) {
/* port is already gone */
retval = -ENODEV;
goto out;
@@ -167,8 +167,14 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);
+ /*
+ * lock order: shost->scan_mutex before zfcp_sysfs_port_units_mutex
+ * due to zfcp_unit_scsi_scan() => zfcp_scsi_slave_alloc()
+ */
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
zfcp_unit_scsi_scan(unit);
+ return retval;
out:
mutex_unlock(&zfcp_sysfs_port_units_mutex);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 17b1574920fd..941e3f25b4a9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -986,7 +986,7 @@ config SCSI_SNI_53C710
config 53C700_LE_ON_BE
bool
- depends on SCSI_LASI700
+ depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc5ef66..27270631c70c 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -617,16 +617,15 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+ if (status_byte(cmd->result) != GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_host_byte(cmd, DID_ERROR);
- } else
+ } else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ }
hostdata->sensing = NULL;
}
- hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
cmd->scsi_done(cmd);
}
@@ -813,6 +812,8 @@ static void NCR5380_main(struct work_struct *work)
NCR5380_information_transfer(instance);
done = 0;
}
+ if (!hostdata->connected)
+ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
spin_unlock_irq(&hostdata->lock);
if (!done)
cond_resched();
@@ -1086,7 +1087,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
/* Command was aborted */
NCR5380_write(MODE_REG, MR_BASE);
- goto out;
+ return NULL;
}
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1135,7 +1136,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- goto out;
+ return NULL;
}
dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1208,8 +1209,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_reselect(instance);
- if (!hostdata->connected)
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
goto out;
}
@@ -1217,14 +1216,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (err < 0) {
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
/* Can't touch cmd if it has been reclaimed by the scsi ML */
- if (hostdata->selecting) {
- cmd->result = DID_BAD_TARGET << 16;
- complete_cmd(instance, cmd);
- dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
- cmd = NULL;
- }
+ if (!hostdata->selecting)
+ return NULL;
+
+ cmd->result = DID_BAD_TARGET << 16;
+ complete_cmd(instance, cmd);
+ dsprintk(NDEBUG_SELECTION, instance,
+ "target did not respond within 250ms\n");
+ cmd = NULL;
goto out;
}
@@ -1252,12 +1253,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (err < 0) {
shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
goto out;
}
if (!hostdata->selecting) {
do_abort(instance);
- goto out;
+ return NULL;
}
dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
@@ -1797,6 +1797,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
return;
#endif
case PHASE_DATAIN:
@@ -1879,6 +1880,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd, scmd_id(cmd), cmd->device->lun);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result &= ~0xffff;
cmd->result |= cmd->SCp.Status;
@@ -1903,9 +1905,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
@@ -1937,8 +1936,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -2043,10 +2040,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return;
}
msgout = NOP;
@@ -2106,8 +2103,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(MODE_REG, MR_BASE);
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
- dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+ if (!target_mask || target_mask & (target_mask - 1)) {
+ shost_printk(KERN_WARNING, instance,
+ "reselect: bad target_mask 0x%02x\n", target_mask);
+ return;
+ }
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2121,6 +2121,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(instance,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
}
@@ -2132,6 +2133,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (NCR5380_poll_politely(instance,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+ /* BUS FREE phase */
+ return;
+ shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
do_abort(instance);
return;
}
@@ -2193,13 +2198,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
"reselect: removed %p from disconnected queue\n", tmp);
} else {
+ int target = ffs(target_mask) - 1;
+
shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
target_mask, lun);
/*
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- do_abort(instance);
+ if (do_abort(instance) == 0)
+ hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2361,15 +2369,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->autosense, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
out:
if (result == FAILED)
dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
- else
+ else {
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+ }
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
@@ -2397,7 +2406,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
spin_lock_irqsave(&hostdata->lock, flags);
#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
+ shost_printk(KERN_INFO, instance, __func__);
#endif
NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_dprint_phase(NDEBUG_ANY, instance);
@@ -2415,10 +2424,13 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- if (list_del_cmd(&hostdata->unissued, cmd)) {
+ list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
+
cmd->result = DID_RESET << 16;
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->unissued);
if (hostdata->selecting) {
hostdata->selecting->result = DID_RESET << 16;
@@ -2437,7 +2449,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
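Several of the NCR5380 hunks add hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun) to completion paths that previously skipped it; the busy table is simply a per-target bitmap of outstanding LUNs, and a stale bit blocks that LUN forever. A toy sketch of the bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define MAX_TARGETS	8

static uint8_t busy[MAX_TARGETS];	/* one bit per LUN, per target */

static void lun_busy_set(int target, int lun)
{
	busy[target] |= 1u << lun;
}

static void lun_busy_clear(int target, int lun)
{
	/* Must run on every completion path: normal done, abort, reset. */
	busy[target] &= ~(1u << lun);
}

int main(void)
{
	lun_busy_set(3, 2);
	lun_busy_clear(3, 2);
	printf("target 3 busy map: 0x%02x\n", busy[3]);	/* 0x00 */
	return 0;
}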
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 64ab9eaec428..def3208dd290 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2321,7 +2321,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
* At some speeds, we only support
* ST transfers.
*/
- if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
break;
}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94ea52b..9dc4b689f94b 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -753,7 +753,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -762,8 +762,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index d0a504af5b4f..0a70d54a4df6 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -283,8 +283,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 5ff9f89c17c7..39b2f60149d9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -829,7 +829,7 @@ ret_err_rqe:
((u64)err_entry->data.err_warn_bitmap_hi << 32) |
(u64)err_entry->data.err_warn_bitmap_lo;
for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
- if (err_warn_bit_map & (u64) (1 << i)) {
+ if (err_warn_bit_map & ((u64)1 << i)) {
err_warn = i;
break;
}
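The bnx2fc one-liner is the classic shift-width trap: (u64)(1 << i) still performs the shift in 32-bit int, which is undefined for i >= 31, whereas (u64)1 << i widens first. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t map = (uint64_t)1 << 40;	/* only bit 40 set */
	int i, hit = -1;

	for (i = 0; i < 64; i++) {
		/* Cast before the shift so bits 32..63 are reachable. */
		if (map & ((uint64_t)1 << i)) {
			hit = i;
			break;
		}
	}

	printf("first set bit: %d\n", hit);	/* 40 */
	return 0;
}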
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 133901fd3e35..6d1f8b22bd83 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index dbe416ff46c2..776b99278688 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -648,7 +648,7 @@ csio_shost_init(struct csio_hw *hw, struct device *dev,
if (csio_lnode_init(ln, hw, pln))
goto err_shost_put;
- if (scsi_add_host(shost, dev))
+ if (scsi_add_host_with_dma(shost, dev, &hw->pdev->dev))
goto err_lnode_exit;
return ln;
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index be5ee2d37815..957767d38361 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -377,13 +378,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -404,6 +405,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -483,13 +485,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -504,6 +506,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -534,13 +537,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
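The csio_lnode changes switch to the irqsave/irqrestore spinlock variants because these FDMI completion callbacks may already run with interrupts disabled; the plain _irq variants would re-enable interrupts unconditionally on unlock. A kernel-style sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe in any context: the caller's interrupt state is saved in
 * 'flags' and restored as-is, instead of being forced back on. */
static void demo_completion(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... update state shared with the interrupt path ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}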
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 89a52b941ea8..5db57671fa28 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
return -EINVAL;
/* Delete NPIV lnodes */
- csio_lnodes_exit(hw, 1);
+ csio_lnodes_exit(hw, 1);
/* Block upper IOs */
csio_lnodes_block_request(hw);
@@ -1713,8 +1713,11 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
}
out:
- if (req->nsge > 0)
+ if (req->nsge > 0) {
scsi_dma_unmap(cmnd);
+ if (req->dcopy && (host_status == DID_OK))
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
cmnd->result = (((host_status) << 16) | scsi_status);
cmnd->scsi_done(cmnd);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 2ffe029ff2b6..3d6e653f5147 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
cxgbi_hbas_remove(cdev);
cxgbi_device_portmap_cleanup(cdev);
- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->cdev2ppm)
+ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
cxgbi_free_big_mem(cdev->pmap.port_csk);
kfree(cdev);
@@ -637,6 +638,10 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
if (ndev->flags & IFF_LOOPBACK) {
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+ if (!ndev) {
+ err = -ENETUNREACH;
+ goto rel_neigh;
+ }
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
n->dev->name, ndev->name, mtu);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 5ee7f44cf869..830b2d2dcf20 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -1972,6 +1972,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
xferred -= psge->length;
} else {
/* Partial SG entry done */
+ pci_dma_sync_single_for_cpu(srb->dcb->
+ acb->dev,
+ srb->sg_bus_addr,
+ SEGMENTX_LEN,
+ PCI_DMA_TODEVICE);
psge->length -= xferred;
psge->address += xferred;
srb->sg_index = idx;
@@ -3450,14 +3455,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), dir);
-
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
+ pci_unmap_srb(acb, srb);
+
if (cmd->cmnd[0] == INQUIRY) {
unsigned char *base = NULL;
struct ScsiInqData *ptr;
@@ -3511,7 +3514,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
cmd, cmd->result);
srb_free_insert(acb, srb);
}
- pci_unmap_srb(acb, srb);
cmd->scsi_done(cmd);
waiting_process_next(acb);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7645949024d7..b54cfb10ee64 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -53,6 +53,7 @@
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5
+#define ALUA_RTPG_RETRY_DELAY 2
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
@@ -528,6 +529,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
+ bool transitioning_sense = false;
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +574,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
goto retry;
}
/*
- * Retry on ALUA state transition or if any
- * UNIT ATTENTION occurred.
+ * If the array returns with 'ALUA state transition'
+ * sense code here it cannot return RTPG data during
+ * transition. So set the state to 'transitioning' directly.
*/
if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
- err = SCSI_DH_RETRY;
- else if (sense_hdr.sense_key == UNIT_ATTENTION)
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+ transitioning_sense = true;
+ goto skip_rtpg;
+ }
+ /*
+ * Retry on any other UNIT ATTENTION occurred.
+ */
+ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +674,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
off = 8 + (desc[7] * 4);
}
+ skip_rtpg:
spin_lock_irqsave(&pg->lock, flags);
+ if (transitioning_sense)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
@@ -683,7 +695,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
case SCSI_ACCESS_STATE_TRANSITIONING:
if (time_before(jiffies, pg->expiry)) {
/* State transition, retry */
- pg->interval = 2;
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
err = SCSI_DH_RETRY;
} else {
struct alua_dh_data *h;
@@ -811,6 +823,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
+ if (!pg->interval)
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(alua_wq, &pg->rtpg_work,
pg->interval * HZ);
@@ -822,6 +836,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
+ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(alua_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index cc3994d4e7bc..3c2f34db937b 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1984,7 +1984,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
*/
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
- return (struct fcoe_rport *)(rdata + 1);
+ return container_of(rdata, struct fcoe_rport, rdata);
}
/**
@@ -2244,7 +2244,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
*/
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
@@ -2252,16 +2252,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct fip_wwn_desc *wwn = NULL;
struct fip_vn_desc *vn = NULL;
struct fip_size_desc *size = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2324,15 +2320,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
case FIP_DT_VN_ID:
if (dlen != sizeof(struct fip_vn_desc))
goto len_err;
vn = (struct fip_vn_desc *)desc;
memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+ frport->rdata.ids.port_name =
+ get_unaligned_be64(&vn->fd_wwpn);
break;
case FIP_DT_FC4F:
if (dlen != sizeof(struct fip_fc4_feat))
@@ -2670,16 +2668,13 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
goto drop;
@@ -2688,19 +2683,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_lock(&fip->ctlr_mutex);
switch (sub) {
case FIP_SC_VN_PROBE_REQ:
- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
break;
case FIP_SC_VN_PROBE_REP:
- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
break;
case FIP_SC_VN_CLAIM_NOTIFY:
- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
break;
case FIP_SC_VN_CLAIM_REP:
- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
break;
case FIP_SC_VN_BEACON:
- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ fcoe_ctlr_vn_beacon(fip, &frport.rdata);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2724,22 +2719,18 @@ drop:
*/
static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
struct fip_mac_desc *macd = NULL;
struct fip_wwn_desc *wwn = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2793,7 +2784,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2904,22 +2896,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vlan_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
goto drop;
}
mutex_lock(&fip->ctlr_mutex);
if (sub == FIP_SC_VL_REQ)
- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
mutex_unlock(&fip->ctlr_mutex);
drop:
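
The fcoe_ctlr.c hunks above stop treating the memory just past fc_rport_priv as a fcoe_rport and instead embed the rdata member inside struct fcoe_rport, recovering the container with container_of(). A minimal userspace sketch of that pattern follows; the struct names are illustrative stand-ins, not the driver's definitions, and container_of is simplified (no typeof check).

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: recover the enclosing structure from a pointer
 * to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rport_priv {          /* stands in for fc_rport_priv */
    unsigned int port_id;
};

struct fcoe_rport_example {  /* stands in for fcoe_rport: rdata is embedded */
    struct rport_priv rdata;
    unsigned char vn_mac[6];
};

int main(void)
{
    struct fcoe_rport_example frport = { .rdata = { .port_id = 0x010203 } };
    struct rport_priv *rdata = &frport.rdata;

    /* Given only the embedded member, get back to the container. */
    struct fcoe_rport_example *back =
        container_of(rdata, struct fcoe_rport_example, rdata);

    printf("same object: %s\n", back == &frport ? "yes" : "no");
    return 0;
}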
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c056b8111ad2..7bf6102b4c3d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -445,6 +445,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f3e197..c5b89a003d2a 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -445,26 +445,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
@@ -489,38 +489,32 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
- printk(KERN_ERR
- "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
- printk(KERN_ERR
- "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
- err);
+ pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
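
The vnic_dev.c hunks replace the uninitialized a0/a1 scratch words with a zero-initialized u64 a[2] before byte-copying the 6-byte MAC address into it, so the bytes the MAC does not cover are not stack garbage when the pair is handed to the firmware command. A rough userspace sketch of that copy; vnic_dev_cmd() is stubbed out as the hypothetical fake_dev_cmd().

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Stub standing in for vnic_dev_cmd(); it only prints the scratch words. */
static int fake_dev_cmd(uint64_t *a0, uint64_t *a1)
{
    printf("a0=0x%016llx a1=0x%016llx\n",
           (unsigned long long)*a0, (unsigned long long)*a1);
    return 0;
}

static int add_addr(const uint8_t *addr)
{
    uint64_t a[2] = { 0 };   /* zero-init: bytes the MAC does not cover stay 0 */
    int i;

    for (i = 0; i < ETH_ALEN; i++)
        ((uint8_t *)a)[i] = addr[i];   /* MAC lands in the first 6 bytes of a[0] */

    return fake_dev_cmd(&a[0], &a[1]);
}

int main(void)
{
    uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

    return add_addr(mac);
}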
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f784e10..5252dd5d3f4b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,6 +10,7 @@
*/
#include "hisi_sas.h"
+#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"
#define DEV_IS_GONE(dev) \
@@ -1128,9 +1129,18 @@ static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ struct sas_phy_data *d = sphy->hostdata;
+
phy->phy_attached = 0;
phy->phy_type = 0;
phy->port = NULL;
+
+ if (d->enable)
+ sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ else
+ sphy->negotiated_linkrate = SAS_PHY_DISABLED;
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 0b8db8a74d50..b82df8cdf962 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2236,6 +2236,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_SERV_RESPONSE_COMPLETE:
switch (c2->error_data.status) {
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ if (cmd)
+ cmd->result = 0;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2423,8 +2425,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0))
+ c2->error_data.status == 0)) {
+ cmd->result = 0;
return hpsa_cmd_free_and_done(h, c, cmd);
+ }
/*
* Any RAID offload error results in retry which will use
@@ -4815,7 +4819,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
curr_sg->reserved[0] = 0;
curr_sg->reserved[1] = 0;
curr_sg->reserved[2] = 0;
- curr_sg->chain_indicator = 0x80;
+ curr_sg->chain_indicator = IOACCEL2_CHAIN;
curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
}
@@ -4832,6 +4836,11 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
curr_sg++;
}
+ /*
+ * Set the last s/g element bit
+ */
+ (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
+
switch (cmd->sc_data_direction) {
case DMA_TO_DEVICE:
cp->direction &= ~IOACCEL2_DIRECTION_MASK;
@@ -5507,6 +5516,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
c = cmd_tagged_alloc(h, cmd);
/*
+ * This is necessary because the SML doesn't zero out this field during
+ * error recovery.
+ */
+ cmd->result = 0;
+
+ /*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
*/
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 5961705eef76..39bcbec93c60 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -516,6 +516,7 @@ struct ioaccel2_sg_element {
u8 reserved[3];
u8 chain_indicator;
#define IOACCEL2_CHAIN 0x80
+#define IOACCEL2_LAST_SG 0x40
};
/*
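
The hpsa hunks replace the magic 0x80 with the named IOACCEL2_CHAIN flag and add IOACCEL2_LAST_SG, which is set on the final scatter-gather element once the fill loop has advanced past it. A small sketch of that "tag the last element after the loop" idiom; the flag values are copied from the hpsa_cmd.h hunk, the struct is an illustrative stand-in.

#include <stdio.h>

#define IOACCEL2_CHAIN   0x80   /* flag values as defined in the hpsa_cmd.h hunk */
#define IOACCEL2_LAST_SG 0x40

struct sg_element {             /* illustrative stand-in for ioaccel2_sg_element */
    unsigned long address;
    unsigned int  length;
    unsigned char chain_indicator;
};

int main(void)
{
    struct sg_element sg[4];
    struct sg_element *curr_sg = sg;
    int i, nents = 4;

    for (i = 0; i < nents; i++) {
        curr_sg->address = 0x1000ul * i;  /* fake buffer addresses/lengths */
        curr_sg->length = 512;
        curr_sg->chain_indicator = 0;     /* IOACCEL2_CHAIN would mark a chain descriptor */
        curr_sg++;
    }

    /* after the fill loop curr_sg points one past the end, so back up one
     * entry to tag the final element */
    (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;

    for (i = 0; i < nents; i++)
        printf("sg[%d] chain_indicator=0x%02x\n", i, sg[i].chain_indicator);
    return 0;
}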
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c78279c..54dea767dfde 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4883,8 +4883,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
- ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_free_event_pool(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index c5bc41d97f84..7760b9a1e0ae 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9818,6 +9818,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->max_devs_supported = ipr_max_devs;
if (ioa_cfg->sis64) {
+ host->max_channel = IPR_MAX_SIS64_BUSES;
host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
@@ -9826,6 +9827,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+ ((sizeof(struct ipr_config_table_entry64)
* ioa_cfg->max_devs_supported)));
} else {
+ host->max_channel = IPR_VSET_BUS;
host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
@@ -9835,7 +9837,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
* ioa_cfg->max_devs_supported)));
}
- host->max_channel = IPR_VSET_BUS;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
host->can_queue = ioa_cfg->max_cmds;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053d01b3..5b2388266c4c 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1306,6 +1306,7 @@ struct ipr_resource_entry {
#define IPR_ARRAY_VIRTUAL_BUS 0x1
#define IPR_VSET_VIRTUAL_BUS 0x2
#define IPR_IOAFP_VIRTUAL_BUS 0x3
+#define IPR_MAX_SIS64_BUSES 0x4
#define IPR_GET_RES_PHYS_LOC(res) \
(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76fd4420..6bbf2945a3e0 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3500,6 +3500,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
case START_STOP:
scb->scsi_cmd->result = DID_OK << 16;
+ break;
case TEST_UNIT_READY:
case INQUIRY:
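
The ips.c hunk adds the missing break after the START_STOP case so it no longer falls through into the TEST_UNIT_READY/INQUIRY handling. A tiny sketch of the fall-through behaviour being fixed, with purely illustrative case values:

#include <stdio.h>
#include <string.h>

/* Without the break, case 1 falls through into case 2 and both paths run. */
static void describe(int op, int with_break, char *buf, size_t len)
{
    buf[0] = '\0';

    switch (op) {
    case 1:
        strncat(buf, "handled START_STOP", len - strlen(buf) - 1);
        if (with_break)
            break;                  /* the fix: stop here */
        /* fall through */
    case 2:
        strncat(buf, " + ran TEST_UNIT_READY path", len - strlen(buf) - 1);
        break;
    }
}

int main(void)
{
    char buf[64];

    describe(1, 0, buf, sizeof(buf));
    printf("without break: %s\n", buf);

    describe(1, 1, buf, sizeof(buf));
    printf("with break:    %s\n", buf);
    return 0;
}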
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 609dafd661d1..da4583a2fa23 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
* the task management request.
* @task_request: the handle to the task request object to start.
*/
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
"%s: SCIC Controller starting task from invalid "
"state\n",
__func__);
- return SCI_TASK_FAILURE_INVALID_STATE;
+ return SCI_FAILURE_INVALID_STATE;
}
status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1abae1..15dc6e0d8deb 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -490,7 +490,7 @@ enum sci_status sci_controller_start_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b20880..7d71ca421751 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
if (status == SCI_SUCCESS) {
if (ireq->stp.rsp.status & ATA_ERR)
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
}
if (status != SCI_SUCCESS) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0c1fc8..fb6eba331ac6 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_tmf *tmf, unsigned long timeout_ms)
{
DECLARE_COMPLETION_ONSTACK(completion);
- enum sci_task_status status = SCI_TASK_FAILURE;
+ enum sci_status status = SCI_FAILURE;
struct isci_request *ireq;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* start the TMF io. */
status = sci_controller_start_task(ihost, idev, ireq);
- if (status != SCI_TASK_SUCCESS) {
+ if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: start_io failed - status = 0x%x, request = %p\n",
__func__,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index ace4f1f41b8e..60c3e2bf8761 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -798,7 +798,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
- &addr, param, buf);
+ &addr,
+ (enum iscsi_param)param, buf);
default:
return iscsi_host_get_param(shost, param, buf);
}
@@ -881,6 +882,10 @@ free_host:
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+
+ if (WARN_ON_ONCE(session->leadconn))
+ return;
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31ad5ec0..d0a86ef80652 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2506,7 +2506,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
- FC_LPORT_DBG(lport, "Receiving frames for an lport that "
+ FC_LIBFC_DBG("Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e3ffd244603e..70e2958a69a0 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -127,12 +127,15 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
u32 port_id)
{
struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
rdata = lport->tt.rport_lookup(lport, port_id);
if (rdata)
return rdata;
- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
@@ -1935,7 +1938,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
- rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 2ffe10453e30..c4336b01db23 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1982,7 +1982,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
- spin_lock(&session->frwd_lock);
+ spin_lock_bh(&session->frwd_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
@@ -2109,7 +2109,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
done:
if (task)
task->last_timeout = jiffies;
- spin_unlock(&session->frwd_lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "shutdown or nh");
return rc;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..b200edc665a5 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -97,12 +97,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
- } else {
+ } else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
+ } else {
+ /* If the oob mode is OOB_NOT_CONNECTED, the port is
+ * disconnected due to race with PHY down. We cannot
+ * continue to discover this port
+ */
+ sas_put_device(dev);
+ pr_warn("Port %016llx is disconnected when discovering\n",
+ SAS_ADDR(port->attached_sas_addr));
+ return -ENODEV;
}
sas_init_dev(dev);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 7be581f7c35d..7e8274938a3e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -47,17 +47,16 @@ static void smp_task_timedout(unsigned long _task)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -604,7 +603,14 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
}
res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
-
+ if (res) {
+ pr_err("ex %016llx phy%02d PHY control failed: %d\n",
+ SAS_ADDR(dev->sas_addr), phy_id, res);
+ } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) {
+ pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n",
+ SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]);
+ res = pc_resp[2];
+ }
kfree(pc_resp);
kfree(pc_req);
return res;
@@ -807,6 +813,26 @@ static struct domain_device *sas_ex_discover_end_dev(
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
+ if (child->linkrate > parent->min_linkrate) {
+ struct sas_phy_linkrates rates = {
+ .maximum_linkrate = parent->min_linkrate,
+ .minimum_linkrate = parent->min_linkrate,
+ };
+ int ret;
+
+ pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ ret = sas_smp_phy_control(parent, phy_id,
+ PHY_FUNC_LINK_RESET, &rates);
+ if (ret) {
+ pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
+ SAS_ADDR(child->sas_addr), phy_id, ret);
+ goto out_free;
+ }
+ pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ child->linkrate = child->min_linkrate;
+ }
res = sas_get_ata_info(child, phy);
if (res)
goto out_free;
@@ -979,6 +1005,8 @@ static struct domain_device *sas_ex_discover_expander(
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
sas_put_device(child);
+ sas_port_delete(phy->port);
+ phy->port = NULL;
return NULL;
}
list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -2028,6 +2056,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
+ /*
+ * Even though the PHY is empty, for convenience we discover
+ * the PHY to update the PHY info, like negotiated linkrate.
+ */
+ sas_ex_phy_discover(dev, phy_id);
return res;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
@@ -2043,14 +2076,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
return res;
}
- /* delete the old link */
- if (SAS_ADDR(phy->attached_sas_addr) &&
- SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
- SAS_ADDR(dev->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr));
- sas_unregister_devs_sas_addr(dev, phy_id, last);
- }
+ /* we always have to delete the old device when we get here */
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
}
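
The sas_expander.c change to smp_task_timedout() moves the complete() call under the "not already done" check, so the completion fires exactly once even when the normal done path races with the timer. A minimal sketch of that guard; it drops the spinlock and timer of the real code and uses made-up struct and function names.

#include <stdbool.h>
#include <stdio.h>

struct slow_task {
    bool done;          /* set by the normal completion path */
    bool aborted;       /* set by the timeout path */
    int  completions;   /* counts how many times "complete()" ran */
};

static void complete(struct slow_task *t) { t->completions++; }

/* normal completion: mark done, then complete */
static void task_done(struct slow_task *t)
{
    t->done = true;
    complete(t);
}

/* timeout handler: only abort-and-complete if the task is not done yet */
static void task_timedout(struct slow_task *t)
{
    if (!t->done) {
        t->aborted = true;
        complete(t);
    }
}

int main(void)
{
    struct slow_task t = { 0 };

    task_done(&t);      /* task finished normally ...        */
    task_timedout(&t);  /* ... then the timer fires anyway   */

    printf("completions=%d aborted=%d\n", t.completions, t.aborted);
    /* prints completions=1: the guard prevents a double completion */
    return 0;
}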
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859464f6..f0f6d71d28b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -878,7 +878,8 @@ struct lpfc_hba {
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
-#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
+#define LPFC_MAX_VPI 0xFF /* Max number VPI supported 0 - 0xff */
+#define LPFC_MAX_VPORTS 0x100 /* Max vports per port, with pport */
uint16_t max_vports; /*
* For IOV HBAs max_vpi can change
* after a reset. max_vports is max
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index cf15b9754402..aa0435b1ea1e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1214,6 +1214,9 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
if (mvpi)
*mvpi = max_vpi;
if (avpi)
@@ -1229,8 +1232,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
*axri = pmb->un.varRdConfig.avail_xri;
if (mvpi)
*mvpi = pmb->un.varRdConfig.max_vpi;
- if (avpi)
- *avpi = pmb->un.varRdConfig.avail_vpi;
+ if (avpi) {
+ /* avail_vpi is only valid if link is up and ready */
+ if (phba->link_state == LPFC_HBA_READY)
+ *avpi = pmb->un.varRdConfig.avail_vpi;
+ else
+ *avpi = pmb->un.varRdConfig.max_vpi;
+ }
}
mempool_free(pmboxq, phba->mbox_mem_pool);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2abd541..99f06ac7bf4c 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4352,12 +4352,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4394,6 +4388,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4441,6 +4442,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4ac03b16d17f..52afbcff362f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1561,6 +1561,9 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
strncpy(ae->un.AttrString,
"Emulex Corporation",
sizeof(ae->un.AttrString));
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4905455bbfc7..4901bf24916b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1141,6 +1141,7 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
if (!rc) {
@@ -1155,6 +1156,7 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
+ phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
}
@@ -3861,7 +3863,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -6789,7 +6791,10 @@ int
lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
{
struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
- rrq->nlp_DID);
+ rrq->nlp_DID);
+ if (!ndlp)
+ return 1;
+
if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
return lpfc_issue_els_rrq(rrq->vport, ndlp,
rrq->nlp_DID, rrq);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 81736457328a..6eaba1676846 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -901,7 +901,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_linkdown_port(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
- /* Clean up any firmware default rpi's */
+
+ /* Clean up any SLI3 firmware default rpi's */
+ if (phba->sli_rev > LPFC_SLI_REV3)
+ goto skip_unreg_did;
+
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
@@ -913,6 +917,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
}
}
+ skip_unreg_did:
/* Setup myDID for link up if we are in pt2pt mode */
if (phba->pport->fc_flag & FC_PT2PT) {
phba->pport->fc_myDID = 0;
@@ -1964,6 +1969,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
"failover and change port state:x%x/x%x\n",
phba->pport->port_state, LPFC_VPORT_UNKNOWN);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+
+ if (!phba->fcf.fcf_redisc_attempted) {
+ lpfc_unregister_fcf(phba);
+
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3195 Rediscover FCF table\n");
+ phba->fcf.fcf_redisc_attempted = 1;
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3196 Rediscover FCF table "
+ "failed. Status:x%x\n", rc);
+ }
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3197 Already rediscover FCF table "
+ "attempted. No more retry\n");
+ }
goto stop_flogi_current_fcf;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
@@ -4654,6 +4679,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mbox;
int rc;
+ /* Unreg DID is an SLI3 operation. */
+ if (phba->sli_rev > LPFC_SLI_REV3)
+ return;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e9ea8f4ea2c9..8c640bcf107b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4444,7 +4444,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
break;
}
/* If fast FCF failover rescan event is pending, do nothing */
- if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+ if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -6973,6 +6973,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_vpi =
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+ /* Limit the max we support */
+ if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
+ phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
phba->sli4_hba.max_cfg_param.vpi_base =
bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
phba->sli4_hba.max_cfg_param.max_rpi =
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 56a3df4fddb0..fefef2884d59 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -454,8 +454,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -759,9 +761,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!(vport->fc_flag & FC_PT2PT)) {
/* Check config parameter use-adisc or FCP-2 */
- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
- (ndlp->nlp_type & NLP_FCP_TARGET))) {
+ (ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa176dee..d489cc1018b5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2707,6 +2707,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ struct lpfc_vport *vport = phba->pport;
/*
* Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2812,6 +2813,14 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+ /*
+ * For First burst, we may need to adjust the initial transfer
+ * length for DIF
+ */
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+ (fcpdl < vport->cfg_first_burst_size))
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
return 0;
err:
if (lpfc_cmd->seg_cnt)
@@ -3364,6 +3373,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ struct lpfc_vport *vport = phba->pport;
/*
* Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
@@ -3480,6 +3490,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
/*
+ * For First burst, we may need to adjust the initial transfer
+ * length for DIF
+ */
+ if (iocb_cmd->un.fcpi.fcpi_XRdy &&
+ (fcpdl < vport->cfg_first_burst_size))
+ iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
+
+ /*
* If the OAS driver feature is enabled and the lun is enabled for
* OAS, set the oas iocb related flags.
*/
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c05fc61a383b..cbe808e83f47 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -11962,13 +11962,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -15989,6 +15995,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
@@ -16553,15 +16566,8 @@ next_priority:
goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
- return LPFC_FCOE_FCF_NEXT_NONE;
- else {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "3063 Only FCF available idx %d, flag %x\n",
- next_fcf_index,
- phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
- return next_fcf_index;
- }
+
+ return LPFC_FCOE_FCF_NEXT_NONE;
}
if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0b88b5703e0f..9c69c4215de3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -237,6 +237,7 @@ struct lpfc_fcf {
#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
+ uint16_t fcf_redisc_attempted;
uint32_t addr_mode;
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089b9397..5aa60bbbd09a 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -54,7 +54,7 @@ static int setup_cmd_per_lun = -1;
module_param(setup_cmd_per_lun, int, 0);
static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
-static int setup_use_pdma = -1;
+static int setup_use_pdma = 512;
module_param(setup_use_pdma, int, 0);
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
@@ -325,7 +325,7 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
struct NCR5380_hostdata *hostdata = shost_priv(instance);
if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < 16)
+ cmd->SCp.this_residual < setup_use_pdma)
return 0;
return cmd->SCp.this_residual;
@@ -378,7 +378,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 19bffe0b2cc0..2cbfec6a7466 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4219,11 +4219,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
pdev->subsystem_device == 0xC000)
- return -ENODEV;
+ goto out_disable_device;
/* Now check the magic signature byte */
pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
- return -ENODEV;
+ goto out_disable_device;
/* Ok it is probably a megaraid */
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5de024a50e15..9fa6a560b162 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2847,6 +2847,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
u32 size;
unsigned long buff_addr;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
@@ -2872,6 +2873,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
}
size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -3691,12 +3694,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
/*
* The cur_state should not last for more than max_wait secs
*/
- for (i = 0; i < (max_wait * 1000); i++) {
+ for (i = 0; i < max_wait * 50; i++) {
curr_abs_state = instance->instancet->
read_fw_status_reg(instance->reg_set);
if (abs_state == curr_abs_state) {
- msleep(1);
+ msleep(20);
} else
break;
}
@@ -3956,6 +3959,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
if (megasas_create_frame_pool(instance)) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
megasas_free_cmds(instance);
+ return -ENOMEM;
}
return 0;
@@ -3974,7 +3978,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
if (!instance->ctrl_context)
return KILL_ADAPTER;
else if (instance->unload ||
- test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+ &instance->reset_flags))
return IGNORE_TIMEOUT;
else
return INITIATE_OCR;
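
The megasas_transition_to_ready change keeps the same total wait budget but polls less often: max_wait * 1000 iterations of msleep(1) becomes max_wait * 50 iterations of msleep(20), i.e. max_wait seconds either way with 20x fewer wakeups. A trivial check of that arithmetic (max_wait value is illustrative):

#include <stdio.h>

int main(void)
{
    int max_wait = 180;  /* seconds; illustrative value, not from the driver */

    long old_iters = (long)max_wait * 1000, old_ms = old_iters * 1;
    long new_iters = (long)max_wait * 50,   new_ms = new_iters * 20;

    printf("old: %ld wakeups, %ld ms total\n", old_iters, old_ms);
    printf("new: %ld wakeups, %ld ms total\n", new_iters, new_ms);
    /* both totals are 180000 ms; only the wakeup count changes */
    return 0;
}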
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fe1a20973e47..874e5a7f7998 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3438,6 +3438,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
msleep(1000);
@@ -3594,7 +3595,7 @@ fail_kill_adapter:
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
}
out:
- clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
return retval;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e3bee04c1eb1..034653d93365 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -93,6 +93,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define THRESHOLD_REPLY_COUNT 50
#define JBOD_MAPS_COUNT 2
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb42ce6..6ccde2b41517 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1707,9 +1707,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
u64 consistent_dma_mask;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
if (ioc->dma_mask)
- consistent_dma_mask = DMA_BIT_MASK(64);
+ consistent_dma_mask = DMA_BIT_MASK(dma_mask);
else
consistent_dma_mask = DMA_BIT_MASK(32);
@@ -1717,11 +1719,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
const uint64_t required_mask =
dma_get_required_mask(&pdev->dev);
if ((required_mask > DMA_BIT_MASK(32)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
!pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
+ ioc->dma_mask = dma_mask;
goto out;
}
}
@@ -1747,7 +1749,7 @@ static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
@@ -3381,7 +3383,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
- if (ioc->dma_mask == 64) {
+ if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
pr_warn(MPT3SAS_FMT
"no suitable consistent DMA mask for %s\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd734fd76..a9fef0cd382b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -674,10 +674,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
out:
return r;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc127ac89..90a87e59ff60 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1465,7 +1465,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
" for diag buffers, requested size(%d)\n",
ioc->name, __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index ec48c010a3ba..aa2078d7e23e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3297,6 +3297,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
return _scsih_check_for_pending_tm(ioc, smid);
}
+/** _scsih_allow_scmd_to_device - check whether scmd needs to be
+ * issued to the IOC or not.
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ *
+ * Returns true if scmd can be issued to the IOC, otherwise false.
+ */
+inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
+{
+
+ if (ioc->pci_error_recovery)
+ return false;
+
+ if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
+ if (ioc->remove_host)
+ return false;
+
+ return true;
+ }
+
+ if (ioc->remove_host) {
+
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
/**
* _scsih_sas_control_complete - completion routine
@@ -4059,7 +4093,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- if (ioc->pci_error_recovery || ioc->remove_host) {
+ if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
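
The new _scsih_allow_scmd_to_device() lets only SYNCHRONIZE_CACHE and START_STOP through while the host is being removed and rejects everything else. A stripped-down sketch of that decision; it omits the MPI2_VERSION branch, uses a fake ioc struct, and spells out the standard SCSI opcode values.

#include <stdbool.h>
#include <stdio.h>

#define START_STOP        0x1b  /* SCSI START STOP UNIT */
#define SYNCHRONIZE_CACHE 0x35  /* SCSI SYNCHRONIZE CACHE (10) */

struct fake_ioc {
    bool pci_error_recovery;
    bool remove_host;
};

/* Sketch of the gating logic: during host removal only cache-flush and
 * start/stop commands are still allowed through. */
static bool allow_scmd_to_device(const struct fake_ioc *ioc, unsigned char opcode)
{
    if (ioc->pci_error_recovery)
        return false;

    if (!ioc->remove_host)
        return true;

    switch (opcode) {
    case SYNCHRONIZE_CACHE:
    case START_STOP:
        return true;
    default:
        return false;
    }
}

int main(void)
{
    struct fake_ioc ioc = { .pci_error_recovery = false, .remove_host = true };

    printf("SYNCHRONIZE_CACHE allowed: %d\n",
           allow_scmd_to_device(&ioc, SYNCHRONIZE_CACHE));
    printf("READ(10) 0x28 allowed:     %d\n",
           allow_scmd_to_device(&ioc, 0x28));
    return 0;
}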
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 10546faac58c..f374abfb7f1f 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
u32 producer_index;
void *pi_virt = circularQ->pi_virt;
+ /* A spurious interrupt can occur during setup if we are
+ * kexec-ing and the driver does a doorbell access with
+ * the pre-kexec oq interrupt setup.
+ */
+ if (!pi_virt)
+ break;
/* Update the producer index from SPC */
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index ce584c31d36e..e64a13f0bce1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -374,6 +374,13 @@ static int pm8001_task_exec(struct sas_task *task,
return 0;
}
pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+ if (pm8001_ha->controller_fatal_error) {
+ struct task_status_struct *ts = &t->task_status;
+
+ ts->resp = SAS_TASK_UNDELIVERED;
+ t->task_done(t);
+ return 0;
+ }
PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
spin_lock_irqsave(&pm8001_ha->lock, flags);
do {
@@ -466,7 +473,7 @@ err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
if (!sas_protocol_ata(t->task_proto))
if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+ dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
out_done:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 6628cc38316c..d8768ac41ebb 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -531,6 +531,7 @@ struct pm8001_hba_info {
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
+ bool controller_fatal_error;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index eb4fee61df72..df5f0bc29587 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -572,6 +572,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+ /* Update Fatal error interrupt vector */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ ((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1099,6 +1102,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
return -EBUSY;
}
+ /* Initialize the controller fatal error flag */
+ pm8001_ha->controller_fatal_error = false;
+
/* Initialize pci space address eg: mpi offset */
init_pci_device_addresses(pm8001_ha);
init_default_table_values(pm8001_ha);
@@ -1207,13 +1213,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
u32 bootloader_state;
u32 ibutton0, ibutton1;
- /* Check if MPI is in ready state to reset */
- if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
- return -1;
+ /* Process MPI table uninitialization only if FW is ready */
+ if (!pm8001_ha->controller_fatal_error) {
+ /* Check if MPI is in ready state to reset */
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "MPI state is not ready scratch1 :0x%x\n",
+ regval));
+ return -1;
+ }
}
-
/* checked for reset register normal state; 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -2358,6 +2368,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3717,6 +3729,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
}
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
@@ -3724,10 +3776,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
+ u32 regval;
+ if (vec == (pm8001_ha->number_of_intr - 1)) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+ SCRATCH_PAD_MIPSALL_READY) {
+ pm8001_ha->controller_fatal_error = true;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Firmware Fatal error! Regval:0x%x\n", regval));
+ print_scratchpad_registers(pm8001_ha);
+ return ret;
+ }
+ }
spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
do {
+ /* spurious interrupt during setup if kexec-ing and
+ * driver doing a doorbell access w/ the pre-kexec oq
+ * interrupt setup.
+ */
+ if (!circularQ->pi_virt)
+ break;
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 7a443bad6163..411b414a9a0e 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1288,6 +1288,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
+#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+ SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_RAAE_READY)
/* boot loader state */
#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
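
The pm80xx change ORs the individual ready bits into SCRATCH_PAD_MIPSALL_READY and treats anything less than all of them, read from MSGU_SCRATCH_PAD_1 on the last interrupt vector, as a firmware fatal error. The test is the usual "(reg & mask) == mask" idiom, sketched below; the IOP bit values come from the hunk, while SCRATCH_PAD_RAAE_READY is an illustrative placeholder since its real value is defined elsewhere in the header.

#include <stdio.h>

#define SCRATCH_PAD_IOP0_READY 0xC00    /* values from the pm80xx_hwi.h hunk */
#define SCRATCH_PAD_IOP1_READY 0x3000
#define SCRATCH_PAD_RAAE_READY 0x3      /* placeholder; real value not shown in the diff */

#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
                                   SCRATCH_PAD_IOP0_READY | \
                                   SCRATCH_PAD_RAAE_READY)

/* "All ready" means every bit in the mask is set, hence the compare against
 * the mask itself rather than against zero. */
static int all_mips_ready(unsigned int regval)
{
    return (regval & SCRATCH_PAD_MIPSALL_READY) == SCRATCH_PAD_MIPSALL_READY;
}

int main(void)
{
    printf("all bits set -> %d\n", all_mips_ready(SCRATCH_PAD_MIPSALL_READY));
    printf("IOP1 missing -> %d\n", all_mips_ready(SCRATCH_PAD_IOP0_READY |
                                                  SCRATCH_PAD_RAAE_READY));
    return 0;
}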
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9a34afcb1c4c..33f4181ba9f7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -345,7 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size;
+ ha->optrom_region_size = size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -418,7 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size;
+ ha->optrom_region_size = size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -682,7 +682,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
break;
} else {
/* Make sure FC side is not in reset */
- qla2x00_wait_for_hba_online(vha);
+ WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+ QLA_SUCCESS);
/* Issue MPI reset */
scsi_block_requests(vha->host);
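
The qla_attr.c fix records the option-ROM region size as size rather than start + size, so the recorded length (and the buffer allocated from it) matches the requested region instead of overshooting it. A userspace analogue of the sizing mix-up, with made-up offsets:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t start = 0x20000, size = 0x1000;  /* illustrative region offsets */

    /* buggy sizing: records (start + size) as the region length, so later
     * reads/writes would cover far more than the requested size */
    size_t bad_region_size  = start + size;
    /* fixed sizing: the recorded length and the buffer are just "size" */
    size_t good_region_size = size;

    unsigned char *buf = malloc(good_region_size);
    if (!buf)
        return 1;
    memset(buf, 0, good_region_size);

    printf("buggy region_size = 0x%zx bytes\n", bad_region_size);
    printf("fixed region_size = 0x%zx bytes\n", good_region_size);

    free(buf);
    return 0;
}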
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a6e086279f9..09f7a8cfed4d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -252,7 +252,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
srb_t *sp;
const char *type;
int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
@@ -336,6 +336,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -343,6 +345,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -426,7 +430,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
int req_sg_cnt, rsp_sg_cnt;
uint16_t loop_id;
struct fc_port *fcport;
@@ -1740,8 +1744,8 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
uint16_t nextlid = 0;
uint32_t tot_dsds;
srb_t *sp = NULL;
- uint32_t req_data_len = 0;
- uint32_t rsp_data_len = 0;
+ uint32_t req_data_len;
+ uint32_t rsp_data_len;
/* Check the type of the adapter */
if (!IS_BIDI_CAPABLE(ha)) {
@@ -1846,6 +1850,9 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
goto done_unmap_sg;
}
+ req_data_len = bsg_job->request_payload.payload_len;
+ rsp_data_len = bsg_job->reply_payload.payload_len;
+
if (req_data_len != rsp_data_len) {
rval = EXT_STATUS_BUSY;
ql_log(ql_log_warn, vha, 0x70aa,
@@ -1853,10 +1860,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
goto done_unmap_sg;
}
- req_data_len = bsg_job->request_payload.payload_len;
- rsp_data_len = bsg_job->reply_payload.payload_len;
-
-
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
if (!sp) {
@@ -1911,7 +1914,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
srb_t *sp;
int req_sg_cnt = 0, rsp_sg_cnt = 0;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 73c99f237b10..17b1525d492b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -973,8 +973,6 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
-
- qlt_async_event(mb[0], vha, mb);
break;
}
@@ -995,8 +993,6 @@ global_port_update:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
-
- qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -3089,7 +3085,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
ql_log(ql_log_fatal, vha, 0x00c8,
"Failed to allocate memory for ha->msix_entries.\n");
ret = -ENOMEM;
- goto msix_out;
+ goto free_irqs;
}
ha->flags.msix_enabled = 1;
@@ -3177,6 +3173,10 @@ msix_register_fail:
msix_out:
kfree(entries);
return ret;
+
+free_irqs:
+ pci_free_irq_vectors(ha->pdev);
+ goto msix_out;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index bf29ad454118..9bbea4223917 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5723,9 +5723,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mcp->mb[7] = LSW(MSD(req_dma));
mcp->mb[8] = MSW(addr);
/* Setting RAM ID to valid */
- mcp->mb[10] |= BIT_7;
/* For MCTP RAM ID is 0x40 */
- mcp->mb[10] |= 0x40;
+ mcp->mb[10] = BIT_7 | 0x40;
mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..104e13ae3428 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1600,8 +1600,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
{
struct qla82xx_uri_data_desc *uri_desc = NULL;
@@ -1612,7 +1611,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
return cpu_to_le32(uri_desc->size);
}
- return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+ return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}
static u8 *
@@ -1803,7 +1802,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
}
flashaddr = FLASH_ADDR_START;
- size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+ size = qla82xx_get_fw_size(ha) / 8;
ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
for (i = 0; i < size; i++) {
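
The qla_nx.c change reads the firmware size with get_unaligned_le32() instead of dereferencing a u32 cast of the byte array, avoiding both the unaligned access and the endianness confusion. A portable userspace equivalent of an unaligned little-endian 32-bit read (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Read a 32-bit little-endian value from an arbitrarily aligned byte buffer.
 * Assembling it byte by byte avoids unaligned access and fixes the byte order
 * regardless of host endianness. */
static uint32_t get_unaligned_le32_example(const uint8_t *p)
{
    return (uint32_t)p[0] |
           ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) |
           ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* pretend this is fw->data with the size field at an odd offset */
    const uint8_t blob[7] = { 0xaa, 0x00, 0x10, 0x00, 0x00, 0x55, 0x66 };

    printf("size = 0x%08x\n", get_unaligned_le32_example(&blob[1]));
    /* prints 0x00001000 on any host */
    return 0;
}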
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c813c9b75a10..65bbca715f57 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -451,6 +451,12 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
goto fail;
}
if (ql2xmultique_tag) {
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
+ if (unlikely(!ha->wq)) {
+ ql_log(ql_log_warn, vha, 0x01e0,
+ "Failed to alloc workqueue.\n");
+ goto fail;
+ }
/* create a request queue for IO */
options |= BIT_7;
req = qla25xx_create_req_que(ha, options, 0, 0, -1,
@@ -458,9 +464,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
if (!req) {
ql_log(ql_log_warn, vha, 0x00e0,
"Failed to create request queue.\n");
- goto fail;
+ goto fail2;
}
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
vha->req = ha->req_q_map[req];
options |= BIT_1;
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@@ -468,7 +473,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
if (!ret) {
ql_log(ql_log_warn, vha, 0x00e8,
"Failed to create response queue.\n");
- goto fail2;
+ goto fail3;
}
}
ha->flags.cpu_affinity_enabled = 1;
@@ -482,11 +487,13 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
ha->max_rsp_queues, ha->max_req_queues);
}
return 0;
-fail2:
+
+fail3:
qla25xx_delete_queues(vha);
- destroy_workqueue(ha->wq);
- ha->wq = NULL;
vha->req = ha->req_q_map[0];
+fail2:
+ destroy_workqueue(ha->wq);
+ ha->wq = NULL;
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -3077,6 +3084,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(vha);
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
/* Turn adapter off line */
vha->flags.online = 0;
@@ -6051,8 +6062,7 @@ qla2x00_module_init(void)
/* Initialize target kmem_cache and mem_pools */
ret = qlt_init();
if (ret < 0) {
- kmem_cache_destroy(srb_cachep);
- return ret;
+ goto destroy_cache;
} else if (ret > 0) {
/*
* If initiator mode is explicitly disabled by qlt_init(),
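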
@@ -6071,11 +6081,10 @@ qla2x00_module_init(void)
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
- kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
- qlt_exit();
- return -ENODEV;
+ ret = -ENODEV;
+ goto qlt_exit;
}
apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
@@ -6087,27 +6096,37 @@ qla2x00_module_init(void)
qla2xxx_transport_vport_template =
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
- kmem_cache_destroy(srb_cachep);
- qlt_exit();
- fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg_chrdev;
}
ql_log(ql_log_info, NULL, 0x0005,
"QLogic Fibre Channel HBA Driver: %s.\n",
qla2x00_version_str);
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
- kmem_cache_destroy(srb_cachep);
- qlt_exit();
- fc_release_transport(qla2xxx_transport_template);
- fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
"pci_register_driver failed...ret=%d Failing load!.\n",
ret);
+ goto release_vport_transport;
}
return ret;
+
+release_vport_transport:
+ fc_release_transport(qla2xxx_transport_vport_template);
+
+unreg_chrdev:
+ if (apidev_major >= 0)
+ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
+ fc_release_transport(qla2xxx_transport_template);
+
+qlt_exit:
+ qlt_exit();
+
+destroy_cache:
+ kmem_cache_destroy(srb_cachep);
+ return ret;
}
/**
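Note on the qla2x00_module_init() rework above: the repeated inline cleanup is collapsed into a single reverse-order unwind reached by gotos. A stand-alone sketch of that pattern, with invented resource names, assuming three allocations that must be undone in reverse order:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative init routine: each step that can fail jumps to a label
 * that undoes only the steps already completed, in reverse order. */
static int example_init(void)
{
	char *a, *b, *c;
	int ret = -1;

	a = malloc(16);
	if (!a)
		goto out;

	b = malloc(16);
	if (!b)
		goto free_a;

	c = malloc(16);
	if (!c)
		goto free_b;

	puts("all resources acquired");
	free(c);
	free(b);
	free(a);
	return 0;

free_b:
	free(b);
free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	return example_init() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The labels are ordered so that jumping to any of them frees exactly what has already been acquired, mirroring the release_vport_transport/unreg_chrdev/qlt_exit/destroy_cache chain in the hunk.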
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 11f45cb99892..b889caa556a0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -572,6 +572,7 @@ static void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
+ u16 cnt = 0;
while (!ACCESS_ONCE(sess->logout_completed)) {
if (!traced) {
@@ -581,6 +582,9 @@ static void qlt_free_session_done(struct work_struct *work)
traced = true;
}
msleep(100);
+ cnt++;
+ if (cnt > 200)
+ break;
}
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
@@ -5889,7 +5893,6 @@ static void qlt_abort_work(struct qla_tgt *tgt,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
unsigned long flags = 0, flags2 = 0;
- uint32_t be_s_id;
uint8_t s_id[3];
int rc;
@@ -5902,8 +5905,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
- (unsigned char *)&be_s_id);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
@@ -6339,7 +6341,8 @@ qlt_enable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
- qla2x00_wait_for_hba_online(base_vha);
+ WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
+ QLA_SUCCESS);
}
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -6369,7 +6372,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- qla2x00_wait_for_hba_online(vha);
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_tgt, vha, 0xe081,
+ "qla2x00_wait_for_hba_online() failed\n");
}
/*
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 0ad8ecef1e30..abdd6f93c8fe 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -821,38 +821,14 @@ static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item,
atomic_read(&tpg->lport_tpg_enabled));
}
-static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
-{
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
- struct tcm_qla2xxx_tpg, tpg_base_work);
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
- if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
- atomic_set(&base_tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(base_vha);
- }
- complete(&base_tpg->tpg_base_comp);
-}
-
-static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
-{
- struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
- struct tcm_qla2xxx_tpg, tpg_base_work);
- struct se_portal_group *se_tpg = &base_tpg->se_tpg;
- struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
-
- if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
- atomic_set(&base_tpg->lport_tpg_enabled, 0);
- target_undepend_item(&se_tpg->tpg_group.cg_item);
- }
- complete(&base_tpg->tpg_base_comp);
-}
-
static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = to_tpg(item);
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -871,24 +847,16 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
if (atomic_read(&tpg->lport_tpg_enabled))
return -EEXIST;
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
} else {
if (!atomic_read(&tpg->lport_tpg_enabled))
return count;
- INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
}
- init_completion(&tpg->tpg_base_comp);
- schedule_work(&tpg->tpg_base_work);
- wait_for_completion(&tpg->tpg_base_comp);
- if (op) {
- if (!atomic_read(&tpg->lport_tpg_enabled))
- return -ENODEV;
- } else {
- if (atomic_read(&tpg->lport_tpg_enabled))
- return -EPERM;
- }
return count;
}
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..8b70fa3105bd 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -48,9 +48,6 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
- /* Items for dealing with configfs_depend_item */
- struct completion tpg_base_comp;
- struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdff1b33..ea3b77ba12a2 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -641,9 +641,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f9f899ec9427..3fda5836aac6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3207,6 +3207,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
ep = iscsi_lookup_endpoint(transport_fd);
+ if (!ep)
+ return -EINVAL;
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
@@ -4148,7 +4150,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
- if (ha->fw_dump)
+ if (ha->fw_dump)
vfree(ha->fw_dump);
ha->queues_len = 0;
@@ -4283,7 +4285,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_SUCCESS;
mem_alloc_error_exit:
- qla4xxx_mem_free(ha);
return QLA_ERROR;
}
@@ -5937,7 +5938,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
val = rd_nvram_byte(ha, sec_addr);
if (val & BIT_7)
ddb_index[1] = (val & 0x7f);
-
+ goto exit_boot_info;
} else if (is_qla80XX(ha)) {
buf = dma_alloc_coherent(&ha->pdev->dev, size,
&buf_dma, GFP_KERNEL);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2b0e61557317..d7118d3767c3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4953,6 +4953,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 282ea00d0f87..9d555b63d2e2 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -249,6 +249,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 375cede0c534..c9bc6f058424 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
+ {"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index bd70339c1242..03d9855a6afd 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -16,57 +16,15 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
- char buffer[SCSI_LOG_SPOOLSIZE];
- unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
static char *scsi_log_reserve_buffer(size_t *len)
{
- struct scsi_log_buf *buf;
- unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
- unsigned long idx = 0;
-
- preempt_disable();
- buf = this_cpu_ptr(&scsi_format_log);
- idx = find_first_zero_bit(&buf->map, map_bits);
- if (likely(idx < map_bits)) {
- while (test_and_set_bit(idx, &buf->map)) {
- idx = find_next_zero_bit(&buf->map, map_bits, idx);
- if (idx >= map_bits)
- break;
- }
- }
- if (WARN_ON(idx >= map_bits)) {
- preempt_enable();
- return NULL;
- }
- *len = SCSI_LOG_BUFSIZE;
- return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+ *len = 128;
+ return kmalloc(*len, GFP_ATOMIC);
}
static void scsi_log_release_buffer(char *bufptr)
{
- struct scsi_log_buf *buf;
- unsigned long idx;
- int ret;
-
- buf = this_cpu_ptr(&scsi_format_log);
- if (bufptr >= buf->buffer &&
- bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
- idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
- ret = test_and_clear_bit(idx, &buf->map);
- WARN_ON(!ret);
- }
- preempt_enable();
+ kfree(bufptr);
}
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 27a6d3c6cb7c..67f6f134abc4 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -219,7 +219,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!sdev)
goto out;
@@ -796,7 +796,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
sdev->inquiry = kmemdup(inq_result,
max_t(size_t, sdev->inquiry_len, 36),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (sdev->inquiry == NULL)
return SCSI_SCAN_NO_RESPONSE;
@@ -1095,7 +1095,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (!sdev)
goto out;
- result = kmalloc(result_len, GFP_ATOMIC |
+ result = kmalloc(result_len, GFP_KERNEL |
((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
if (!result)
goto out_free_sdev;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 56b65b85b121..38830818bfb6 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -710,6 +710,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct kernfs_node *kn;
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ /*
+	 * We need to try to get the module, to avoid the module being
+	 * removed while the delete is in progress.
+ */
+ if (scsi_device_get(sdev))
+ return -ENODEV;
kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
WARN_ON_ONCE(!kn);
@@ -724,9 +732,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
* state into SDEV_DEL.
*/
device_remove_file(dev, attr);
- scsi_remove_device(to_scsi_device(dev));
+ scsi_remove_device(sdev);
if (kn)
sysfs_unbreak_active_protection(kn);
+ scsi_device_put(sdev);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0ff083bbf5b1..22472d140ef7 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -21,7 +21,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -30,15 +30,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -48,17 +51,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -73,19 +71,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -96,23 +87,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -127,8 +108,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -148,26 +129,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -182,7 +149,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -194,8 +161,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -209,21 +176,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
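Note on the scsi_trace.c hunks above: open-coded byte shifting is replaced by get_unaligned_be*() helpers, and a READ(6)/WRITE(6) TRANSFER LENGTH of zero is decoded as 256 blocks per SBC. A user-space sketch of the same CDB decoding (read_be16()/read_be32() are illustrative helpers and the sample CDBs are made up):

#include <stdint.h>
#include <stdio.h>

static unsigned int read_be16(const uint8_t *p)
{
	return ((unsigned int)p[0] << 8) | p[1];
}

static unsigned int read_be32(const uint8_t *p)
{
	return ((unsigned int)p[0] << 24) | ((unsigned int)p[1] << 16) |
	       ((unsigned int)p[2] << 8) | p[3];
}

int main(void)
{
	/* READ(6): opcode 0x08, 21-bit LBA in bytes 1..3, length in byte 4 */
	const uint8_t rd6[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };
	/* READ(10): opcode 0x28, 32-bit LBA in bytes 2..5, length in 7..8 */
	const uint8_t rd10[10] = { 0x28, 0x00, 0x00, 0x00, 0x10, 0x00,
				   0x00, 0x00, 0x80, 0x00 };

	unsigned int lba6 = ((unsigned int)(rd6[1] & 0x1f) << 16) |
			    ((unsigned int)rd6[2] << 8) | rd6[3];
	/* SBC: a zero TRANSFER LENGTH in READ(6)/WRITE(6) means 256 blocks */
	unsigned int len6 = rd6[4] ? rd6[4] : 256;

	printf("READ(6)  lba=%u txlen=%u\n", lba6, len6);
	printf("READ(10) lba=%u txlen=%u\n",
	       read_be32(&rd10[2]), read_be16(&rd10[7]));
	return 0;
}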
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index c39551b32e94..fff9c4d0f7c8 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -37,6 +37,8 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
+#define ISCSI_SEND_MAX_ALLOWED 10
+
static int dbg_session;
module_param_named(debug_session, dbg_session, int,
S_IRUGO | S_IWUSR);
@@ -2962,6 +2964,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
return err;
}
+static int iscsi_session_has_conns(int sid)
+{
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_for_each_entry(conn, &connlist, conn_list) {
+ if (iscsi_conn_get_sid(conn) == sid) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
+ return found;
+}
+
static int
iscsi_set_iface_params(struct iscsi_transport *transport,
struct iscsi_uevent *ev, uint32_t len)
@@ -3536,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session)
- transport->destroy_session(session);
- else
+ if (!session)
err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else
+ transport->destroy_session(session);
break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3694,6 +3716,7 @@ iscsi_if_rx(struct sk_buff *skb)
struct nlmsghdr *nlh;
struct iscsi_uevent *ev;
uint32_t group;
+ int retries = ISCSI_SEND_MAX_ALLOWED;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
@@ -3724,6 +3747,10 @@ iscsi_if_rx(struct sk_buff *skb)
break;
err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+ if (err == -EAGAIN && --retries < 0) {
+ printk(KERN_WARNING "Send reply failed, error %d\n", err);
+ break;
+ }
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
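Note on the iscsi_if_rx() change above: replies that keep failing with -EAGAIN are now retried only a bounded number of times instead of indefinitely. A minimal stand-alone sketch of such a bounded retry loop (send_reply() is a stand-in for the real netlink send path):

#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 10

/* Stand-in for the real transmit routine; fails with -EAGAIN a few times
 * before succeeding, like a temporarily full socket buffer would. */
static int send_reply(void)
{
	static int busy = 3;

	return busy-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int retries = MAX_RETRIES;
	int err;

	do {
		err = send_reply();
		if (err == -EAGAIN && --retries < 0) {
			fprintf(stderr, "send reply failed, error %d\n", err);
			break;
		}
	} while (err == -EAGAIN);

	return err ? 1 : 0;
}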
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c10125fcd35b..ba2fb661edae 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1283,11 +1283,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- /*
- * XXX and what if there are packets in flight and this close()
- * XXX is followed by a "rmmod sd_mod"?
- */
-
scsi_disk_put(sdkp);
}
@@ -2009,8 +2004,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
@@ -2407,7 +2404,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
int res;
struct scsi_device *sdp = sdkp->device;
struct scsi_mode_data data;
- int disk_ro = get_disk_ro(sdkp->disk);
int old_wp = sdkp->write_prot;
set_disk_ro(sdkp->disk, 0);
@@ -2448,7 +2444,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
"Test WP failed, assume Write Enabled\n");
} else {
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
- set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
+ set_disk_ro(sdkp->disk, sdkp->write_prot);
if (sdkp->first_scan || old_wp != sdkp->write_prot) {
sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
sdkp->write_prot ? "on" : "off");
@@ -2845,6 +2841,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ if (sdkp->opt_xfer_blocks == 0)
+ return false;
+
if (sdkp->opt_xfer_blocks > dev_max) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u logical blocks " \
@@ -2949,9 +2948,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else
+ } else {
+ q->limits.io_opt = 0;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
+ }
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3256,11 +3257,23 @@ static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
-
+ struct request_queue *q = disk->queue;
+
spin_lock(&sd_index_lock);
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);
+ /*
+ * Wait until all requests that are in progress have completed.
+ * This is necessary to avoid that e.g. scsi_end_request() crashes
+ * due to clearing the disk->private_data pointer. Wait from inside
+ * scsi_disk_release() instead of from sd_release() to avoid that
+ * freezing and unfreezing the request queue affects user space I/O
+ * in case multiple processes open a /dev/sd... node concurrently.
+ */
+ blk_mq_freeze_queue(q);
+ blk_mq_unfreeze_queue(q);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8d9b416399f9..c924df5538dd 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -809,8 +809,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 06a062455404..b12f7f952b70 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5478,7 +5478,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
else
mask = DMA_BIT_MASK(32);
- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
goto disable_device;
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 76278072147e..b0f5220ae23a 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -78,10 +78,8 @@ static int snirm710_probe(struct platform_device *dev)
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ if (!hostdata)
return -ENOMEM;
- }
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d92b2808d191..6df34d68737f 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -641,13 +641,22 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct storvsc_device *stor_device;
- int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
- num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+ /*
+ * If the number of CPUs is artificially restricted, such as
+ * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+ * sub-channels >= the number of CPUs. These sub-channels
+ * should not be created. The primary channel is already created
+ * and assigned to one CPU, so check against # CPUs - 1.
+ */
+ num_sc = min((int)(num_online_cpus() - 1), max_chns);
+ if (!num_sc)
+ return;
+
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c07038948..6f75693cf7d2 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -419,7 +419,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_bus_reset_handler = sun3scsi_bus_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
@@ -440,7 +440,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index c6425e3df5a0..f1c771437752 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4371,6 +4371,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
OUTB(np, HS_PRT, HS_BUSY);
}
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+ if (cp) \
+ scmd_printk(lvl, cp->cmd, fmt, ##v); \
+ else \
+ starget_printk(lvl, tp->starget, fmt, ##v); \
+} while (0)
+
/*
* chip exception handler for programmed interrupts.
*/
@@ -4416,7 +4423,7 @@ static void sym_int_sir(struct sym_hcb *np)
* been selected with ATN. We do not want to handle that.
*/
case SIR_SEL_ATN_NO_MSG_OUT:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG OUT phase after selection with ATN\n");
goto out_stuck;
/*
@@ -4424,7 +4431,7 @@ static void sym_int_sir(struct sym_hcb *np)
* having reselected the initiator.
*/
case SIR_RESEL_NO_MSG_IN:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG IN phase after reselection\n");
goto out_stuck;
/*
@@ -4432,7 +4439,7 @@ static void sym_int_sir(struct sym_hcb *np)
* an IDENTIFY.
*/
case SIR_RESEL_NO_IDENTIFY:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No IDENTIFY after reselection\n");
goto out_stuck;
/*
@@ -4461,7 +4468,7 @@ static void sym_int_sir(struct sym_hcb *np)
case SIR_RESEL_ABORTED:
np->lastmsg = np->msgout[0];
np->msgout[0] = M_NOOP;
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"message %x sent on bad reselection\n", np->lastmsg);
goto out;
/*
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 51d559214db6..1fe193590b8b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,7 +1094,7 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
}
- if (host->hw_ver.major >= 0x2) {
+ if (host->hw_ver.major == 0x2) {
hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
if (!ufs_qcom_cap_qunipro(host))
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index a72a4ba78125..b47decc1fb5b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -342,24 +342,21 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ufshcd_init_lanes_per_dir(hba);
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err(dev, "Initialization failed\n");
- goto out_disable_rpm;
+ goto dealloc_host;
}
platform_set_drvdata(pdev, hba);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
-out_disable_rpm:
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
dealloc_host:
ufshcd_dealloc_host(hba);
out:
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6812e80fc8c4..035011860882 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -682,6 +682,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -941,7 +946,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
/* Get the descriptor */
- if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ if (hba->dev_cmd.query.descriptor &&
+ lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
u16 resp_len;
@@ -2005,10 +2011,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -4391,19 +4397,30 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ /*
+	 * There can be at most hba->nutrs requests in flight, and in the
+	 * worst case they complete one by one after the interrupt status
+	 * has been read; handle them by re-checking the interrupt status
+	 * in a loop until all of them have been processed before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
+ retval = IRQ_HANDLED;
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -4875,19 +4892,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
goto out;
}
- if (hba->vreg_info.vcc)
+ if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vcc->max_uA,
POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
- if (hba->vreg_info.vccq)
+ if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq->max_uA,
icc_level,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
- if (hba->vreg_info.vccq2)
+ if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq2->max_uA,
icc_level,
@@ -5346,7 +5363,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_icc_levels(hba);
/* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
goto out;
scsi_scan_host(hba->host);
@@ -5449,6 +5467,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
if (!vreg)
return 0;
+ /*
+	 * The "set_load" operation is only required for regulators that
+	 * explicitly configure a current limit. Otherwise a zero max_uA
+	 * may cause unexpected behavior when the regulator is enabled or
+	 * switched to high-power mode.
+ */
+ if (!vreg->max_uA)
+ return 0;
+
ret = regulator_set_load(vreg->reg, ua);
if (ret < 0) {
dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
@@ -5495,12 +5522,15 @@ static int ufshcd_config_vreg(struct device *dev,
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
- dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ if (vreg->min_uV && vreg->max_uV) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev,
+ "%s: %s set voltage failed, err=%d\n",
__func__, name, ret);
- goto out;
+ goto out;
+ }
}
uA_load = on ? vreg->max_uA : 0;
@@ -6479,6 +6509,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
+ if (!hba->is_powered)
+ goto out;
+
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
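Note on the ufshcd_intr() rework above: the interrupt status is re-read in a loop, bounded by the queue depth, so completions that arrive while the handler is running are not missed. A stand-alone sketch of that drain-until-idle pattern (read_pending() and handle() are placeholders for the register read and the service routine):

#include <stdio.h>

static int pending = 5;

/* Placeholder for reading the hardware interrupt status register. */
static int read_pending(void)
{
	return pending;
}

/* Placeholder for servicing whatever the status bits reported. */
static void handle(int status)
{
	printf("handled status %d\n", status);
	pending--;		/* new work may appear while we service it */
}

int main(void)
{
	int retries = 8;	/* mirrors bounding the loop by queue depth */
	int status = read_pending();
	int handled = 0;

	do {
		if (status) {
			handle(status);
			handled = 1;
		}
		status = read_pending();
	} while (status && --retries);

	return handled ? 0 : 1;
}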
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 23129d7b2678..c77e36526447 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -52,7 +52,7 @@
#define RX_HS_UNTERMINATED_ENABLE 0x00A6
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE 0x0089
+#define RX_TERMINATION_FORCE_ENABLE 0x00A9
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
#define RX_HIBERN8TIME_CAPABILITY 0x0092
#define RX_REFCLKFREQ 0x00EB
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index fcfbe2dcd025..df6fabcce4f7 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -766,6 +766,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
struct pvscsi_adapter *adapter = shost_priv(host);
struct pvscsi_ctx *ctx;
unsigned long flags;
+ unsigned char op;
spin_lock_irqsave(&adapter->hw_lock, flags);
@@ -778,13 +779,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
}
cmd->scsi_done = done;
+ op = cmd->cmnd[0];
dev_dbg(&cmd->device->sdev_gendev,
- "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
spin_unlock_irqrestore(&adapter->hw_lock, flags);
- pvscsi_kick_io(adapter, cmd->cmnd[0]);
+ pvscsi_kick_io(adapter, op);
return 0;
}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 0aaf429f31d5..b5a7107a9c0a 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -152,8 +152,10 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
if (err < 0)
goto err0;
gc = gpio_to_chip(err);
- if (WARN_ON(!gc))
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
goto err0;
+ }
if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
pr_debug("%s: tried to get a non-qe pin\n", __func__);
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index e929f5142862..36226976773f 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -778,7 +778,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
static int pwrap_init_cipher(struct pmic_wrapper *wrp)
{
int ret;
- u32 rdata;
+ u32 rdata = 0;
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e70d63..038abc377fdb 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
- int i;
+ int i, ret;
u32 mask, gsbi_num;
const struct crci_config *config = NULL;
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gsbi);
- return of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
}
static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 18ec52f2078a..89dd50fa404f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -646,7 +646,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
- if (header->size != entry->size) {
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
dev_err(smem->dev,
"Partition %d has invalid size\n", i);
return -EINVAL;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index de2c1bfe28b5..c4f5e5bbb8dc 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -131,13 +131,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
/* take over the memory region from the early initialization */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fuse->base))
- return PTR_ERR(fuse->base);
+ if (IS_ERR(fuse->base)) {
+ err = PTR_ERR(fuse->base);
+ fuse->base = base;
+ return err;
+ }
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
+ fuse->base = base;
return PTR_ERR(fuse->clk);
}
@@ -146,8 +150,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0)
+ if (err < 0) {
+ fuse->base = base;
return err;
+ }
}
if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 5b18f6ffa45c..cd61c883c19f 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -134,7 +134,7 @@ void __init tegra_init_apbmisc(void)
apbmisc.flags = IORESOURCE_MEM;
/* strapping options */
- if (tegra_get_chip_id() == TEGRA124) {
+ if (of_machine_is_compatible("nvidia,tegra124")) {
straps.start = 0x7000e864;
straps.end = 0x7000e867;
} else {
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 9685f9b8be07..a12710c917a1 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -512,16 +512,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off);
*/
int tegra_powergate_is_powered(unsigned int id)
{
- int status;
-
if (!tegra_powergate_is_valid(id))
return -EINVAL;
- mutex_lock(&pmc->powergates_lock);
- status = tegra_powergate_state(id);
- mutex_unlock(&pmc->powergates_lock);
-
- return status;
+ return tegra_powergate_state(id);
}
/**
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 5bb376009d98..fc33bfdc957c 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -377,6 +377,8 @@ static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
ret = rproc_boot(m3_ipc->rproc);
if (ret)
dev_err(dev, "rproc_boot failed\n");
+ else
+ m3_ipc_state = m3_ipc;
do_exit(0);
}
@@ -463,8 +465,6 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
goto err_put_rproc;
}
- m3_ipc_state = m3_ipc;
-
return 0;
err_put_rproc:
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 44be6b593b30..938840af9c50 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -315,7 +315,6 @@ struct atmel_spi {
struct atmel_spi_dma dma;
bool keep_cs;
- bool cs_active;
u32 fifo_size;
};
@@ -1209,10 +1208,8 @@ static int atmel_spi_setup(struct spi_device *spi)
as = spi_master_get_devdata(spi->master);
/* see notes above re chipselect */
- if (!atmel_spi_is_v2(as)
- && spi->chip_select == 0
- && (spi->mode & SPI_CS_HIGH)) {
- dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ if (!as->use_cs_gpios && (spi->mode & SPI_CS_HIGH)) {
+ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
return -EINVAL;
}
@@ -1406,11 +1403,9 @@ static int atmel_spi_one_transfer(struct spi_master *master,
&msg->transfers)) {
as->keep_cs = true;
} else {
- as->cs_active = !as->cs_active;
- if (as->cs_active)
- cs_activate(as, msg->spi);
- else
- cs_deactivate(as, msg->spi);
+ cs_deactivate(as, msg->spi);
+ udelay(10);
+ cs_activate(as, msg->spi);
}
}
@@ -1433,7 +1428,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
atmel_spi_lock(as);
cs_activate(as, spi);
- as->cs_active = true;
as->keep_cs = false;
msg->status = 0;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 25abf2d1732a..eab27d41ba83 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+ tfr->rx_buf != master->dummy_rx)
cs |= BCM2835_SPI_CS_REN;
else
cs &= ~BCM2835_SPI_CS_REN;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7428091d3f5b..e075712c501e 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -178,19 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
}
-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
{
- struct spi_master *master = dev_id;
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
- irqreturn_t ret = IRQ_NONE;
+ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
/* check if we have data to read */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
+ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
+ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
/* check if we have data to write */
while (bs->tx_len &&
@@ -198,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
(!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
BCM2835_AUX_SPI_STAT_TX_FULL))) {
bcm2835aux_wr_fifo(bs);
- ret = IRQ_HANDLED;
}
+}
- /* and check if we have reached "done" */
- while (bs->rx_len &&
- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
- BCM2835_AUX_SPI_STAT_BUSY))) {
- bcm2835aux_rd_fifo(bs);
- ret = IRQ_HANDLED;
- }
+static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ /* IRQ may be shared, so return if our interrupts are disabled */
+ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+ return IRQ_NONE;
+
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
if (!bs->tx_len) {
/* disable tx fifo empty interrupt */
@@ -221,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
complete(&master->xfer_completion);
}
- /* and return */
- return ret;
+ return IRQ_HANDLED;
}
static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
@@ -268,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
{
struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
unsigned long timeout;
- u32 stat;
/* configure spi */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
@@ -279,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
/* loop until finished the transfer */
while (bs->rx_len) {
- /* read status */
- stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
- /* fill in tx fifo with remaining data */
- if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
- bcm2835aux_wr_fifo(bs);
- continue;
- }
-
- /* read data from fifo for both cases */
- if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
- if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
- bcm2835aux_rd_fifo(bs);
- continue;
- }
+ /* do common fifo handling */
+ bcm2835aux_spi_transfer_helper(bs);
/* there is still data pending to read check the timeout */
if (bs->rx_len && time_after(jiffies, timeout)) {
@@ -433,7 +416,18 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->num_chipselect = -1;
+ /* even though the driver never officially supported native CS
+ * allow a single native CS for legacy DT support purposes when
+ * no cs-gpio is configured.
+ * Known limitations for native cs are:
+	 * * multiple chip-selects: cs0-cs2 are all simultaneously asserted
+ * whenever there is a transfer - this even includes SPI_NO_CS
+	 * * SPI_CS_HIGH: is ignored - cs is always asserted low
+ * * cs_change: cs is deasserted after each spi_transfer
+ * * cs_delay_usec: cs is always deasserted one SCK cycle after
+ * a spi_transfer
+ */
+ master->num_chipselect = 1;
master->transfer_one = bcm2835aux_spi_transfer_one;
master->handle_err = bcm2835aux_spi_handle_err;
master->prepare_message = bcm2835aux_spi_prepare_message;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 3aa9e6e3dac8..4ef54436b9d4 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -392,7 +392,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
if (ret)
spi_master_put(master);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 877937706240..828fbbebc3c4 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
error:
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
spi_master_put(master);
return ret;
}
@@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
return;
clk_disable_unprepare(p->clk);
+ pci_release_regions(pdev);
/* Put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 8b290d9d7935..5419de19859a 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -408,7 +408,6 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
m->status = status;
- spi_finalize_current_message(master);
if (status || !cs_change) {
ndelay(nsecs);
@@ -416,6 +415,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
}
fsl_spi_setup_transfer(spi, NULL);
+ spi_finalize_current_message(master);
return 0;
}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 7a37090dabbe..2e65b70c7879 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -673,6 +673,8 @@ static int img_spfi_probe(struct platform_device *pdev)
dma_release_channel(spfi->tx_ch);
if (spfi->rx_ch)
dma_release_channel(spfi->rx_ch);
+ spfi->tx_ch = NULL;
+ spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
master->dma_tx = spfi->tx_ch;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a47cf638460a..ccb6f98550da 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -298,7 +298,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
- int max_fifo_depth, fifo_depth, bytes_per_word;
+ int max_fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
@@ -314,10 +314,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
- fifo_depth = gcd(t->len, max_fifo_depth);
- if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
- goto disable_fifo;
-
wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
@@ -325,16 +321,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
- xferlevel |= (fifo_depth - 1) << 8;
+ xferlevel |= (bytes_per_word - 1) << 8;
}
+
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
- xferlevel |= fifo_depth - 1;
+ xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
mcspi_write_chconf0(spi, chconf);
- mcspi->fifo_depth = fifo_depth;
+ mcspi->fifo_depth = max_fifo_depth;
return;
}
@@ -601,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
- u32 burst;
void __iomem *chstat_reg;
void __iomem *irqstat_reg;
int wait_res;
@@ -623,22 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
}
count = xfer->len;
- burst = 1;
-
- if (mcspi->fifo_depth > 0) {
- if (count > mcspi->fifo_depth)
- burst = mcspi->fifo_depth / es;
- else
- burst = count / es;
- }
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
- cfg.src_maxburst = burst;
- cfg.dst_maxburst = burst;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index fefb688a3432..2f4df804c4d8 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -320,7 +320,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
@@ -330,7 +330,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EINVAL;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f2209ec4cb68..2f84d7653afd 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -921,10 +921,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
rate = min_t(int, ssp_clk, rate);
+ /*
+ * Calculate the divisor for the SCR (Serial Clock Rate), avoiding
+ * that the SSP transmission rate can be greater than the device rate
+ */
if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
- return (ssp_clk / (2 * rate) - 1) & 0xff;
+ return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
else
- return (ssp_clk / rate - 1) & 0xfff;
+ return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
@@ -1471,12 +1475,7 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
- struct device *dev = param;
-
- if (dev != chan->device->dev->parent)
- return false;
-
- return true;
+ return param == chan->device->dev;
}
static struct pxa2xx_spi_master *
@@ -1530,7 +1529,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return NULL;
+
ssp->irq = platform_get_irq(pdev, 0);
+ if (ssp->irq < 0)
+ return NULL;
+
ssp->type = type;
ssp->pdev = pdev;
ssp->port_id = pxa2xx_spi_get_port_id(adev);
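Note on the pxa2xx clock-divider fix above: DIV_ROUND_UP() picks a divisor large enough that the programmed SSP clock never exceeds the rate the device requested, whereas truncating division could overshoot. A small stand-alone illustration with example clock values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ssp_clk = 200000000;	/* 200 MHz reference clock */
	unsigned int rate = 13000000;		/* 13 MHz requested rate */

	/* Truncating division can pick a divisor that is too small,
	 * i.e. an effective clock faster than requested. */
	unsigned int scr_floor = ssp_clk / rate - 1;
	unsigned int scr_ceil = DIV_ROUND_UP(ssp_clk, rate) - 1;

	printf("floor: SCR=%u -> %u Hz (too fast)\n",
	       scr_floor, ssp_clk / (scr_floor + 1));
	printf("ceil : SCR=%u -> %u Hz (<= requested)\n",
	       scr_ceil, ssp_clk / (scr_ceil + 1));
	return 0;
}

With the 200 MHz / 13 MHz example above, truncation yields an effective 13.33 MHz clock, while rounding up stays at 12.5 MHz, i.e. never above the requested rate.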
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 1bfa889b8427..88b108e1c85f 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -974,6 +974,11 @@ static int spi_qup_suspend(struct device *device)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
+ if (pm_runtime_suspended(device)) {
+ ret = spi_qup_pm_resume_runtime(device);
+ if (ret)
+ return ret;
+ }
ret = spi_master_suspend(master);
if (ret)
return ret;
@@ -982,10 +987,8 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
- if (!pm_runtime_suspended(device)) {
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
- }
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
return 0;
}
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 0f89c2169c24..3a94f465e8e0 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -443,6 +443,9 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
struct dma_slave_config rxconf, txconf;
struct dma_async_tx_descriptor *rxdesc, *txdesc;
+ memset(&rxconf, 0, sizeof(rxconf));
+ memset(&txconf, 0, sizeof(txconf));
+
spin_lock_irqsave(&rs->lock, flags);
rs->state &= ~RXBUSY;
rs->state &= ~TXBUSY;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 093c9cf92bfd..07612e8c58ee 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -279,7 +279,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets parity, interrupt mask */
rspi_write8(rspi, 0x00, RSPI_SPCR2);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -323,7 +324,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, 0x00, RSPI_SSLND);
rspi_write8(rspi, 0x00, RSPI_SPND);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
@@ -374,7 +376,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
/* Sets buffer to allow normal operation */
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
- /* Sets SPCMD */
+ /* Resets sequencer */
+ rspi_write8(rspi, 0, RSPI_SPSCR);
rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Enables SPI function in master mode */
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 711ea523b325..8a69148a962a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1198,8 +1198,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
i = platform_get_irq(pdev, 0);
if (i < 0) {
- dev_err(&pdev->dev, "cannot get platform IRQ\n");
- ret = -ENOENT;
+ dev_err(&pdev->dev, "cannot get IRQ\n");
+ ret = i;
goto err1;
}
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index e54b59638458..710adbc2485f 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -385,6 +385,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
@@ -396,6 +397,8 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
+ pm_runtime_disable(&pdev->dev);
+
clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 73779cecc3bb..e37712bed0b2 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -307,10 +307,16 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
} else {
+ unsigned int write_bytes;
max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
written_words = max_n_32bit;
nbytes = written_words * tspi->bytes_per_word;
+ if (nbytes > t->len - tspi->cur_pos)
+ nbytes = t->len - tspi->cur_pos;
+ write_bytes = nbytes;
for (count = 0; count < max_n_32bit; count++) {
u32 x = 0;
@@ -319,8 +325,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tegra_spi_writel(tspi, x, SPI_TX_FIFO);
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+
return written_words;
}
@@ -344,20 +352,27 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
for (i = 0; len && (i < 4); i++, len--)
*rx_buf++ = (x >> i*8) & 0xFF;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
read_words += tspi->curr_dma_words;
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ u8 bytes_per_word = tspi->bytes_per_word;
+ unsigned int read_bytes;
+ len = rx_full_count * bytes_per_word;
+ if (len > t->len - tspi->cur_pos)
+ len = t->len - tspi->cur_pos;
+ read_bytes = len;
for (count = 0; count < rx_full_count; count++) {
u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; len && (i < bytes_per_word); i++, len--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
- tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
read_words += rx_full_count;
+ tspi->cur_rx_pos += read_bytes;
}
+
return read_words;
}
@@ -372,12 +387,17 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int write_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ write_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = 0;
@@ -386,8 +406,9 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
x |= (u32)(*tx_buf++) << (i * 8);
tspi->tx_dma_buf[count] = x;
}
+
+ tspi->cur_tx_pos += write_bytes;
}
- tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the dma buffer to read by dma */
dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
@@ -405,20 +426,28 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
} else {
unsigned int i;
unsigned int count;
unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+ unsigned int read_bytes;
+ if (consume > t->len - tspi->cur_pos)
+ consume = t->len - tspi->cur_pos;
+ read_bytes = consume;
for (count = 0; count < tspi->curr_dma_words; count++) {
u32 x = tspi->rx_dma_buf[count] & rx_mask;
- for (i = 0; (i < tspi->bytes_per_word); i++)
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
*rx_buf++ = (x >> (i*8)) & 0xFF;
}
+
+ tspi->cur_rx_pos += read_bytes;
}
- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
/* Make the dma buffer to read by dma */
dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
@@ -730,6 +759,8 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
if (tspi->is_packed)
command1 |= SPI_PACKED;
+ else
+ command1 &= ~SPI_PACKED;
command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
tspi->cur_direction = 0;
@@ -1067,27 +1098,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
- ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
- tegra_spi_isr_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), tspi);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
- tspi->irq);
- goto exit_free_master;
- }
tspi->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(tspi->clk)) {
dev_err(&pdev->dev, "can not get clock\n");
ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
if (IS_ERR(tspi->rst)) {
dev_err(&pdev->dev, "can not get reset\n");
ret = PTR_ERR(tspi->rst);
- goto exit_free_irq;
+ goto exit_free_master;
}
tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
@@ -1095,7 +1118,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
ret = tegra_spi_init_dma_param(tspi, true);
if (ret < 0)
- goto exit_free_irq;
+ goto exit_free_master;
ret = tegra_spi_init_dma_param(tspi, false);
if (ret < 0)
goto exit_rx_dma_free;
@@ -1117,18 +1140,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
goto exit_pm_disable;
}
+
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
tspi->def_command1_reg = SPI_M_S;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
pm_runtime_put(&pdev->dev);
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_pm_disable;
+ }
master->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "can not register to master err %d\n", ret);
- goto exit_pm_disable;
+ goto exit_free_irq;
}
return ret;
+exit_free_irq:
+ free_irq(spi_irq, tspi);
exit_pm_disable:
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
@@ -1136,8 +1173,6 @@ exit_pm_disable:
tegra_spi_deinit_dma_param(tspi, false);
exit_rx_dma_free:
tegra_spi_deinit_dma_param(tspi, true);
-exit_free_irq:
- free_irq(spi_irq, tspi);
exit_free_master:
spi_master_put(master);
return ret;
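The spi-tegra114 hunks above clamp the PIO copy lengths to the bytes remaining in the transfer (t->len - tspi->cur_pos), so a transfer whose length is not a multiple of bytes_per_word no longer reads or writes past the client buffer on the final word. A standalone sketch of that clamp, using assumed sizes rather than driver state, is shown below.

/*
 * Standalone sketch (hypothetical sizes) of the clamping added above:
 * the last FIFO word of a transfer may cover fewer bytes than
 * bytes_per_word, so the copy length is limited to what remains.
 */
#include <stdio.h>

int main(void)
{
	unsigned int len = 10;            /* assumed transfer length in bytes */
	unsigned int cur_pos = 8;         /* bytes already transferred */
	unsigned int bytes_per_word = 4;  /* 32-bit FIFO words */
	unsigned int curr_words = 1;      /* words left to move */

	unsigned int nbytes = curr_words * bytes_per_word;  /* 4 */
	if (nbytes > len - cur_pos)
		nbytes = len - cur_pos;                     /* clamped to 2 */

	printf("copy %u byte(s) for the final word\n", nbytes);
	return 0;
}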
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index af2880d0c112..cf2a329fd895 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1078,7 +1078,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
ret = clk_enable(tspi->clk);
if (ret < 0) {
dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
- goto exit_free_master;
+ goto exit_clk_unprepare;
}
spi_irq = platform_get_irq(pdev, 0);
@@ -1151,6 +1151,8 @@ exit_free_irq:
free_irq(spi_irq, tspi);
exit_clk_disable:
clk_disable(tspi->clk);
+exit_clk_unprepare:
+ clk_unprepare(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1164,6 +1166,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
clk_disable(tspi->clk);
+ clk_unprepare(tspi->clk);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index c54ee6674471..fe707440f8c3 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1306,18 +1306,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
return;
}
-static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
struct pch_spi_data *data)
{
struct pch_spi_dma_ctrl *dma;
+ int ret;
dma = &data->dma;
+ ret = 0;
/* Get Consistent memory for Tx DMA */
dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ if (!dma->tx_buf_virt)
+ ret = -ENOMEM;
+
/* Get Consistent memory for Rx DMA */
dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+ if (!dma->rx_buf_virt)
+ ret = -ENOMEM;
+
+ return ret;
}
static int pch_spi_pd_probe(struct platform_device *plat_dev)
@@ -1394,7 +1403,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
if (use_dma) {
dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
+ ret = pch_alloc_dma_buf(board_dat, data);
+ if (ret)
+ goto err_spi_register_master;
}
ret = spi_register_master(master);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 18aeaceee286..d26c0eda2d9e 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -415,9 +415,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
-
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2ebcd1851ab0..ed6b8ae2f1ee 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -958,6 +958,8 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
if (max_tx || max_rx) {
list_for_each_entry(xfer, &msg->transfers,
transfer_list) {
+ if (!xfer->len)
+ continue;
if (!xfer->tx_buf)
xfer->tx_buf = master->dummy_tx;
if (!xfer->rx_buf)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e05046f866b..a685c6114a8d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -663,6 +663,9 @@ static int spidev_release(struct inode *inode, struct file *filp)
if (dofree)
kfree(spidev);
}
+#ifdef CONFIG_SPI_SLAVE
+ spi_slave_abort(spidev->spi);
+#endif
mutex_unlock(&device_list_lock);
return 0;
@@ -751,11 +754,9 @@ static int spidev_probe(struct spi_device *spi)
* compatible string, it is a Linux implementation thing
* rather than a description of the hardware.
*/
- if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
- dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
- WARN_ON(spi->dev.of_node &&
- !of_match_device(spidev_dt_ids, &spi->dev));
- }
+ WARN(spi->dev.of_node &&
+ of_device_is_compatible(spi->dev.of_node, "spidev"),
+ "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
spidev_probe_acpi(spi);
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index d70568ea02d5..2ff7d90e166a 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
.resume = ssb_host_pcmcia_resume,
};
+static int pcmcia_init_failed;
+
/*
* These are not module init/exit functions!
* The module_pcmcia_driver() helper cannot be used here.
*/
int ssb_host_pcmcia_init(void)
{
- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
+
+ return pcmcia_init_failed;
}
void ssb_host_pcmcia_exit(void)
{
- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+ if (!pcmcia_init_failed)
+ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index c6314d1552ea..99fd4f53c856 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -370,8 +370,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
_calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
+static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* do not allow to mmap ashmem backing shmem file directly */
+ return -EPERM;
+}
+
+static unsigned long
+ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
+ static struct file_operations vmfile_fops;
struct ashmem_area *asma = file->private_data;
int ret = 0;
@@ -412,6 +427,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
+ /*
+ * override mmap operation of the vmfile so that it can't be
+ * remapped which would lead to creation of a new vma with no
+ * asma permission checks. Have to override get_unmapped_area
+ * as well to prevent VM_BUG_ON check for f_ops modification.
+ */
+ if (!vmfile_fops.mmap) {
+ vmfile_fops = *vmfile->f_op;
+ vmfile_fops.mmap = ashmem_vmfile_mmap;
+ vmfile_fops.get_unmapped_area =
+ ashmem_vmfile_get_unmapped_area;
+ }
+ vmfile->f_op = &vmfile_fops;
}
get_file(asma->file);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb637665eb7..35432fbd6551 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -984,6 +984,8 @@ int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
unsigned int mask);
unsigned int comedi_dio_update_state(struct comedi_subdevice *,
unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1736248bc5b8..8ca5493c66fe 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -390,11 +390,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
EXPORT_SYMBOL_GPL(comedi_dio_update_state);
/**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
* @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
*
* Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
*
* For digital input, output or input/output subdevices, samples for
* multiple channels are assumed to be packed into one or more unsigned
@@ -404,9 +406,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
*
* Returns the overall scan length in bytes.
*/
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct comedi_cmd *cmd = &s->async->cmd;
unsigned int num_samples;
unsigned int bits_per_sample;
@@ -423,6 +425,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
}
return comedi_samples_to_bytes(s, num_samples);
}
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag. For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
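The new comedi_bytes_per_scan_cmd() helper computes the scan size for a command that is still being validated, which is what lets ni_cdio_cmdtest() further down in this patch bound stop_arg by the preallocated buffer before the command is accepted. The following is a rough standalone sketch of that bound with assumed sizes, not kernel code; the 64 KiB buffer and the packed 32-bit DIO scan are illustrative values only.

/*
 * Sketch of the stop_arg bound applied in ni_cdio_cmdtest() below,
 * using hypothetical numbers.  A 32-bit DIO subdevice packs its
 * chanlist into one unsigned int per scan, so a 64 KiB preallocated
 * buffer holds at most 16384 scans.
 */
#include <stdio.h>

int main(void)
{
	unsigned int prealloc_bufsz = 65536;                /* assumed buffer size */
	unsigned int bytes_per_scan = sizeof(unsigned int); /* packed DIO scan */
	unsigned int stop_arg = 20000;                      /* requested scan count */

	if (bytes_per_scan && stop_arg > prealloc_bufsz / bytes_per_scan) {
		stop_arg = prealloc_bufsz / bytes_per_scan;
		printf("stop_arg clamped to %u scans\n", stop_arg);
	}
	return 0;
}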
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 2c1b6de30da8..385e14269870 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -45,8 +45,8 @@
#define PCI171X_RANGE_UNI BIT(4)
#define PCI171X_RANGE_GAIN(x) (((x) & 0x7) << 0)
#define PCI171X_MUX_REG 0x04 /* W: A/D multiplexor control */
-#define PCI171X_MUX_CHANH(x) (((x) & 0xf) << 8)
-#define PCI171X_MUX_CHANL(x) (((x) & 0xf) << 0)
+#define PCI171X_MUX_CHANH(x) (((x) & 0xff) << 8)
+#define PCI171X_MUX_CHANL(x) (((x) & 0xff) << 0)
#define PCI171X_MUX_CHAN(x) (PCI171X_MUX_CHANH(x) | PCI171X_MUX_CHANL(x))
#define PCI171X_STATUS_REG 0x06 /* R: status register */
#define PCI171X_STATUS_IRQ BIT(11) /* 1=IRQ occurred */
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 42945de31fe2..d23aa5d8e62a 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -2337,7 +2337,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
devpriv->intr_running = false;
spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
- comedi_handle_events(dev, s_ao);
+ if (s_ao)
+ comedi_handle_events(dev, s_ao);
comedi_handle_events(dev, s_ai);
return IRQ_HANDLED;
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index d5295bbdd28c..37133d54dda1 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -566,7 +566,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
}
#endif
comedi_handle_events(dev, s);
- comedi_handle_events(dev, s_ao);
+ if (s_ao)
+ comedi_handle_events(dev, s_ao);
return IRQ_RETVAL(handled);
}
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 19e0b7be8495..917d13abef88 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -351,9 +351,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
unsigned int flags)
{
- int divider, base, prescale;
+ unsigned int divider, base, prescale;
- /* This function needs improvment */
+ /* This function needs improvement */
/* Don't know if divider==0 works. */
for (prescale = 0; prescale < 16; prescale++) {
@@ -367,7 +367,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 65536) {
@@ -377,7 +377,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
}
prescale = 15;
- base = timer_base * (1 << prescale);
+ base = timer_base * (prescale + 1);
divider = 65535;
*nanosec = divider * base;
return (prescale << 16) | (divider);
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index e5b948405fd9..a09631f9d813 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -632,6 +632,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
&devpriv->dio_buffer_phys_addr[i],
GFP_KERNEL);
+ if (!devpriv->dio_buffer[i]) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA buffer\n");
+ return -ENOMEM;
+ }
}
/* allocate dma descriptors */
devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
@@ -639,6 +644,11 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
NUM_DMA_DESCRIPTORS,
&devpriv->dma_desc_phys_addr,
GFP_KERNEL);
+ if (!devpriv->dma_desc) {
+ dev_warn(dev->class_dev,
+ "failed to allocate DMA descriptors\n");
+ return -ENOMEM;
+ }
if (devpriv->dma_desc_phys_addr & 0xf) {
dev_warn(dev->class_dev,
" dma descriptors not quad-word aligned (bug)\n");
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0fa85d55c82f..12056eb3cbe8 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3477,6 +3477,7 @@ static int ni_cdio_check_chanlist(struct comedi_device *dev,
static int ni_cdio_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
+ unsigned int bytes_per_scan;
int err = 0;
int tmp;
@@ -3506,9 +3507,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
- s->async->prealloc_bufsz /
- comedi_bytes_per_scan(s));
+ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+ if (bytes_per_scan) {
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+ s->async->prealloc_bufsz /
+ bytes_per_scan);
+ }
if (err)
return 3;
@@ -4941,7 +4945,10 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
case NI_RTSI_OUTPUT_G_SRC0:
case NI_RTSI_OUTPUT_G_GATE0:
case NI_RTSI_OUTPUT_RGOUT0:
- case NI_RTSI_OUTPUT_RTSI_BRD_0:
+ case NI_RTSI_OUTPUT_RTSI_BRD(0):
+ case NI_RTSI_OUTPUT_RTSI_BRD(1):
+ case NI_RTSI_OUTPUT_RTSI_BRD(2):
+ case NI_RTSI_OUTPUT_RTSI_BRD(3):
return 1;
case NI_RTSI_OUTPUT_RTSI_OSC:
return (devpriv->is_m_series) ? 1 : 0;
@@ -4962,11 +4969,18 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
devpriv->rtsi_trig_a_output_reg |= NISTC_RTSI_TRIG(chan, src);
ni_stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
NISTC_RTSI_TRIGA_OUT_REG);
- } else if (chan < 8) {
+ } else if (chan < NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
devpriv->rtsi_trig_b_output_reg &= ~NISTC_RTSI_TRIG_MASK(chan);
devpriv->rtsi_trig_b_output_reg |= NISTC_RTSI_TRIG(chan, src);
ni_stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
NISTC_RTSI_TRIGB_OUT_REG);
+ } else if (chan != NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
+ /* probably should never reach this, since the
+ * ni_valid_rtsi_output_source above errors out if chan is too
+ * high
+ */
+ dev_err(dev->class_dev, "%s: unknown rtsi channel\n", __func__);
+ return -EINVAL;
}
return 2;
}
@@ -4982,12 +4996,12 @@ static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
} else if (chan < NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
return NISTC_RTSI_TRIG_TO_SRC(chan,
devpriv->rtsi_trig_b_output_reg);
- } else {
- if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN)
- return NI_RTSI_OUTPUT_RTSI_OSC;
- dev_err(dev->class_dev, "bug! should never get here?\n");
- return 0;
+ } else if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
+ return NI_RTSI_OUTPUT_RTSI_OSC;
}
+
+ dev_err(dev->class_dev, "%s: unknown rtsi channel\n", __func__);
+ return -EINVAL;
}
static int ni_rtsi_insn_config(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 5036eebb9162..2f174a34d9e9 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -472,10 +472,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -527,6 +525,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (!devpriv)
return -ENOMEM;
+ mutex_init(&devpriv->mut);
+ usb_set_intfdata(intf, devpriv);
+
ret = ni6501_find_endpoints(dev);
if (ret)
return ret;
@@ -535,9 +536,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- mutex_init(&devpriv->mut);
- usb_set_intfdata(intf, devpriv);
-
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 608403c7586b..f0572d6a5f63 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,7 +17,7 @@
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: [ITL] USB-DUX-FAST (usbduxfast)
* Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 16 Nov 2019
* Status: stable
*/
@@ -31,6 +31,7 @@
*
*
* Revision history:
+ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
* 0.9: Dropping the first data packet which seems to be from the last transfer.
* Buffer overflows in the FX2 are handed over to comedi.
* 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
@@ -359,6 +360,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
+ int err2 = 0;
unsigned int steps;
unsigned int arg;
@@ -408,11 +410,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
*/
steps = (cmd->convert_arg * 30) / 1000;
if (cmd->chanlist_len != 1)
- err |= comedi_check_trigger_arg_min(&steps,
- MIN_SAMPLING_PERIOD);
- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
- arg = (steps * 1000) / 30;
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ err2 |= comedi_check_trigger_arg_min(&steps,
+ MIN_SAMPLING_PERIOD);
+ else
+ err2 |= comedi_check_trigger_arg_min(&steps, 1);
+ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
+ if (err2) {
+ err |= err2;
+ arg = (steps * 1000) / 30;
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index a004aed0147a..1800eb3ae017 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -691,10 +691,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
size = usb_endpoint_maxp(devpriv->ep_tx);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
- if (!devpriv->usb_tx_buf) {
- kfree(devpriv->usb_rx_buf);
+ if (!devpriv->usb_tx_buf)
return -ENOMEM;
- }
return 0;
}
@@ -809,6 +807,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
devpriv->model = board->model;
+ sema_init(&devpriv->limit_sem, 8);
+
ret = vmk80xx_find_usb_endpoints(dev);
if (ret)
return ret;
@@ -817,8 +817,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- sema_init(&devpriv->limit_sem, 8);
-
usb_set_intfdata(intf, devpriv);
if (devpriv->model == VMK8055_MODEL)
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 587f68aa466c..ece713d02660 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -247,7 +247,7 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
static int fbtft_backlight_update_status(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
- bool polarity = !!(bd->props.state & BL_CORE_DRIVER1);
+ bool polarity = par->polarity;
fbtft_par_dbg(DEBUG_BACKLIGHT, par,
"%s: polarity=%d, power=%d, fb_blank=%d\n",
@@ -296,7 +296,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
/* Assume backlight is off, get polarity from current state of pin */
bl_props.power = FB_BLANK_POWERDOWN;
if (!gpio_get_value(par->gpio.led[0]))
- bl_props.state |= BL_CORE_DRIVER1;
+ par->polarity = true;
bd = backlight_device_register(dev_driver_string(par->info->device),
par->info->device, par, &fbtft_bl_ops, &bl_props);
@@ -766,7 +766,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
@@ -814,7 +814,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
if (par->gamma.curves && gamma) {
if (fbtft_gamma_parse_str(par,
par->gamma.curves, gamma, strlen(gamma)))
- goto alloc_fail;
+ goto release_framebuf;
}
/* Transmit buffer */
@@ -839,7 +839,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
}
if (!txbuf)
- goto alloc_fail;
+ goto release_framebuf;
par->txbuf.buf = txbuf;
par->txbuf.len = txbuflen;
}
@@ -875,6 +875,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
return info;
+release_framebuf:
+ framebuffer_release(info);
+
alloc_fail:
vfree(vmem);
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 89c4b5b76ce6..027531990674 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -241,6 +241,7 @@ struct fbtft_par {
ktime_t update_time;
bool bgr;
void *extra;
+ bool polarity;
};
#define NUMARGS(...) (sizeof((int[]){__VA_ARGS__})/sizeof(int))
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index aa6508b44fab..ed7c32542cb3 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -90,8 +90,8 @@ void gb_audio_manager_remove_all(void)
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
- kobject_put(&module->kobj);
ida_simple_remove(&module_id, module->id);
+ kobject_put(&module->kobj);
}
is_empty = list_empty(&modules_list);
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 9f01427f35f9..1cb82cc28aa7 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1102,21 +1102,21 @@ static void gb_lights_channel_release(struct gb_channel *channel)
static void gb_lights_light_release(struct gb_light *light)
{
int i;
- int count;
light->ready = false;
- count = light->channels_count;
-
if (light->has_flash)
gb_lights_light_v4l2_unregister(light);
+ light->has_flash = false;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < light->channels_count; i++)
gb_lights_channel_release(&light->channels[i]);
- light->channels_count--;
- }
+ light->channels_count = 0;
+
kfree(light->channels);
+ light->channels = NULL;
kfree(light->name);
+ light->name = NULL;
}
static void gb_lights_release(struct gb_lights *glights)
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index e85c988b7034..adea629b8065 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -521,7 +521,7 @@ static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
op = gb_operation_create(connection,
GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
- sizeof(req), sizeof(*resp) + props_count *
+ sizeof(*req), sizeof(*resp) + props_count *
sizeof(struct gb_power_supply_props_desc),
GFP_KERNEL);
if (!op)
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index f7f4cd6fb55b..8d2678b22c36 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -20,6 +20,7 @@
#include <signal.h>
#define MAX_NUM_DEVICES 10
+#define MAX_SYSFS_PREFIX 0x80
#define MAX_SYSFS_PATH 0x200
#define CSV_MAX_LINE 0x1000
#define SYSFS_MAX_INT 0x20
@@ -68,7 +69,7 @@ struct loopback_results {
};
struct loopback_device {
- char name[MAX_SYSFS_PATH];
+ char name[MAX_STR_LEN];
char sysfs_entry[MAX_SYSFS_PATH];
char debugfs_entry[MAX_SYSFS_PATH];
struct loopback_results results;
@@ -94,8 +95,8 @@ struct loopback_test {
int stop_all;
int poll_count;
char test_name[MAX_STR_LEN];
- char sysfs_prefix[MAX_SYSFS_PATH];
- char debugfs_prefix[MAX_SYSFS_PATH];
+ char sysfs_prefix[MAX_SYSFS_PREFIX];
+ char debugfs_prefix[MAX_SYSFS_PREFIX];
struct timespec poll_timeout;
struct loopback_device devices[MAX_NUM_DEVICES];
struct loopback_results aggregate_results;
@@ -646,7 +647,7 @@ baddir:
static int open_poll_files(struct loopback_test *t)
{
struct loopback_device *dev;
- char buf[MAX_STR_LEN];
+ char buf[MAX_SYSFS_PATH + MAX_STR_LEN];
char dummy;
int fds_idx = 0;
int i;
@@ -917,10 +918,10 @@ int main(int argc, char *argv[])
t.iteration_max = atoi(optarg);
break;
case 'S':
- snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+ snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
break;
case 'D':
- snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+ snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
break;
case 'm':
t.mask = atol(optarg);
@@ -971,10 +972,10 @@ int main(int argc, char *argv[])
}
if (!strcmp(t.sysfs_prefix, ""))
- snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
+ snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
if (!strcmp(t.debugfs_prefix, ""))
- snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
+ snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
ret = find_loopback_devices(&t);
if (ret)
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 4dc9ca3a11b4..b82a4ab77860 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -109,10 +109,10 @@
#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
-#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
-#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
-#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
-#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
#define AD7193_CH_TEMP 0x100 /* Temp senseor */
#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 0ccf192b9a03..5950225e45d1 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -35,6 +35,8 @@ static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
return ret;
}
+ *data = ret;
+
return 0;
}
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe59c933..95f5be1cd498 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -47,6 +47,8 @@
#define ADT7516_MSB_AIN3 0xA
#define ADT7516_MSB_AIN4 0xB
#define ADT7316_DA_DATA_BASE 0x10
+#define ADT7316_DA_10_BIT_LSB_SHIFT 6
+#define ADT7316_DA_12_BIT_LSB_SHIFT 4
#define ADT7316_DA_MSB_DATA_REGS 4
#define ADT7316_LSB_DAC_A 0x10
#define ADT7316_MSB_DAC_A 0x11
@@ -1089,7 +1091,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
if (data & 0x1)
ldac_config |= ADT7516_DAC_AB_IN_VREF;
- else if (data & 0x2)
+ if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
ret = kstrtou8(buf, 16, &data);
@@ -1411,7 +1413,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR,
static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
int channel, char *buf)
{
- u16 data;
+ u16 data = 0;
u8 msb, lsb, offset;
int ret;
@@ -1436,7 +1438,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
if (ret)
return -EIO;
- data = (msb << offset) + (lsb & ((1 << offset) - 1));
+ if (chip->dac_bits == 12)
+ data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
+ else if (chip->dac_bits == 10)
+ data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
+ data |= msb << offset;
return sprintf(buf, "%d\n", data);
}
@@ -1444,7 +1450,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
int channel, const char *buf, size_t len)
{
- u8 msb, lsb, offset;
+ u8 msb, lsb, lsb_reg, offset;
u16 data;
int ret;
@@ -1462,9 +1468,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
return -EINVAL;
if (chip->dac_bits > 8) {
- lsb = data & (1 << offset);
+ lsb = data & ((1 << offset) - 1);
+ if (chip->dac_bits == 12)
+ lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
+ else
+ lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
ret = chip->bus.write(chip->bus.client,
- ADT7316_DA_DATA_BASE + channel * 2, lsb);
+ ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
if (ret)
return -EIO;
}
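The reworked adt7316 DAC paths above keep the low bits of the code left-aligned in the LSB register (shift 4 for a 12-bit DAC, 6 for a 10-bit one) and the high bits in the MSB register. Below is a standalone round-trip sketch for a hypothetical 12-bit code; it assumes offset = dac_bits - 8 (4 here), which comes from the surrounding driver and is not shown in this hunk.

/*
 * Round-trip sketch for a hypothetical 12-bit DAC code, mirroring the
 * shifts introduced above.  Assumes offset = dac_bits - 8 (= 4 here),
 * which is not visible in the hunk itself.
 */
#include <stdio.h>

#define ADT7316_DA_12_BIT_LSB_SHIFT	4

int main(void)
{
	unsigned int data = 0xABC;       /* hypothetical DAC code */
	unsigned int offset = 4;         /* assumed: dac_bits - 8 */

	/* store path: split into MSB register and left-aligned LSB register */
	unsigned char msb = data >> offset;                          /* 0xAB */
	unsigned char lsb_reg = (data & ((1 << offset) - 1))
				<< ADT7316_DA_12_BIT_LSB_SHIFT;      /* 0xC0 */

	/* show path: reassemble the code from the two registers */
	unsigned int readback = (lsb_reg >> ADT7316_DA_12_BIT_LSB_SHIFT)
				| (msb << offset);

	printf("stored msb=0x%02X lsb=0x%02X, read back 0x%03X\n",
	       msb, lsb_reg, readback);
	return 0;
}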
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 50a5b0c2cc7b..7ab95efcf1dc 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -6,6 +6,7 @@
* Licensed under the GPL-2 or later.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -129,7 +130,7 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
{
int ret;
u8 threshtype;
- bool adaptive;
+ bool thrfixed;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
ret = i2c_smbus_read_byte_data(chip->client, AD7150_CFG);
@@ -137,21 +138,23 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
return ret;
threshtype = (ret >> 5) & 0x03;
- adaptive = !!(ret & 0x80);
+
+ /*check if threshold mode is fixed or adaptive*/
+ thrfixed = FIELD_GET(AD7150_CFG_FIX, ret);
switch (type) {
case IIO_EV_TYPE_MAG_ADAPTIVE:
if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x1);
- return adaptive && (threshtype == 0x0);
+ return !thrfixed && (threshtype == 0x1);
+ return !thrfixed && (threshtype == 0x0);
case IIO_EV_TYPE_THRESH_ADAPTIVE:
if (dir == IIO_EV_DIR_RISING)
- return adaptive && (threshtype == 0x3);
- return adaptive && (threshtype == 0x2);
+ return !thrfixed && (threshtype == 0x3);
+ return !thrfixed && (threshtype == 0x2);
case IIO_EV_TYPE_THRESH:
if (dir == IIO_EV_DIR_RISING)
- return !adaptive && (threshtype == 0x1);
- return !adaptive && (threshtype == 0x0);
+ return thrfixed && (threshtype == 0x1);
+ return thrfixed && (threshtype == 0x0);
default:
break;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 89dd6b989254..e0440807b4ed 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -423,6 +423,9 @@ static int vpfe_open(struct file *file)
/* If decoder is not initialized. initialize it */
if (!video->initialized && vpfe_update_pipe_state(video)) {
mutex_unlock(&video->lock);
+ v4l2_fh_del(&handle->vfh);
+ v4l2_fh_exit(&handle->vfh);
+ kfree(handle);
return -ENODEV;
}
/* Increment device users counter */
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index 1732c3857b8e..2785cc03c529 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -580,7 +580,7 @@ unlock:
else
pulse8->config_pending = true;
mutex_unlock(&pulse8->config_lock);
- return err;
+ return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
}
static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index 7f51024dc5eb..e87b9ed4f37d 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -451,7 +451,9 @@ static int aim_probe(struct most_interface *iface, int channel_id,
c->devno = MKDEV(major, current_minor);
cdev_init(&c->cdev, &channel_fops);
c->cdev.owner = THIS_MODULE;
- cdev_add(&c->cdev, c->devno, 1);
+ retval = cdev_add(&c->cdev, c->devno, 1);
+ if (retval < 0)
+ goto err_free_c;
c->iface = iface;
c->cfg = cfg;
c->channel_id = channel_id;
@@ -487,6 +489,7 @@ error_create_device:
list_del(&c->list);
error_alloc_kfifo:
cdev_del(&c->cdev);
+err_free_c:
kfree(c);
error_alloc_channel:
ida_simple_remove(&minor_id, current_minor);
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 4659a6450c04..6b18afb62145 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -87,6 +87,11 @@ static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
unsigned int payload_len = skb->len - ETH_HLEN;
unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+ if (mdp_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mdp_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mdp_len);
@@ -134,6 +139,11 @@ static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
u8 *buff = mbo->virt_address;
unsigned int mep_len = skb->len + MEP_HDR_LEN;
+ if (mep_len < skb->len) {
+ pr_err("drop: too large packet! (%u)\n", skb->len);
+ return -EINVAL;
+ }
+
if (mbo->buffer_length < mep_len) {
pr_err("drop: too small buffer! (%d for %d)\n",
mbo->buffer_length, mep_len);
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 0f8b8e0bffdf..dedc313e9dea 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -805,7 +805,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -813,7 +813,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index c7bf8ab26192..50793c9df1b3 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2052,7 +2052,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
struct ieee_param *param;
uint ret = 0;
- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
ret = -EINVAL;
goto out;
}
@@ -2859,7 +2859,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
- if (!p->pointer) {
+ if (!p->pointer || p->length != sizeof(struct ieee_param)) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d22360849b88..c14088075c59 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -40,11 +40,14 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/****** 8188EUS ********/
{USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
@@ -78,7 +81,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
phost_conf = pusbd->actconfig;
pconf_desc = &phost_conf->desc;
- phost_iface = &usb_intf->altsetting[0];
+ phost_iface = usb_intf->cur_altsetting;
piface_desc = &phost_iface->desc;
pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
@@ -366,8 +369,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
}
padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!padapter->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
+ if (!padapter->HalData) {
+ DBG_88E("Failed to allocate memory for HAL data\n");
+ goto free_adapter;
+ }
padapter->intf_start = &usb_intf_start;
padapter->intf_stop = &usb_intf_stop;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 4c30eea45f89..0d6fe039ac19 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1630,14 +1630,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb_push(skb, priv->rtllib->tx_headroom);
ret = _rtl92e_tx(dev, skb);
- if (ret != 0)
- kfree_skb(skb);
if (queue_index != MGNT_QUEUE) {
priv->rtllib->stats.tx_bytes += (skb->len -
priv->rtllib->tx_headroom);
priv->rtllib->stats.tx_packets++;
}
+
+ if (ret != 0)
+ kfree_skb(skb);
}
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 5fe95937d811..6ec379056650 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1509,7 +1509,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
(tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
- int status;
+ int status, rt = -1;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
unsigned int idx_pipe;
@@ -1653,8 +1653,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero)
- return -ENOMEM;
+ if (!tx_urb_zero) {
+ rt = -ENOMEM;
+ goto error;
+ }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
@@ -1664,7 +1666,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
"Error TX URB for zero byte %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+ goto error;
}
}
netif_trans_update(dev);
@@ -1675,7 +1677,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+
+error:
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ usb_free_urb(tx_urb_zero);
+ return rt;
}
static short rtl8192_usb_initendpoints(struct net_device *dev)
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 9f61583af150..41b667c8385c 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -158,17 +158,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
- u32 val;
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
- if (pcmd->rsp && pcmd->rspsz > 0)
- memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
+ r8712_free_cmd_obj(pcmd);
return H2C_SUCCESS;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 67e9e910aef9..d10a59d4a550 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -152,7 +152,7 @@ enum rtl8712_h2c_cmd {
static struct _cmd_callback cmd_callback[] = {
{GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
{GEN_CMD_CODE(_Write_MACREG), NULL},
- {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Read_BBREG), NULL},
{GEN_CMD_CODE(_Write_BBREG), NULL},
{GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
{GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 897d4621a5ce..d0ba42dfafeb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -275,7 +275,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)
pdvobjpriv->padapter = padapter;
padapter->EepromAddressSize = 6;
- phost_iface = &pintf->altsetting[0];
+ phost_iface = pintf->cur_altsetting;
piface_desc = &phost_iface->desc;
pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
if (pusbd->speed == USB_SPEED_HIGH) {
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 97ca4ecca8a9..37bc3c664630 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -565,8 +565,7 @@ static u_long get_word(struct vc_data *vc)
return 0;
} else if ((tmpx < vc->vc_cols - 2)
&& (ch == SPACE || ch == 0 || IS_WDLM(ch))
- && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) >
- SPACE)) {
+ && ((char)get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE)) {
tmp_pos += 2;
tmpx++;
} else
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ab96629b7889..bcdbf38b6916 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -977,8 +977,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
return;
}
- MACvIntDisable(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
@@ -1067,8 +1065,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static void vnt_interrupt_work(struct work_struct *work)
@@ -1078,14 +1074,17 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
+
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
struct vnt_private *priv = arg;
- if (priv->vif)
- schedule_work(&priv->interrupt_work);
+ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
return IRQ_HANDLED;
}
@@ -1674,8 +1673,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->hw->max_signal = 100;
- if (vnt_init(priv))
+ if (vnt_init(priv)) {
+ device_free_info(priv);
return -ENODEV;
+ }
device_print_info(priv);
pci_set_drvdata(pcid, priv);
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 4832666cc580..0578bf82f557 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -62,6 +62,8 @@
#define RATE_AUTO 12
#define MAX_RATE 12
+#define VNT_B_RATES (BIT(RATE_1M) | BIT(RATE_2M) |\
+ BIT(RATE_5M) | BIT(RATE_11M))
/*
* device specific
@@ -269,6 +271,7 @@ struct vnt_private {
u8 mac_hw;
/* netdev */
struct usb_device *usb;
+ struct usb_interface *intf;
u64 tsf_time;
u8 rx_rate;
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 655f0002f880..7b73fa2f8834 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -140,7 +140,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
+ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
frame = skb_data + 8;
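The vt6656 RSSI fix above negates rx_dbm before the cast, since the value is a negative dBm figure and casting it to u8 directly would wrap. A short arithmetic sketch with a hypothetical signal level follows.

/*
 * Arithmetic sketch (hypothetical signal level) for the RSSI fix above:
 * rx_dbm is negative, so it must be negated before the cast to u8,
 * otherwise the stored pre-ED RSSI wraps around.
 */
#include <stdio.h>

typedef unsigned char u8;

int main(void)
{
	long rx_dbm = -67;                 /* assumed received level in dBm */

	u8 old_val = (u8)rx_dbm + 1;       /* 190: wrapped, nonsensical */
	u8 new_val = (u8)-rx_dbm + 1;      /* 68: magnitude plus one */

	printf("old=%u new=%u\n", old_val, new_val);
	return 0;
}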
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 73538fb4e4e2..b554e881e67f 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -107,9 +107,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
info->status.rates[0].count = tx_retry;
- if (!(tsr & (TSR_TMO | TSR_RETRYTMO))) {
+ if (!(tsr & TSR_TMO)) {
info->status.rates[0].idx = idx;
- info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(priv->hw, context->skb);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index b1955378852a..b93773af2031 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -972,6 +972,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
priv = hw->priv;
priv->hw = hw;
priv->usb = udev;
+ priv->intf = intf;
vnt_set_options(priv);
@@ -994,6 +995,7 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(priv->hw, SUPPORTS_PS);
+ ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index aa59e7f14ab3..5ccb9eb7ff1c 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -277,11 +277,9 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
PK_TYPE_11B, &buf->b);
/* Get Duration and TimeStamp */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration_a = dur;
- buf->duration_b = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration_a = hdr->duration_id;
+ buf->duration_b = hdr->duration_id;
} else {
buf->duration_a = vnt_get_duration_le(priv,
tx_context->pkt_type, need_ack);
@@ -370,10 +368,8 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
tx_context->pkt_type, &buf->ab);
/* Get Duration and TimeStampOff */
- if (ieee80211_is_pspoll(hdr->frame_control)) {
- __le16 dur = cpu_to_le16(priv->current_aid | BIT(14) | BIT(15));
-
- buf->duration = dur;
+ if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ buf->duration = hdr->duration_id;
} else {
buf->duration = vnt_get_duration_le(priv, tx_context->pkt_type,
need_ack);
@@ -816,10 +812,14 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
- if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- pkt_type = PK_TYPE_11GB;
- else
- pkt_type = PK_TYPE_11GA;
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ if (priv->basic_rates & VNT_B_RATES)
+ pkt_type = PK_TYPE_11GB;
+ else
+ pkt_type = PK_TYPE_11GA;
+ } else {
+ pkt_type = PK_TYPE_11A;
+ }
}
} else {
pkt_type = PK_TYPE_11B;
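The rxtx.c hunk above only selects PK_TYPE_11GB protection when an 802.11b rate is actually present in priv->basic_rates; the new VNT_B_RATES mask groups the four b rates for that test. A small sketch of the mask check, where the RATE_* values are assumptions for illustration and need not match the driver's device.h:

#include <stdio.h>

#define BIT(n)  (1u << (n))

/* rate indices assumed here for illustration only */
#define RATE_1M  0
#define RATE_2M  1
#define RATE_5M  2
#define RATE_11M 3

#define VNT_B_RATES (BIT(RATE_1M) | BIT(RATE_2M) | BIT(RATE_5M) | BIT(RATE_11M))

int main(void)
{
    unsigned int basic_rates = BIT(RATE_11M) | BIT(6);  /* hypothetical basic-rate set */

    if (basic_rates & VNT_B_RATES)
        printf("CTS protection with an 11b basic rate -> PK_TYPE_11GB\n");
    else
        printf("no 11b basic rate -> PK_TYPE_11GA\n");
    return 0;
}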
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 95faaeb7432a..4dc3521932ba 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -110,6 +110,7 @@ void vnt_run_command(struct work_struct *work)
if (vnt_init(priv)) {
/* If fail all ends TODO retry */
dev_err(&priv->usb->dev, "failed to start\n");
+ usb_set_intfdata(priv->intf, NULL);
ieee80211_free_hw(priv->hw);
return;
}
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d564627..4c6384328615 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -489,10 +489,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
/* Set the encryption - we only support wep */
if (is_wep) {
if (sme->key) {
- if (sme->key_idx >= NUM_WEPKEYS) {
- err = -EINVAL;
- goto exit;
- }
+ if (sme->key_idx >= NUM_WEPKEYS)
+ return -EINVAL;
result = prism2_domibset_uint32(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6a107f8a06e2..52ad5c137d9f 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3443,6 +3443,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
pr_debug("overlen frm: len=%zd\n",
skblen - sizeof(struct p80211_caphdr));
+
+ return;
}
skb = dev_alloc_skb(skblen);
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index f815f9d5045f..08e2bbd9772f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -938,7 +938,7 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
}
}
- return 0;
+ return result;
}
/*----------------------------------------------------------------
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index bfb6b0a6528d..f7149bb23180 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -179,6 +179,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
cancel_work_sync(&hw->link_bh);
cancel_work_sync(&hw->commsqual_bh);
+ cancel_work_sync(&hw->usb_work);
/* Now we complete any outstanding commands
* and tell everyone who is waiting for their
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b6c4f55f79e7..2b8fbcd8dde2 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4321,30 +4321,37 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
- if (!atomic_read(&sess->session_continuation)) {
- spin_unlock_bh(&sess->conn_lock);
+ if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
- } else
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
}
@@ -4453,9 +4460,9 @@ static void iscsit_logout_post_handler_closesession(
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
+ atomic_set(&sess->session_close, 1);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4590,49 +4597,6 @@ void iscsit_fail_session(struct iscsi_session *sess)
sess->session_state = TARG_SESS_STATE_FAILED;
}
-int iscsit_free_session(struct iscsi_session *sess)
-{
- u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
- int is_last;
-
- spin_lock_bh(&sess->conn_lock);
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
-
- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
- conn_list) {
- if (conn_count == 0)
- break;
-
- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
- is_last = 1;
- } else {
- iscsit_inc_conn_usage_count(conn_tmp);
- is_last = 0;
- }
- iscsit_inc_conn_usage_count(conn);
-
- spin_unlock_bh(&sess->conn_lock);
- iscsit_cause_connection_reinstatement(conn, 1);
- spin_lock_bh(&sess->conn_lock);
-
- iscsit_dec_conn_usage_count(conn);
- if (is_last == 0)
- iscsit_dec_conn_usage_count(conn_tmp);
-
- conn_count--;
- }
-
- if (atomic_read(&sess->nconn)) {
- spin_unlock_bh(&sess->conn_lock);
- wait_for_completion(&sess->session_wait_comp);
- } else
- spin_unlock_bh(&sess->conn_lock);
-
- iscsit_close_session(sess);
- return 0;
-}
-
void iscsit_stop_session(
struct iscsi_session *sess,
int session_sleep,
@@ -4643,8 +4607,6 @@ void iscsit_stop_session(
int is_last;
spin_lock_bh(&sess->conn_lock);
- if (session_sleep)
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
@@ -4702,12 +4664,15 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
@@ -4717,7 +4682,9 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
- iscsit_free_session(sess);
+ list_del_init(&se_sess->sess_list);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
session_count++;
}
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 4cf2c0f2ba2f..cfe87b629a8b 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -30,7 +30,6 @@ extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
extern int iscsit_close_session(struct iscsi_session *);
extern void iscsit_fail_session(struct iscsi_session *);
-extern int iscsit_free_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index f0d97305575d..aa3f98994c7d 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -74,7 +74,7 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
+ if (!strcmp(token, "5")) {
pr_debug("Selected MD5 Algorithm\n");
kfree(orig);
return CHAP_DIGEST_MD5;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 8a4bc15bc3f5..0718f688277a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1527,20 +1527,23 @@ static void lio_tpg_close_session(struct se_session *se_sess)
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
return;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- iscsit_close_session(sess);
+ iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d2f82aaf6a85..985e600908e0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -195,6 +195,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
@@ -205,6 +206,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -229,7 +231,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -237,7 +238,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- iscsit_close_session(sess);
return 0;
}
@@ -525,6 +525,7 @@ static int iscsi_login_non_zero_tsih_s2(
sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cc38a3509f78..c3d576ed6f13 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1047,27 +1047,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
unsigned char *cdb = cmd->t_task_cdb;
/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
- }
-
- /*
* For REPORT LUNS we always need to emulate the response, for everything
* else, pass it up.
*/
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index cb6497ce4b61..6e75095af681 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -130,7 +130,7 @@ static int srp_get_pr_transport_id(
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
- pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f49d2989d000..984e5f514140 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -607,7 +607,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
load = 0;
total_load += load;
- if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ if (load_cpu)
load_cpu[i] = load;
i++;
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index 5836e5554433..d4c374cc4f74 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -20,6 +20,13 @@ enum int3400_thermal_uuid {
INT3400_THERMAL_PASSIVE_1,
INT3400_THERMAL_ACTIVE,
INT3400_THERMAL_CRITICAL,
+ INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
+ INT3400_THERMAL_EMERGENCY_CALL_MODE,
+ INT3400_THERMAL_PASSIVE_2,
+ INT3400_THERMAL_POWER_BOSS,
+ INT3400_THERMAL_VIRTUAL_SENSOR,
+ INT3400_THERMAL_COOLING_MODE,
+ INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
INT3400_THERMAL_MAXIMUM_UUID,
};
@@ -27,6 +34,13 @@ static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
"3A95C389-E4B8-4629-A526-C52C88626BAE",
"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
+ "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
+ "5349962F-71E6-431D-9AE8-0A635B710AEE",
+ "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
+ "F5A35014-C209-46A4-993A-EB56DE7530A1",
+ "6ED722A7-9240-48A5-B479-31EEF723D7CF",
+ "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
+ "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
};
struct int3400_thermal_priv {
@@ -271,10 +285,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
- }
+ int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+ int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+
priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
priv, &int3400_thermal_ops,
&int3400_thermal_params, 0, 0);
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index b2bbaa1c60b0..18788109cae6 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -43,7 +43,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
BYT_SOC_DTS_APIC_IRQ},
{}
};
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 34169c32d495..ea9558679634 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -348,7 +348,8 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
u32 raw;
for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
- raw = readl(mt->thermal_base + conf->msr[i]);
+ raw = readl(mt->thermal_base +
+ conf->msr[conf->bank_data[bank->id].sensors[i]]);
temp = raw_to_mcelsius(mt,
conf->bank_data[bank->id].sensors[i],
@@ -485,7 +486,8 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
for (i = 0; i < conf->bank_data[num].num_sensors; i++)
writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
- mt->thermal_base + conf->adcpnp[i]);
+ mt->thermal_base +
+ conf->adcpnp[conf->bank_data[num].sensors[i]]);
writel((1 << conf->bank_data[num].num_sensors) - 1,
mt->thermal_base + TEMP_MONCTL0);
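The mtk_thermal hunks above index the MSR and ADCPNP register tables by the bank's sensor id instead of the loop counter, which matters for banks whose sensors are not simply numbered 0..n-1. A short sketch of the difference, with invented sensor ids and register offsets:

#include <stdio.h>

int main(void)
{
    /* hypothetical layout: this bank owns sensors 2 and 3 */
    const int sensors[] = { 2, 3 };
    const int msr[] = { 0x100, 0x104, 0x108, 0x10c };   /* one register per sensor */
    int i;

    for (i = 0; i < 2; i++) {
        int wrong = msr[i];           /* old code: indexes by loop counter */
        int right = msr[sensors[i]];  /* patched code: indexes by sensor id */
        printf("sensor %d: old reads 0x%x, patched reads 0x%x\n",
               sensors[i], wrong, right);
    }
    return 0;
}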
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 3f9fe6aa51cc..ebbe1ec7b9e8 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -162,7 +162,8 @@ static int tsens_probe(struct platform_device *pdev)
if (tmdev->ops->calibrate) {
ret = tmdev->ops->calibrate(tmdev);
if (ret < 0) {
- dev_err(dev, "tsens calibration failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "tsens calibration failed\n");
return ret;
}
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 73e5fee6cf1d..83126e2dce36 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -401,8 +401,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
rcar_thermal_for_each_priv(priv, common) {
if (rcar_thermal_had_changed(priv, status)) {
rcar_thermal_irq_disable(priv);
- schedule_delayed_work(&priv->work,
- msecs_to_jiffies(300));
+ queue_delayed_work(system_freezable_wq, &priv->work,
+ msecs_to_jiffies(300));
}
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index cd82ae34ddfa..90c033b4ec98 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -2073,7 +2073,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_unlock(&thermal_list_lock);
- thermal_zone_device_set_polling(tz, 0);
+ cancel_delayed_work_sync(&tz->poll_queue);
if (tz->type[0])
device_remove_file(&tz->device, &dev_attr_type);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cba6bc6ab9ed..c963593eedbe 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -95,9 +95,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
- iowrite16(value, ring_desc_base(ring) + offset);
+ /*
+ * The other 16 bits in the register are read-only and writes to them
+ * are ignored by the hardware so we can save one ioread32() by
+ * filling the read-only bits with zeroes.
+ */
+ iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+ /* See ring_iowrite_cons() above for explanation */
+ iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -149,7 +160,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+ if (ring->is_tx)
+ ring_iowrite_prod(ring, ring->head);
+ else
+ ring_iowrite_cons(ring, ring->head);
}
}
@@ -369,7 +383,7 @@ void ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+ ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
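The nhi.c change above replaces the 16-bit descriptor writes with 32-bit ones: per the added comment, the consumer index sits in the low half of the register at offset 8, the producer index in the high half, and the other half is read-only, so writing zeroes there is harmless and saves an ioread32(). A sketch of just the packing, independent of the MMIO accessors:

#include <stdio.h>
#include <stdint.h>

/* mimic ring_iowrite_cons()/ring_iowrite_prod(): place a 16-bit index in
 * the correct half of the 32-bit PROD/CONS register at offset 8 */
static uint32_t pack_cons(uint16_t cons) { return cons; }
static uint32_t pack_prod(uint16_t prod) { return (uint32_t)prod << 16; }

int main(void)
{
    uint16_t head = 0x0123;  /* hypothetical ring head */

    printf("RX (cons) write: 0x%08x\n", (unsigned)pack_cons(head));
    printf("TX (prod) write: 0x%08x\n", (unsigned)pack_prod(head));
    return 0;
}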
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 95103054c0e4..fdd2860cb9bd 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -455,4 +455,27 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config LDISC_AUTOLOAD
+ bool "Automatically load TTY Line Disciplines"
+ default y
+ help
+ Historically the kernel has always automatically loaded any
+ line discipline that is in a kernel module when a user asks
+ for it to be loaded with the TIOCSETD ioctl, or through other
+ means. This is not always the best thing to do on systems
+ where you know you will not be using some of the more
+ "ancient" line disciplines, so prevent the kernel from doing
+ this unless the request is coming from a process with the
+ CAP_SYS_MODULE permissions.
+
+ Say 'Y' here if you trust your userspace users to do the right
+ thing, or if you have only provided the line disciplines that
+ you know you will be using, or if you wish to continue to use
+ the traditional method of on-demand loading of these modules
+ by any user.
+
+ This functionality can be changed at runtime with the
+ dev.tty.ldisc_autoload sysctl; this configuration option only
+ sets the default value of this functionality.
+
endif # TTY
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 7ac9bcdf1e61..1ba0dc11153d 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -139,6 +139,21 @@ static int find_console_handle(void)
return 1;
}
+static unsigned int local_ev_byte_channel_send(unsigned int handle,
+ unsigned int *count,
+ const char *p)
+{
+ char buffer[EV_BYTE_CHANNEL_MAX_BYTES];
+ unsigned int c = *count;
+
+ if (c < sizeof(buffer)) {
+ memcpy(buffer, p, c);
+ memset(&buffer[c], 0, sizeof(buffer) - c);
+ p = buffer;
+ }
+ return ev_byte_channel_send(handle, count, p);
+}
+
/*************************** EARLY CONSOLE DRIVER ***************************/
#ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC
@@ -157,7 +172,7 @@ static void byte_channel_spin_send(const char data)
do {
count = 1;
- ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
+ ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE,
&count, &data);
} while (ret == EV_EAGAIN);
}
@@ -224,7 +239,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s,
while (count) {
len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES);
do {
- ret = ev_byte_channel_send(handle, &len, s);
+ ret = local_ev_byte_channel_send(handle, &len, s);
} while (ret == EV_EAGAIN);
count -= len;
s += len;
@@ -404,7 +419,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
EV_BYTE_CHANNEL_MAX_BYTES);
- ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
+ ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail);
/* 'len' is valid only if the return code is 0 or EV_EAGAIN */
if (!ret || (ret == EV_EAGAIN))
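local_ev_byte_channel_send() above pads short writes out to EV_BYTE_CHANNEL_MAX_BYTES so the hypercall never reads past the caller's buffer. The same copy-and-zero-pad pattern in plain C, with a made-up 16-byte maximum standing in for the real constant:

#include <stdio.h>
#include <string.h>

#define MAX_BYTES 16  /* stands in for EV_BYTE_CHANNEL_MAX_BYTES */

/* pretend backend that always reads MAX_BYTES from p */
static void backend_send(const char *p, unsigned int count)
{
    printf("sending %u byte(s); first byte=0x%02x, %d bytes safely readable\n",
           count, (unsigned char)p[0], MAX_BYTES);
}

static void padded_send(const char *p, unsigned int count)
{
    char buffer[MAX_BYTES];

    if (count < sizeof(buffer)) {
        /* copy the payload, zero the tail, point at the bounce buffer */
        memcpy(buffer, p, count);
        memset(&buffer[count], 0, sizeof(buffer) - count);
        p = buffer;
    }
    backend_send(p, count);
}

int main(void)
{
    padded_send("hi", 2);
    return 0;
}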
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index b05dc5086627..8bab8b00d47d 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -120,6 +120,14 @@ static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
return got;
}
+/**
+ * hvterm_raw_put_chars: send characters to firmware for given vterm adapter
+ * @vtermno: The virtual terminal number.
+ * @buf: The characters to send. Because of the underlying hypercall in
+ * hvc_put_chars(), this buffer must be at least 16 bytes long, even if
+ * you are sending fewer chars.
+ * @count: number of chars to send.
+ */
static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
{
struct hvterm_priv *pv = hvterm_privs[vtermno];
@@ -232,6 +240,7 @@ static const struct hv_ops hvterm_hvsi_ops = {
static void udbg_hvc_putc(char c)
{
int count = -1;
+ unsigned char bounce_buffer[16];
if (!hvterm_privs[0])
return;
@@ -242,7 +251,12 @@ static void udbg_hvc_putc(char c)
do {
switch(hvterm_privs[0]->proto) {
case HV_PROTOCOL_RAW:
- count = hvterm_raw_put_chars(0, &c, 1);
+ /*
+ * hvterm_raw_put_chars requires at least a 16-byte
+ * buffer, so go via the bounce buffer
+ */
+ bounce_buffer[0] = c;
+ count = hvterm_raw_put_chars(0, bounce_buffer, 1);
break;
case HV_PROTOCOL_HVSI:
count = hvterm_hvsi_put_chars(0, &c, 1);
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index df0204b6148f..4417f7568422 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1515,6 +1515,8 @@ static void ipw_send_setup_packet(struct ipw_hardware *hw)
sizeof(struct ipw_setup_get_version_query_packet),
ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_GET_VERSION_QRY);
+ if (!ver_packet)
+ return;
ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
/*
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 655c7948261c..2fa4f9123469 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -113,6 +113,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!ipw->common_memory) {
+ ret = -ENOMEM;
+ goto exit1;
+ }
if (!request_mem_region(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]),
IPWIRELESS_PCCARD_NAME)) {
@@ -133,6 +137,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
+ if (!ipw->attr_memory) {
+ ret = -ENOMEM;
+ goto exit3;
+ }
if (!request_mem_region(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]),
IPWIRELESS_PCCARD_NAME)) {
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 8d6253903f24..0c12dec110bc 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -614,7 +614,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
}
/* no data */
- if (file->f_flags & O_NONBLOCK) {
+ if (tty_io_nonblock(tty, file)) {
ret = -EAGAIN;
break;
}
@@ -681,7 +681,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
if (tbuf)
break;
- if (file->f_flags & O_NONBLOCK) {
+ if (tty_io_nonblock(tty, file)) {
error = -EAGAIN;
break;
}
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 345111467b85..ee0e07b4a13d 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1080,7 +1080,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
pMsg = remove_msg(pInfo, pClient);
if (pMsg == NULL) {
/* no messages available. */
- if (file->f_flags & O_NONBLOCK) {
+ if (tty_io_nonblock(tty, file)) {
ret = -EAGAIN;
goto unlock;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 904fc9c37fde..8214b0326b3a 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1704,7 +1704,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
down_read(&tty->termios_rwsem);
- while (1) {
+ do {
/*
* When PARMRK is set, each input char may take up to 3 chars
* in the read buf; reduce the buffer space avail by 3x
@@ -1746,7 +1746,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
fp += n;
count -= n;
rcvd += n;
- }
+ } while (!test_bit(TTY_LDISC_CHANGING, &tty->flags));
tty->receive_room = room;
@@ -2213,7 +2213,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
break;
if (!timeout)
break;
- if (file->f_flags & O_NONBLOCK) {
+ if (tty_io_nonblock(tty, file)) {
retval = -EAGAIN;
break;
}
@@ -2367,7 +2367,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
}
if (!nr)
break;
- if (file->f_flags & O_NONBLOCK) {
+ if (tty_io_nonblock(tty, file)) {
retval = -EAGAIN;
break;
}
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index e8e8973939d3..447d791bde22 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -279,7 +279,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1
module_param_array(pc104_4, ulong, NULL, 0);
MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,...");
-static int rp_init(void);
+static int __init rp_init(void);
static void rp_cleanup_module(void);
module_init(rp_init);
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index e10f1244409b..2fa92ec1d1cc 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -119,7 +119,7 @@ static int bcm2835aux_serial_remove(struct platform_device *pdev)
{
struct bcm2835aux_data *data = platform_get_drvdata(pdev);
- serial8250_unregister_port(data->uart.port.line);
+ serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
return 0;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index e8819aa20415..c4e9eba36023 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -181,7 +181,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
struct hlist_head *h;
struct hlist_node *n;
struct irq_info *i;
- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
+ int ret;
mutex_lock(&hash_mutex);
@@ -216,9 +216,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
spin_unlock_irq(&i->lock);
- irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
- irq_flags, "serial", i);
+ up->port.irqflags, "serial", i);
if (ret < 0)
serial_do_unlink(i, up);
}
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 3177264a1166..22d65a33059e 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -269,7 +269,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
{
- return param == chan->device->dev->parent;
+ return param == chan->device->dev;
}
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
@@ -311,7 +311,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p->set_termios = dw8250_set_termios;
}
- /* Platforms with iDMA */
+ /* Platforms with iDMA 64-bit */
if (platform_get_resource_byname(to_platform_device(p->dev),
IORESOURCE_MEM, "lpss_priv")) {
p->set_termios = dw8250_set_termios;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 5b54439a8a9b..c7a7574172fa 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1814,13 +1814,13 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- if (status & (UART_LSR_DR | UART_LSR_BI) &&
- iir & UART_IIR_RDI) {
+ if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
+ if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
+ (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
spin_unlock_irqrestore(&port->lock, flags);
@@ -2199,6 +2199,10 @@ int serial8250_do_startup(struct uart_port *port)
}
}
+ /* Check if we need to have shared IRQs */
+ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
+ up->port.irqflags |= IRQF_SHARED;
+
if (port->irq) {
unsigned char iir1;
/*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 2d8089fc2139..f6586a8681b9 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -811,10 +811,8 @@ __acquires(&uap->port.lock)
if (!uap->using_tx_dma)
return;
- /* Avoid deadlock with the DMA engine callback */
- spin_unlock(&uap->port.lock);
- dmaengine_terminate_all(uap->dmatx.chan);
- spin_lock(&uap->port.lock);
+ dmaengine_terminate_async(uap->dmatx.chan);
+
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 73137f4aac20..246f4aab7407 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -52,11 +52,6 @@ struct ar933x_uart_port {
struct clk *clk;
};
-static inline bool ar933x_uart_console_enabled(void)
-{
- return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
@@ -294,6 +289,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
+ /* enable RX and TX ready override */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
+
/* reenable the UART */
ar933x_uart_rmw(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
@@ -426,6 +425,10 @@ static int ar933x_uart_startup(struct uart_port *port)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
+ /* enable RX and TX ready override */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
+
/* Enable RX interrupts */
up->ier = AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
@@ -511,6 +514,7 @@ static struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -607,14 +611,7 @@ static struct console ar933x_uart_console = {
.index = -1,
.data = &ar933x_uart_driver,
};
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
- if (!ar933x_uart_console_enabled())
- return;
-
- ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
@@ -703,7 +700,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
- ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_console_ports[up->port.line] = up;
+#endif
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
@@ -752,8 +751,9 @@ static int __init ar933x_uart_init(void)
{
int ret;
- if (ar933x_uart_console_enabled())
- ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+ ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 5a341b1c65c3..4a7eb85f7c85 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -175,6 +175,8 @@ struct atmel_uart_port {
unsigned int pending_status;
spinlock_t lock_suspended;
+ bool hd_start_rx; /* can start RX during half-duplex operation */
+
int (*prepare_rx)(struct uart_port *port);
int (*prepare_tx)(struct uart_port *port);
void (*schedule_rx)(struct uart_port *port);
@@ -241,6 +243,12 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
#endif
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+ return (port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX);
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -492,9 +500,10 @@ static void atmel_stop_tx(struct uart_port *port)
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX))
- atmel_start_rx(port);
+ if (atmel_uart_is_half_duplex(port))
+ if (!atomic_read(&atmel_port->tasklet_shutdown))
+ atmel_start_rx(port);
+
}
/*
@@ -511,8 +520,7 @@ static void atmel_start_tx(struct uart_port *port)
return;
if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ if (atmel_uart_is_half_duplex(port))
atmel_stop_rx(port);
if (atmel_use_pdc_tx(port))
@@ -809,10 +817,14 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
- else if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
+ else if (atmel_uart_is_half_duplex(port)) {
+ /*
+ * DMA done, re-enable TXEMPTY and signal that we can stop
+ * TX and start RX for RS485
+ */
+ atmel_port->hd_start_rx = true;
+ atmel_uart_writel(port, ATMEL_US_IER,
+ atmel_port->tx_done_mask);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -1166,6 +1178,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "Preparing DMA cyclic failed\n");
+ goto chan_err;
+ }
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
@@ -1253,9 +1269,19 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
- /* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
+
+ /* Start RX if flag was set and FIFO is empty */
+ if (atmel_port->hd_start_rx) {
+ if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+ & ATMEL_US_TXEMPTY))
+ dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+ atmel_port->hd_start_rx = false;
+ atmel_start_rx(port);
+ }
+
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1382,8 +1408,7 @@ static void atmel_tx_pdc(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
- if ((port->rs485.flags & SER_RS485_ENABLED) &&
- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ if (atmel_uart_is_half_duplex(port)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
@@ -2176,27 +2201,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_NORMAL;
}
- /* set the mode, clock divisor, parity, stop bits and data size */
- atmel_uart_writel(port, ATMEL_US_MR, mode);
-
- /*
- * when switching the mode, set the RTS line state according to the
- * new mode, otherwise keep the former state
- */
- if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
- unsigned int rts_state;
-
- if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
- /* let the hardware control the RTS line */
- rts_state = ATMEL_US_RTSDIS;
- } else {
- /* force RTS line to low level */
- rts_state = ATMEL_US_RTSEN;
- }
-
- atmel_uart_writel(port, ATMEL_US_CR, rts_state);
- }
-
/*
* Set the baud rate:
* Fractional baudrate allows to setup output frequency more
@@ -2223,6 +2227,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
quot = cd | fp << ATMEL_US_FP_OFFSET;
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+
+ /* set the mode, clock divisor, parity, stop bits and data size */
+ atmel_uart_writel(port, ATMEL_US_MR, mode);
+
+ /*
+ * when switching the mode, set the RTS line state according to the
+ * new mode, otherwise keep the former state
+ */
+ if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
+ unsigned int rts_state;
+
+ if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
+ /* let the hardware control the RTS line */
+ rts_state = ATMEL_US_RTSDIS;
+ } else {
+ /* force RTS line to low level */
+ rts_state = ATMEL_US_RTSEN;
+ }
+
+ atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+ }
+
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 0040c29f651a..b9e137c03fe3 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -421,7 +421,16 @@ static int cpm_uart_startup(struct uart_port *port)
clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
}
cpm_uart_initbd(pinfo);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ if (IS_SMC(pinfo)) {
+ out_be32(&pinfo->smcup->smc_rstate, 0);
+ out_be32(&pinfo->smcup->smc_tstate, 0);
+ out_be16(&pinfo->smcup->smc_rbptr,
+ in_be16(&pinfo->smcup->smc_rbase));
+ out_be16(&pinfo->smcup->smc_tbptr,
+ in_be16(&pinfo->smcup->smc_tbase));
+ } else {
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ }
}
/* Install interrupt handler. */
retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -875,16 +884,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
(u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
/*
- * In case SMC1 is being relocated...
+ * In case SMC is being relocated...
*/
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
out_be32(&up->smc_rstate, 0);
out_be32(&up->smc_tstate, 0);
out_be16(&up->smc_brkcr, 1); /* number of break chars */
out_be16(&up->smc_brkec, 0);
-#endif
/* Set up the uart parameters in the
* parameter ram.
@@ -898,8 +905,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
out_be16(&up->smc_brkec, 0);
out_be16(&up->smc_brkcr, 1);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
/* Set UART mode, 8 bit, no parity, one stop.
* Enable receive and transmit.
*/
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 02ad6953b167..50ec5f1ac77f 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -545,7 +545,11 @@ static int __init digicolor_uart_init(void)
if (ret)
return ret;
- return platform_driver_register(&digicolor_uart_platform);
+ ret = platform_driver_register(&digicolor_uart_platform);
+ if (ret)
+ uart_unregister_driver(&digicolor_uart);
+
+ return ret;
}
module_init(digicolor_uart_init);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 91d2ddd6ef88..180b77334fdd 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1244,6 +1244,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
+
+ pm_runtime_disable(&spi->dev);
+
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 5f7452eaa728..8de9ad8d01ca 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -573,7 +573,7 @@ static void dma_tx_work(struct work_struct *w)
dev_err(dev, "DMA mapping error for TX.\n");
goto err_out;
}
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "We cannot prepare for the TX slave dma!\n");
@@ -1981,7 +1981,7 @@ imx_console_setup(struct console *co, char *options)
retval = clk_prepare(sport->clk_per);
if (retval)
- clk_disable_unprepare(sport->clk_ipg);
+ clk_unprepare(sport->clk_ipg);
error_console:
return retval;
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index f2b0d8cee8ef..0314e78e31ff 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
char *cptr = config;
struct console *cons;
- if (!strlen(config) || isspace(config[0]))
+ if (!strlen(config) || isspace(config[0])) {
+ err = 0;
goto noconfig;
+ }
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8a3e92638e10..80ab672d61cc 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -494,37 +494,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
static int max310x_set_baud(struct uart_port *port, int baud)
{
- unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
+ unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
- /* Check for minimal value for divider */
- if (div < 16)
- div = 16;
-
- if (clk % baud && (div / 16) < 0x8000) {
+ /*
+ * Calculate the integer divisor first. Select a proper mode
+ * in case the requested baud is too high for the pre-defined
+ * clock frequency.
+ */
+ div = port->uartclk / baud;
+ if (div < 8) {
+ /* Mode x4 */
+ c = 4;
+ mode = MAX310X_BRGCFG_4XMODE_BIT;
+ } else if (div < 16) {
/* Mode x2 */
+ c = 8;
mode = MAX310X_BRGCFG_2XMODE_BIT;
- clk = port->uartclk * 2;
- div = clk / baud;
-
- if (clk % baud && (div / 16) < 0x8000) {
- /* Mode x4 */
- mode = MAX310X_BRGCFG_4XMODE_BIT;
- clk = port->uartclk * 4;
- div = clk / baud;
- }
+ } else {
+ c = 16;
}
- max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
- max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
- max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+ /* Calculate the divisor in accordance with the fraction coefficient */
+ div /= c;
+ F = c*baud;
- return DIV_ROUND_CLOSEST(clk, div);
+ /* Calculate the baud rate fraction */
+ if (div > 0)
+ frac = (16*(port->uartclk % F)) / F;
+ else
+ div = 1;
+
+ max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
+ max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
+ max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
+
+ /* Return the actual baud rate we just programmed */
+ return (16*port->uartclk) / (c*(16*div + frac));
}
static int max310x_update_best_err(unsigned long f, long *besterr)
{
/* Use baudrate 115200 for calculate error */
- long err = f % (115200 * 16);
+ long err = f % (460800 * 16);
if ((*besterr < 0) || (*besterr > err)) {
*besterr = err;
@@ -579,7 +590,7 @@ static int max310x_set_ref_clk(struct max310x_port *s, unsigned long freq,
}
/* Configure clock source */
- clksrc = xtal ? MAX310X_CLKSRC_CRYST_BIT : MAX310X_CLKSRC_EXTCLK_BIT;
+ clksrc = MAX310X_CLKSRC_EXTCLK_BIT | (xtal ? MAX310X_CLKSRC_CRYST_BIT : 0);
/* Configure PLL */
if (pllcfg) {
@@ -758,12 +769,9 @@ static void max310x_start_tx(struct uart_port *port)
static unsigned int max310x_tx_empty(struct uart_port *port)
{
- unsigned int lvl, sts;
-
- lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
- sts = max310x_port_read(port, MAX310X_IRQSTS_REG);
+ u8 lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
- return ((sts & MAX310X_IRQ_TXEMPTY_BIT) && !lvl) ? TIOCSER_TEMT : 0;
+ return lvl ? 0 : TIOCSER_TEMT;
}
static unsigned int max310x_get_mctrl(struct uart_port *port)
@@ -1323,6 +1331,8 @@ static int max310x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
devtype = (struct max310x_devtype *)of_id->data;
} else {
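The max310x_set_baud() rewrite above computes an integer divisor, drops into x2 or x4 oversampling when that divisor is too small, derives a 4-bit fractional part, and returns the baud rate actually programmed. The arithmetic reproduced as a standalone function, with hypothetical clock and baud values:

#include <stdio.h>

/* reproduce the arithmetic of the patched max310x_set_baud(); the uartclk
 * and baud used below are example values, not taken from real hardware */
static unsigned int set_baud(unsigned int uartclk, unsigned int baud)
{
    unsigned int div, frac = 0, c, F;

    div = uartclk / baud;
    if (div < 8)
        c = 4;   /* x4 rate mode */
    else if (div < 16)
        c = 8;   /* x2 rate mode */
    else
        c = 16;  /* normal x1 mode */

    div /= c;
    F = c * baud;

    if (div > 0)
        frac = (16 * (uartclk % F)) / F;  /* 4-bit baud fraction */
    else
        div = 1;

    /* baud rate the divider registers actually produce */
    return (16 * uartclk) / (c * (16 * div + frac));
}

int main(void)
{
    printf("actual baud: %u\n", set_baud(3686400, 115200));
    return 0;
}

With a 3.6864 MHz clock and 115200 baud the divisor works out to 2 in x1 mode with no fraction, so the function returns exactly 115200.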
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 6788e7532dff..9e6d44df3fab 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -391,10 +391,14 @@ no_rx:
static inline void msm_wait_for_xmitr(struct uart_port *port)
{
+ unsigned int timeout = 500000;
+
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
break;
udelay(1);
+ if (!timeout--)
+ break;
}
msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
}
@@ -868,6 +872,7 @@ static void msm_handle_tx(struct uart_port *port)
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
+ char buf[4] = { 0 };
void __iomem *tf;
int err = 0;
@@ -877,10 +882,12 @@ static void msm_handle_tx(struct uart_port *port)
else
tf = port->membase + UART_TF;
+ buf[0] = port->x_char;
+
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
- iowrite8_rep(tf, &port->x_char, 1);
+ iowrite32_rep(tf, buf, 1);
port->icount.tx++;
port->x_char = 0;
return;
@@ -981,6 +988,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int mr;
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
@@ -988,7 +996,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr = msm_read(port, UART_MR1);
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1568,6 +1579,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
int num_newlines = 0;
bool replaced = false;
void __iomem *tf;
+ int locked = 1;
if (is_uartdm)
tf = port->membase + UARTDM_TF;
@@ -1580,7 +1592,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
- spin_lock(&port->lock);
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&port->lock);
+ else
+ spin_lock(&port->lock);
+
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1616,7 +1634,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
iowrite32_rep(tf, buf, 1);
i += num_chars;
}
- spin_unlock(&port->lock);
+
+ if (locked)
+ spin_unlock(&port->lock);
}
static void msm_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 401c983ec5f3..a10e4aa9e18e 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -581,7 +581,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->membase = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(port->membase))
- return -PTR_ERR(port->membase);
+ return PTR_ERR(port->membase);
data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
GFP_KERNEL);
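The one-character mvebu-uart fix above matters because PTR_ERR() already yields a negative errno; negating it hands the caller a positive value that no longer looks like an error. A tiny demonstration with stand-ins for the kernel's ERR_PTR()/PTR_ERR() helpers:

#include <stdio.h>
#include <errno.h>

/* simplified stand-ins for ERR_PTR()/PTR_ERR() */
static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }

int main(void)
{
    void *membase = err_ptr(-ENOMEM);  /* hypothetical ioremap failure */

    printf("return -PTR_ERR(): %ld (positive, wrong)\n", -ptr_err(membase));
    printf("return  PTR_ERR(): %ld (proper -ENOMEM)\n", ptr_err(membase));
    return 0;
}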
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 1d9d778828ba..515bf18c8294 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1635,8 +1635,9 @@ static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s)
/*
* If something went wrong, rollback.
+ * Be careful: i may be unsigned.
*/
- while (err && (--i >= 0))
+ while (err && (i-- > 0))
if (irq[i] >= 0)
free_irq(irq[i], s);
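The mxs-auart rollback loop above is rewritten from "--i >= 0" to "i-- > 0" because, as the added comment warns, i may be unsigned: an unsigned value is never negative, so the old test stays true after i wraps past zero and the loop would keep indexing out of bounds. A self-contained comparison:

#include <stdio.h>

int main(void)
{
    unsigned int i = 3;  /* number of IRQs successfully requested */
    int freed = 0;

    /* patched form: compare against zero first, then post-decrement */
    while (i-- > 0)
        freed++;  /* stands in for free_irq(irq[i], s) */

    printf("released %d resources, i wrapped to %u afterwards\n", freed, i);

    /* the old "--i >= 0" test is always true for an unsigned i, so the
     * equivalent loop would never terminate once i reached zero */
    return 0;
}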
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 42caccb5e87e..30b577384a1d 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -252,6 +252,7 @@ struct eg20t_port {
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
int nent;
+ int orig_nent;
struct scatterlist sg_rx;
int tx_dma_use;
void *rx_buf_virt;
@@ -806,9 +807,10 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
+ priv->orig_nent = 0;
kfree(priv->sg_tx_p);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
@@ -1033,6 +1035,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
return 0;
}
+ priv->orig_nent = num;
priv->nent = nent;
for (i = 0; i < nent; i++, sg++) {
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index ea6b62cece88..f80a88d107d7 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -332,6 +332,7 @@ struct sc16is7xx_port {
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work irq_work;
+ struct mutex efr_lock;
struct sc16is7xx_one p[0];
};
@@ -503,6 +504,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= 4;
}
+ /* In an amazing feat of design, the Enhanced Features Register shares
+ * the address of the Interrupt Identification Register, and is
+ * switched in by writing a magic value (0xbf) to the Line Control
+ * Register. Any interrupt firing during this time will see the EFR
+ * where it expects the IIR to be, leading to "Unexpected interrupt"
+ * messages.
+ *
+ * Prevent this possibility by claiming a mutex while accessing the
+ * EFR, and claiming the same mutex from within the interrupt handler.
+ * This is similar to disabling the interrupt, but that doesn't work
+ * because the bulk of the interrupt processing is run as a workqueue
+ * job in thread context.
+ */
+ mutex_lock(&s->efr_lock);
+
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
/* Open the LCR divisors for configuration */
@@ -518,6 +534,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ mutex_unlock(&s->efr_lock);
+
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
@@ -700,6 +718,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
+ mutex_lock(&s->efr_lock);
+
while (1) {
bool keep_polling = false;
int i;
@@ -709,6 +729,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
if (!keep_polling)
break;
}
+
+ mutex_unlock(&s->efr_lock);
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
@@ -903,6 +925,9 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
+ /* As above, claim the mutex while accessing the EFR. */
+ mutex_lock(&s->efr_lock);
+
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
@@ -924,6 +949,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
+ mutex_unlock(&s->efr_lock);
+
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 4 / 0xffff,
@@ -1186,6 +1213,7 @@ static int sc16is7xx_probe(struct device *dev,
s->regmap = regmap;
s->devtype = devtype;
dev_set_drvdata(dev, s);
+ mutex_init(&s->efr_lock);
kthread_init_worker(&s->kworker);
kthread_init_work(&s->irq_work, sc16is7xx_ist);
@@ -1482,7 +1510,7 @@ static int __init sc16is7xx_init(void)
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
- return ret;
+ goto err_i2c;
}
#endif
@@ -1490,10 +1518,18 @@ static int __init sc16is7xx_init(void)
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
- return ret;
+ goto err_spi;
}
#endif
return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
}
module_init(sc16is7xx_init);
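The sc16is7xx comment above explains why EFR access is serialized against the threaded IRQ worker with a mutex rather than by masking the interrupt. A minimal pthread sketch of the same idea, with hypothetical names throughout: the configuration path and the worker take the same lock around the register-bank switch, so the worker can never observe the EFR where it expects the IIR.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t efr_lock = PTHREAD_MUTEX_INITIALIZER;
static int lcr_mode;  /* 0 = normal registers, 1 = EFR bank switched in */

static void *irq_worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&efr_lock);
    printf("irq worker sees lcr_mode=%d (always 0)\n", lcr_mode);
    pthread_mutex_unlock(&efr_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, irq_worker, NULL);

    pthread_mutex_lock(&efr_lock);
    lcr_mode = 1;  /* switch EFR in (LCR = 0xbf in the driver) */
    /* ... program enhanced features ... */
    lcr_mode = 0;  /* restore the normal register bank */
    pthread_mutex_unlock(&efr_lock);

    pthread_join(t, NULL);
    return 0;
}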
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 55ed4c66f77f..3701b240fcb3 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -141,9 +141,6 @@ static void uart_start(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
- if (!state)
- return;
-
port = uart_port_lock(state, flags);
__uart_start(tty);
uart_port_unlock(port, flags);
@@ -1109,7 +1106,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
if (!uport)
goto out;
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
ret = 0;
out:
@@ -1714,11 +1711,8 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
*/
static int uart_open(struct tty_struct *tty, struct file *filp)
{
- struct uart_driver *drv = tty->driver->driver_state;
- int retval, line = tty->index;
- struct uart_state *state = drv->state + line;
-
- tty->driver_data = state;
+ struct uart_state *state = tty->driver_data;
+ int retval;
retval = tty_port_open(&state->port, tty, filp);
if (retval > 0)
@@ -1731,6 +1725,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
+ int ret;
uport = uart_port_check(state);
if (!uport || uport->flags & UPF_DEAD)
@@ -1741,7 +1736,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
/*
* Start up the serial port.
*/
- return uart_startup(tty, state, 0);
+ ret = uart_startup(tty, state, 0);
+ if (ret > 0)
+ tty_port_set_active(port, 1);
+
+ return ret;
}
static const char *uart_type(struct uart_port *port)
@@ -2409,9 +2408,6 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
struct uart_state *state = drv->state + line;
struct uart_port *port;
- if (!state)
- return;
-
port = uart_port_ref(state);
if (!port)
return;
@@ -2423,7 +2419,18 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
}
#endif
+static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct uart_driver *drv = driver->driver_state;
+ struct uart_state *state = drv->state + tty->index;
+
+ tty->driver_data = state;
+
+ return tty_standard_install(driver, tty);
+}
+
static const struct tty_operations uart_ops = {
+ .install = uart_install,
.open = uart_open,
.close = uart_close,
.write = uart_write,
@@ -2788,6 +2795,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
+ tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
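
The serial_core hunks move the driver_data lookup out of uart_open() and into a new .install callback, so every later tty operation can rely on tty->driver_data being set and the defensive NULL checks can go. A rough userspace model of that ops/install split, with hypothetical names throughout:

    #include <stdio.h>

    struct line_state { int line; };

    struct handle {
            int index;
            void *driver_data;      /* set once in install, used by later ops */
    };

    struct ops {
            int (*install)(struct handle *h);
            int (*open)(struct handle *h);
    };

    static struct line_state lines[4];

    /* Bind the per-line state exactly once, before any other callback runs. */
    static int demo_install(struct handle *h)
    {
            h->driver_data = &lines[h->index];
            return 0;
    }

    /* open() can now assume driver_data is valid and needs no NULL check. */
    static int demo_open(struct handle *h)
    {
            struct line_state *st = h->driver_data;

            printf("opened line %d\n", st->line);
            return 0;
    }

    static const struct ops demo_ops = {
            .install = demo_install,
            .open    = demo_open,
    };

    int main(void)
    {
            struct handle h = { .index = 2 };

            lines[2].line = 2;
            if (demo_ops.install(&h) == 0)
                    demo_ops.open(&h);
            return 0;
    }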
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index d2da6aa7f27d..1bb15edcf1e7 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -68,6 +68,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
+ if (gpios == NULL)
+ return NULL;
+
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 6ff53b604ff6..ea35f5144237 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -834,19 +834,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
-
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
@@ -1301,6 +1291,7 @@ static void work_fn_tx(struct work_struct *work)
struct uart_port *port = &s->port;
struct circ_buf *xmit = &port->state->xmit;
dma_addr_t buf;
+ int head, tail;
/*
* DMA is idle now.
@@ -1310,16 +1301,23 @@ static void work_fn_tx(struct work_struct *work)
* consistent xmit buffer state.
*/
spin_lock_irq(&port->lock);
- buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+ head = xmit->head;
+ tail = xmit->tail;
+ buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
s->tx_dma_len = min_t(unsigned int,
- CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- spin_unlock_irq(&port->lock);
+ CIRC_CNT(head, tail, UART_XMIT_SIZE),
+ CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+ if (!s->tx_dma_len) {
+ /* Transmit buffer has been flushed */
+ spin_unlock_irq(&port->lock);
+ return;
+ }
desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
/* switch to PIO */
sci_tx_dma_release(s, true);
@@ -1329,20 +1327,20 @@ static void work_fn_tx(struct work_struct *work)
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
DMA_TO_DEVICE);
- spin_lock_irq(&port->lock);
desc->callback = sci_dma_tx_complete;
desc->callback_param = s;
- spin_unlock_irq(&port->lock);
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
/* switch to PIO */
sci_tx_dma_release(s, true);
return;
}
+ spin_unlock_irq(&port->lock);
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
- __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+ __func__, xmit->buf, tail, head, s->cookie_tx);
dma_async_issue_pending(chan);
}
@@ -1548,11 +1546,18 @@ static void sci_free_dma(struct uart_port *port)
static void sci_flush_buffer(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
+
/*
* In uart_flush_buffer(), the xmit circular buffer has just been
- * cleared, so we have to reset tx_dma_len accordingly.
+ * cleared, so we have to reset tx_dma_len accordingly, and stop any
+ * pending transfers
*/
- to_sci_port(port)->tx_dma_len = 0;
+ s->tx_dma_len = 0;
+ if (s->chan_tx) {
+ dmaengine_terminate_async(s->chan_tx);
+ s->cookie_tx = -EINVAL;
+ }
}
#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
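
The sh-sci work_fn_tx() hunks snapshot the circular-buffer head and tail once under port->lock, bail out early if the buffer was flushed, and keep the lock held across descriptor preparation and submission. The sketch below models only the snapshot-and-check part with a pthread mutex; CIRC_CNT()/CIRC_CNT_TO_END() are re-derived here and the DMA calls are elided.

    #include <pthread.h>
    #include <stdio.h>

    #define XMIT_SIZE 64            /* power of two, like UART_XMIT_SIZE */

    struct circ {
            char buf[XMIT_SIZE];
            int head;
            int tail;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int circ_cnt(int head, int tail, int size)
    {
            return (head - tail) & (size - 1);
    }

    static int circ_cnt_to_end(int head, int tail, int size)
    {
            int end = size - tail;
            int n = (head + end) & (size - 1);

            return n < end ? n : end;
    }

    /* Take a consistent snapshot of head/tail under the lock, then decide
     * how much contiguous data could be handed to a DMA engine. */
    static int prepare_tx(struct circ *c)
    {
            int head, tail, cnt, len;

            pthread_mutex_lock(&lock);
            head = c->head;                 /* snapshot both indices ...   */
            tail = c->tail;                 /* ... while holding the lock  */
            cnt = circ_cnt(head, tail, XMIT_SIZE);
            len = circ_cnt_to_end(head, tail, XMIT_SIZE);
            if (len > cnt)
                    len = cnt;
            if (!len) {
                    /* buffer was flushed in the meantime: nothing to do */
                    pthread_mutex_unlock(&lock);
                    return 0;
            }
            /* descriptor preparation and submission would happen here,
             * still under the lock, mirroring the reworked work_fn_tx() */
            pthread_mutex_unlock(&lock);
            return len;
    }

    int main(void)
    {
            struct circ c = { .head = 5, .tail = 60 };

            printf("can submit %d contiguous bytes\n", prepare_tx(&c));
            return 0;
    }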
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 747560feb63e..2e34239ac8a9 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -240,7 +240,7 @@ static inline void sprd_rx(struct uart_port *port)
if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
SPRD_LSR_FE | SPRD_LSR_OE))
- if (handle_lsr_errors(port, &lsr, &flag))
+ if (handle_lsr_errors(port, &flag, &lsr))
continue;
if (uart_handle_sysrq_char(port, ch))
continue;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 033856287ca2..ea8b591dd46f 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -293,13 +293,8 @@ static void stm32_transmit_chars(struct uart_port *port)
return;
}
- if (uart_tx_stopped(port)) {
- stm32_stop_tx(port);
- return;
- }
-
- if (uart_circ_empty(xmit)) {
- stm32_stop_tx(port);
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
return;
}
@@ -312,7 +307,7 @@ static void stm32_transmit_chars(struct uart_port *port)
uart_write_wakeup(port);
if (uart_circ_empty(xmit))
- stm32_stop_tx(port);
+ stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
static irqreturn_t stm32_interrupt(int irq, void *ptr)
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 59828d819145..5ad978acd90c 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -392,7 +392,7 @@ static struct uart_ops sunhv_pops = {
static struct uart_driver sunhv_reg = {
.owner = THIS_MODULE,
.driver_name = "sunhv",
- .dev_name = "ttyS",
+ .dev_name = "ttyHV",
.major = TTY_MAJOR,
};
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 817bb0d3f326..9a54aafe8405 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -746,7 +746,8 @@ err_uart:
static void __exit ulite_exit(void)
{
platform_driver_unregister(&ulite_platform_driver);
- uart_unregister_driver(&ulite_uart_driver);
+ if (ulite_uart_driver.state)
+ uart_unregister_driver(&ulite_uart_driver);
}
module_init(ulite_init);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index ffb474c49f0f..eb61a07fcbbc 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1261,7 +1261,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
*
* Return: 0 on success, negative errno otherwise.
*/
-static int __init cdns_uart_console_setup(struct console *co, char *options)
+static int cdns_uart_console_setup(struct console *co, char *options)
{
struct uart_port *port = &cdns_uart_port[co->index];
int baud = 9600;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 7aca2d4670e4..7446ce29f677 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1187,14 +1187,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct slgt_info *info = tty->driver_data;
- int rc = -ENOIOCTLCMD;
+ int rc;
if (sanity_check(info, tty->name, "compat_ioctl"))
return -ENODEV;
DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
switch (cmd) {
-
case MGSL_IOCSPARAMS32:
rc = set_params32(info, compat_ptr(arg));
break;
@@ -1214,18 +1213,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
case MGSL_IOCWAITGPIO:
case MGSL_IOCGXSYNC:
case MGSL_IOCGXCTRL:
- case MGSL_IOCSTXIDLE:
- case MGSL_IOCTXENABLE:
- case MGSL_IOCRXENABLE:
- case MGSL_IOCTXABORT:
- case TIOCMIWAIT:
- case MGSL_IOCSIF:
- case MGSL_IOCSXSYNC:
- case MGSL_IOCSXCTRL:
- rc = ioctl(tty, cmd, arg);
+ rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
break;
+ default:
+ rc = ioctl(tty, cmd, arg);
}
-
DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
return rc;
}
@@ -1357,10 +1349,10 @@ static void throttle(struct tty_struct * tty)
DBGINFO(("%s throttle\n", info->device_name));
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1382,10 +1374,10 @@ static void unthrottle(struct tty_struct * tty)
else
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -2584,8 +2576,8 @@ static void change_params(struct slgt_info *info)
info->read_status_mask = IRQ_RXOVER;
if (I_INPCK(info->port.tty))
info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask |= MASK_BREAK;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask |= MASK_BREAK;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
if (I_IGNBRK(info->port.tty)) {
@@ -3216,7 +3208,7 @@ static int tiocmset(struct tty_struct *tty,
info->signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
}
@@ -3227,7 +3219,7 @@ static int carrier_raised(struct tty_port *port)
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
@@ -3242,7 +3234,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index dec156586de1..2f6df8d74b4a 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1467,10 +1467,10 @@ static void throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1496,10 +1496,10 @@ static void unthrottle(struct tty_struct * tty)
send_xchar(tty, START_CHAR(tty));
}
- if (C_CRTSCTS(tty)) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals |= SerialSignal_RTS;
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -2485,7 +2485,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (status & SerialSignal_CTS) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx start...");
- info->port.tty->hw_stopped = 0;
+ info->port.tty->hw_stopped = 0;
tx_start(info);
info->pending_bh |= BH_TRANSMIT;
return;
@@ -2494,7 +2494,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
if (!(status & SerialSignal_CTS)) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx stop...");
- info->port.tty->hw_stopped = 1;
+ info->port.tty->hw_stopped = 1;
tx_stop(info);
}
}
@@ -2821,8 +2821,8 @@ static void change_params(SLMP_INFO *info)
info->read_status_mask2 = OVRN;
if (I_INPCK(info->port.tty))
info->read_status_mask2 |= PE | FRME;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask1 |= BRKD;
+ if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
+ info->read_status_mask1 |= BRKD;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask2 |= PE | FRME;
if (I_IGNBRK(info->port.tty)) {
@@ -3192,7 +3192,7 @@ static int tiocmget(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS : 0) |
@@ -3230,7 +3230,7 @@ static int tiocmset(struct tty_struct *tty,
info->serial_signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3242,7 +3242,7 @@ static int carrier_raised(struct tty_port *port)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
@@ -3258,7 +3258,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 53cbf4ebef10..b6ff01131eef 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -543,7 +543,6 @@ void __handle_sysrq(int key, bool check_mask)
*/
orig_log_level = console_loglevel;
console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
- pr_info("SysRq : ");
op_p = __sysrq_get_key_op(key);
if (op_p) {
@@ -552,14 +551,15 @@ void __handle_sysrq(int key, bool check_mask)
* should not) and is the invoked operation enabled?
*/
if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
- pr_cont("%s\n", op_p->action_msg);
+ pr_info("%s\n", op_p->action_msg);
console_loglevel = orig_log_level;
op_p->handler(key);
} else {
- pr_cont("This sysrq operation is disabled.\n");
+ pr_info("This sysrq operation is disabled.\n");
+ console_loglevel = orig_log_level;
}
} else {
- pr_cont("HELP : ");
+ pr_info("HELP : ");
/* Only print the help msg once per handler */
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 41b9a7ccce08..ca9c82ee6c35 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -25,7 +25,7 @@
* Byte threshold to limit memory consumption for flip buffers.
* The actual memory limit is > 2x this amount.
*/
-#define TTYB_DEFAULT_MEM_LIMIT 65536
+#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
/*
* We default to dicing tty buffer allocations to this many characters
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 19fe1e8fc124..15e0116e1232 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -520,6 +520,8 @@ void proc_clear_tty(struct task_struct *p)
tty_kref_put(tty);
}
+extern void tty_sysctl_init(void);
+
/**
* proc_set_tty - set the controlling terminal
*
@@ -3705,6 +3707,7 @@ void console_sysfs_notify(void)
*/
int __init tty_init(void)
{
+ tty_sysctl_init();
cdev_init(&tty_cdev, &tty_fops);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4ab518d43758..706faca834f2 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
+#if defined(CONFIG_LDISC_AUTOLOAD)
+ #define INITIAL_AUTOLOAD_STATE 1
+#else
+ #define INITIAL_AUTOLOAD_STATE 0
+#endif
+static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
+
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
*/
ldops = get_ldops(disc);
if (IS_ERR(ldops)) {
+ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
+ return ERR_PTR(-EPERM);
request_module("tty-ldisc-%d", disc);
ldops = get_ldops(disc);
if (IS_ERR(ldops))
@@ -339,6 +348,11 @@ int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
int ret;
+ /* Kindly asking blocked readers to release the read side */
+ set_bit(TTY_LDISC_CHANGING, &tty->flags);
+ wake_up_interruptible_all(&tty->read_wait);
+ wake_up_interruptible_all(&tty->write_wait);
+
ret = __tty_ldisc_lock(tty, timeout);
if (!ret)
return -EBUSY;
@@ -349,6 +363,8 @@ int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
void tty_ldisc_unlock(struct tty_struct *tty)
{
clear_bit(TTY_LDISC_HALTED, &tty->flags);
+ /* Can be cleared here - ldisc_unlock will wake up writers firstly */
+ clear_bit(TTY_LDISC_CHANGING, &tty->flags);
__tty_ldisc_unlock(tty);
}
@@ -774,3 +790,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
+
+static int zero;
+static int one = 1;
+static struct ctl_table tty_table[] = {
+ {
+ .procname = "ldisc_autoload",
+ .data = &tty_ldisc_autoload,
+ .maxlen = sizeof(tty_ldisc_autoload),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+
+static struct ctl_table tty_dir_table[] = {
+ {
+ .procname = "tty",
+ .mode = 0555,
+ .child = tty_table,
+ },
+ { }
+};
+
+static struct ctl_table tty_root_table[] = {
+ {
+ .procname = "dev",
+ .mode = 0555,
+ .child = tty_dir_table,
+ },
+ { }
+};
+
+void tty_sysctl_init(void)
+{
+ register_sysctl_table(tty_root_table);
+}
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index dbd7ba32caac..6c5eb99fcfce 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -137,8 +137,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
tsk = waiter->task;
- smp_mb();
- waiter->task = NULL;
+ smp_store_release(&waiter->task, NULL);
wake_up_process(tsk);
put_task_struct(tsk);
}
@@ -234,7 +233,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
for (;;) {
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task))
break;
if (!timeout)
break;
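
The tty_ldsem change pairs the waker's clearing of waiter->task (smp_store_release) with an acquire load in the sleeper, so the sleeper also observes everything the waker wrote before the hand-off. A compilable C11 illustration of that release/acquire pairing, using a spin loop where the kernel would call schedule(); all names here are invented for the demo.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct waiter {
            _Atomic(void *) task;   /* non-NULL while still waiting */
            int granted_count;      /* written by the waker before the release */
    };

    static struct waiter w;

    static void *waker(void *arg)
    {
            (void)arg;
            w.granted_count = 1;                         /* plain write ...  */
            atomic_store_explicit(&w.task, NULL,
                                  memory_order_release); /* ... then publish */
            return NULL;
    }

    static void *sleeper(void *arg)
    {
            (void)arg;
            while (atomic_load_explicit(&w.task, memory_order_acquire))
                    ;                       /* spin instead of schedule() */
            /* The acquire above guarantees this read sees granted_count == 1. */
            printf("granted_count = %d\n", w.granted_count);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            atomic_store(&w.task, (void *)&w);      /* "still waiting" */
            pthread_create(&b, NULL, sleeper, NULL);
            pthread_create(&a, NULL, waker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            return 0;
    }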
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index ece10e6b731b..b4e7a7317713 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -121,6 +121,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
+static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
@@ -1459,7 +1460,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
kbd_rawcode(value);
- if (event_type == EV_KEY)
+ if (event_type == EV_KEY && event_code <= KEY_MAX)
kbd_keycode(event_code, value, HW_RAW(handle->dev));
spin_unlock(&kbd_event_lock);
@@ -1959,11 +1960,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
char *p;
u_char *q;
u_char __user *up;
- int sz;
+ int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
int i, j, k;
int ret;
+ unsigned long flags;
if (!capable(CAP_SYS_TTY_CONFIG))
perm = 0;
@@ -2006,7 +2008,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
goto reterr;
}
+ fnw = NULL;
+ fnw_sz = 0;
+ /* race against other writers */
+ again:
+ spin_lock_irqsave(&func_buf_lock, flags);
q = func_table[i];
+
+ /* fj points to the next entry after 'q' */
first_free = funcbufptr + (funcbufsize - funcbufleft);
for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
;
@@ -2014,10 +2023,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
fj = func_table[j];
else
fj = first_free;
-
+ /* buffer usage increase caused by the new entry */
delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
+
if (delta <= funcbufleft) { /* it fits in current buf */
if (j < MAX_NR_FUNC) {
+ /* make enough space for new entry at 'fj' */
memmove(fj + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k])
@@ -2030,20 +2041,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
sz = 256;
while (sz < funcbufsize - funcbufleft + delta)
sz <<= 1;
- fnw = kmalloc(sz, GFP_KERNEL);
- if(!fnw) {
- ret = -ENOMEM;
- goto reterr;
+ if (fnw_sz != sz) {
+ spin_unlock_irqrestore(&func_buf_lock, flags);
+ kfree(fnw);
+ fnw = kmalloc(sz, GFP_KERNEL);
+ fnw_sz = sz;
+ if (!fnw) {
+ ret = -ENOMEM;
+ goto reterr;
+ }
+ goto again;
}
if (!q)
func_table[i] = fj;
+ /* copy data before insertion point to new location */
if (fj > funcbufptr)
memmove(fnw, funcbufptr, fj - funcbufptr);
for (k = 0; k < j; k++)
if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr);
+ /* copy data after insertion point to new location */
if (first_free > fj) {
memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
@@ -2056,7 +2075,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
funcbufleft = funcbufleft - delta + sz - funcbufsize;
funcbufsize = sz;
}
+ /* finally insert item itself */
strcpy(func_table[i], kbs->kb_string);
+ spin_unlock_irqrestore(&func_buf_lock, flags);
break;
}
ret = 0;
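
The vt_do_kdgkb_ioctl() hunks add the classic drop-lock/allocate/retry shape: the required size is computed under func_buf_lock, the lock is dropped for the GFP_KERNEL allocation, and the code jumps back to 'again' to revalidate the size with the lock held. A small userspace model of that loop (mutex instead of spinlock, malloc instead of kmalloc; names are placeholders):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static size_t needed_size = 300;        /* may change while unlocked */

    static char *grow_buffer(void)
    {
            char *fnw = NULL;
            size_t fnw_sz = 0;
            size_t sz;

    again:
            pthread_mutex_lock(&buf_lock);
            sz = 256;
            while (sz < needed_size)        /* size computed under the lock */
                    sz <<= 1;
            if (fnw_sz != sz) {
                    pthread_mutex_unlock(&buf_lock);
                    free(fnw);
                    fnw = malloc(sz);       /* sleeping allocation, lock dropped */
                    if (!fnw)
                            return NULL;
                    fnw_sz = sz;
                    goto again;             /* retake the lock and recheck */
            }
            /* ...copy the tables into fnw here, still under the lock... */
            pthread_mutex_unlock(&buf_lock);
            return fnw;
    }

    int main(void)
    {
            char *p = grow_buffer();

            printf("allocation %s\n", p ? "ok" : "failed");
            free(p);
            return 0;
    }

The recheck after goto is what protects against another writer having changed the required size while the lock was dropped.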
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 368ce1803e8f..225689c76623 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -13,6 +13,7 @@
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -40,6 +41,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
static int sel_end;
static int sel_buffer_lth;
static char *sel_buffer;
+static DEFINE_MUTEX(sel_lock);
/* clear_selection, highlight and highlight_pointer can be called
from interrupt (via scrollback/front) */
@@ -78,6 +80,11 @@ void clear_selection(void)
}
}
+bool vc_is_sel(struct vc_data *vc)
+{
+ return vc == sel_cons;
+}
+
/*
* User settable table: what characters are to be considered alphabetic?
* 256 bits. Locked by the console lock.
@@ -156,14 +163,14 @@ static int store_utf8(u16 c, char *p)
* The entire selection process is managed under the console_lock. It's
* a lot under the lock but its hardly a performance path
*/
-int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
+static int __set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int sel_mode, new_sel_start, new_sel_end, spc;
char *bp, *obp;
int i, ps, pe, multiplier;
u16 c;
- int mode;
+ int mode, ret = 0;
poke_blanked_console();
@@ -324,7 +331,21 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
}
}
sel_buffer_lth = bp - sel_buffer;
- return 0;
+
+ return ret;
+}
+
+int set_selection(const struct tiocl_selection __user *v, struct tty_struct *tty)
+{
+ int ret;
+
+ mutex_lock(&sel_lock);
+ console_lock();
+ ret = __set_selection(v, tty);
+ console_unlock();
+ mutex_unlock(&sel_lock);
+
+ return ret;
}
/* Insert the contents of the selection buffer into the
@@ -341,6 +362,7 @@ int paste_selection(struct tty_struct *tty)
unsigned int count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
console_lock();
poke_blanked_console();
@@ -352,10 +374,17 @@ int paste_selection(struct tty_struct *tty)
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
+ mutex_lock(&sel_lock);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
if (tty_throttled(tty)) {
+ mutex_unlock(&sel_lock);
schedule();
+ mutex_lock(&sel_lock);
continue;
}
__set_current_state(TASK_RUNNING);
@@ -364,10 +393,11 @@ int paste_selection(struct tty_struct *tty)
count);
pasted += count;
}
+ mutex_unlock(&sel_lock);
remove_wait_queue(&vc->paste_wait, &wait);
__set_current_state(TASK_RUNNING);
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
- return 0;
+ return ret;
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 232cb0a760b9..29fb08c8a2fd 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -595,8 +595,9 @@ static void hide_softcursor(struct vc_data *vc)
static void hide_cursor(struct vc_data *vc)
{
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
+
vc->vc_sw->con_cursor(vc, CM_ERASE);
hide_softcursor(vc);
}
@@ -606,7 +607,7 @@ static void set_cursor(struct vc_data *vc)
if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
add_softcursor(vc);
if ((vc->vc_cursor_type & 0x0f) != 1)
@@ -753,6 +754,17 @@ static void visual_init(struct vc_data *vc, int num, int init)
vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
}
+static void vc_port_destruct(struct tty_port *port)
+{
+ struct vc_data *vc = container_of(port, struct vc_data, port);
+
+ kfree(vc);
+}
+
+static const struct tty_port_operations vc_port_ops = {
+ .destruct = vc_port_destruct,
+};
+
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
struct vt_notifier_param param;
@@ -778,6 +790,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
vc_cons[currcons].d = vc;
tty_port_init(&vc->port);
+ vc->port.ops = &vc_port_ops;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
visual_init(vc, currcons, 1);
@@ -876,7 +889,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (!newscreen)
return -ENOMEM;
- if (vc == sel_cons)
+ if (vc_is_sel(vc))
clear_selection();
old_rows = vc->vc_rows;
@@ -2690,9 +2703,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type)
{
case TIOCL_SETSEL:
- console_lock();
ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
- console_unlock();
break;
case TIOCL_PASTESEL:
ret = paste_selection(tty);
@@ -2898,6 +2909,7 @@ static int con_install(struct tty_driver *driver, struct tty_struct *tty)
tty->driver_data = vc;
vc->port.tty = tty;
+ tty_port_get(&vc->port);
if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
tty->winsize.ws_row = vc_cons[currcons].d->vc_rows;
@@ -2933,6 +2945,13 @@ static void con_shutdown(struct tty_struct *tty)
console_unlock();
}
+static void con_cleanup(struct tty_struct *tty)
+{
+ struct vc_data *vc = tty->driver_data;
+
+ tty_port_put(&vc->port);
+}
+
static int default_color = 7; /* white */
static int default_italic_color = 2; // green (ASCII)
static int default_underline_color = 3; // cyan (ASCII)
@@ -3057,7 +3076,8 @@ static const struct tty_operations con_ops = {
.throttle = con_throttle,
.unthrottle = con_unthrottle,
.resize = vt_resize,
- .shutdown = con_shutdown
+ .shutdown = con_shutdown,
+ .cleanup = con_cleanup,
};
static struct cdev vc0_cdev;
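
The vt.c hunks stop freeing a struct vc_data directly and instead free it from a tty_port .destruct callback once the last reference is dropped (con_install takes a reference, con_cleanup and vt_disallocate put it). A simplified refcount model of that lifetime, with the container_of() step reduced to a cast because the port is the first member:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct port {
            atomic_int kref;
            void (*destruct)(struct port *p);
    };

    struct vc_data {
            struct port port;       /* first member, so cast == container_of */
            int num;
    };

    static void port_get(struct port *p)
    {
            atomic_fetch_add(&p->kref, 1);
    }

    static void port_put(struct port *p)
    {
            if (atomic_fetch_sub(&p->kref, 1) == 1)
                    p->destruct(p);         /* last reference gone */
    }

    static void vc_port_destruct_demo(struct port *p)
    {
            struct vc_data *vc = (struct vc_data *)p;

            printf("freeing console %d\n", vc->num);
            free(vc);
    }

    int main(void)
    {
            struct vc_data *vc = malloc(sizeof(*vc));

            if (!vc)
                    return 1;
            vc->num = 1;
            atomic_init(&vc->port.kref, 1);         /* allocation holds one ref */
            vc->port.destruct = vc_port_destruct_demo;

            port_get(&vc->port);                    /* con_install() style ref  */
            port_put(&vc->port);                    /* con_cleanup()            */
            port_put(&vc->port);                    /* vt_disallocate()         */
            return 0;
    }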
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 638eb9bbd59f..4ed0d77e5918 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -38,11 +38,32 @@
#include <linux/kbd_diacr.h>
#include <linux/selection.h>
-char vt_dont_switch;
-extern struct tty_driver *console_driver;
+bool vt_dont_switch;
-#define VT_IS_IN_USE(i) (console_driver->ttys[i] && console_driver->ttys[i]->count)
-#define VT_BUSY(i) (VT_IS_IN_USE(i) || i == fg_console || vc_cons[i].d == sel_cons)
+static inline bool vt_in_use(unsigned int i)
+{
+ const struct vc_data *vc = vc_cons[i].d;
+
+ /*
+ * console_lock must be held to prevent the vc from being deallocated
+ * while we're checking whether it's in-use.
+ */
+ WARN_CONSOLE_UNLOCKED();
+
+ return vc && kref_read(&vc->port.kref) > 1;
+}
+
+static inline bool vt_busy(int i)
+{
+ if (vt_in_use(i))
+ return true;
+ if (i == fg_console)
+ return true;
+ if (vc_is_sel(vc_cons[i].d))
+ return true;
+
+ return false;
+}
/*
* Console (vt and kd) routines, as defined by USL SVR4 manual, and by
@@ -292,16 +313,14 @@ static int vt_disallocate(unsigned int vc_num)
int ret = 0;
console_lock();
- if (VT_BUSY(vc_num))
+ if (vt_busy(vc_num))
ret = -EBUSY;
else if (vc_num)
vc = vc_deallocate(vc_num);
console_unlock();
- if (vc && vc_num >= MIN_NR_CONSOLES) {
- tty_port_destroy(&vc->port);
- kfree(vc);
- }
+ if (vc && vc_num >= MIN_NR_CONSOLES)
+ tty_port_put(&vc->port);
return ret;
}
@@ -314,17 +333,15 @@ static void vt_disallocate_all(void)
console_lock();
for (i = 1; i < MAX_NR_CONSOLES; i++)
- if (!VT_BUSY(i))
+ if (!vt_busy(i))
vc[i] = vc_deallocate(i);
else
vc[i] = NULL;
console_unlock();
for (i = 1; i < MAX_NR_CONSOLES; i++) {
- if (vc[i] && i >= MIN_NR_CONSOLES) {
- tty_port_destroy(&vc[i]->port);
- kfree(vc[i]);
- }
+ if (vc[i] && i >= MIN_NR_CONSOLES)
+ tty_port_put(&vc[i]->port);
}
}
@@ -338,22 +355,13 @@ int vt_ioctl(struct tty_struct *tty,
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
- unsigned int console;
+ unsigned int console = vc->vc_num;
unsigned char ucval;
unsigned int uival;
void __user *up = (void __user *)arg;
int i, perm;
int ret = 0;
- console = vc->vc_num;
-
-
- if (!vc_cons_allocated(console)) { /* impossible? */
- ret = -ENOIOCTLCMD;
- goto out;
- }
-
-
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
@@ -644,15 +652,16 @@ int vt_ioctl(struct tty_struct *tty,
struct vt_stat __user *vtstat = up;
unsigned short state, mask;
- /* Review: FIXME: Console lock ? */
if (put_user(fg_console + 1, &vtstat->v_active))
ret = -EFAULT;
else {
state = 1; /* /dev/tty0 is always open */
+ console_lock(); /* required by vt_in_use() */
for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
++i, mask <<= 1)
- if (VT_IS_IN_USE(i))
+ if (vt_in_use(i))
state |= mask;
+ console_unlock();
ret = put_user(state, &vtstat->v_state);
}
break;
@@ -662,10 +671,11 @@ int vt_ioctl(struct tty_struct *tty,
* Returns the first available (non-opened) console.
*/
case VT_OPENQRY:
- /* FIXME: locking ? - but then this is a stupid API */
+ console_lock(); /* required by vt_in_use() */
for (i = 0; i < MAX_NR_CONSOLES; ++i)
- if (! VT_IS_IN_USE(i))
+ if (!vt_in_use(i))
break;
+ console_unlock();
uival = i < MAX_NR_CONSOLES ? (i+1) : -1;
goto setint;
@@ -850,58 +860,49 @@ int vt_ioctl(struct tty_struct *tty,
case VT_RESIZEX:
{
- struct vt_consize __user *vtconsize = up;
- ushort ll,cc,vlin,clin,vcol,ccol;
+ struct vt_consize v;
if (!perm)
return -EPERM;
- if (!access_ok(VERIFY_READ, vtconsize,
- sizeof(struct vt_consize))) {
- ret = -EFAULT;
- break;
- }
+ if (copy_from_user(&v, up, sizeof(struct vt_consize)))
+ return -EFAULT;
/* FIXME: Should check the copies properly */
- __get_user(ll, &vtconsize->v_rows);
- __get_user(cc, &vtconsize->v_cols);
- __get_user(vlin, &vtconsize->v_vlin);
- __get_user(clin, &vtconsize->v_clin);
- __get_user(vcol, &vtconsize->v_vcol);
- __get_user(ccol, &vtconsize->v_ccol);
- vlin = vlin ? vlin : vc->vc_scan_lines;
- if (clin) {
- if (ll) {
- if (ll != vlin/clin) {
- /* Parameters don't add up */
- ret = -EINVAL;
- break;
- }
- } else
- ll = vlin/clin;
+ if (!v.v_vlin)
+ v.v_vlin = vc->vc_scan_lines;
+ if (v.v_clin) {
+ int rows = v.v_vlin/v.v_clin;
+ if (v.v_rows != rows) {
+ if (v.v_rows) /* Parameters don't add up */
+ return -EINVAL;
+ v.v_rows = rows;
+ }
}
- if (vcol && ccol) {
- if (cc) {
- if (cc != vcol/ccol) {
- ret = -EINVAL;
- break;
- }
- } else
- cc = vcol/ccol;
+ if (v.v_vcol && v.v_ccol) {
+ int cols = v.v_vcol/v.v_ccol;
+ if (v.v_cols != cols) {
+ if (v.v_cols)
+ return -EINVAL;
+ v.v_cols = cols;
+ }
}
- if (clin > 32) {
- ret = -EINVAL;
- break;
- }
-
+ if (v.v_clin > 32)
+ return -EINVAL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ struct vc_data *vcp;
+
if (!vc_cons[i].d)
continue;
console_lock();
- if (vlin)
- vc_cons[i].d->vc_scan_lines = vlin;
- if (clin)
- vc_cons[i].d->vc_font.height = clin;
- vc_cons[i].d->vc_resize_user = 1;
- vc_resize(vc_cons[i].d, cc, ll);
+ vcp = vc_cons[i].d;
+ if (vcp) {
+ if (v.v_vlin)
+ vcp->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vcp->vc_font.height = v.v_clin;
+ vcp->vc_resize_user = 1;
+ vc_resize(vcp, v.v_cols, v.v_rows);
+ }
console_unlock();
}
break;
@@ -1023,12 +1024,12 @@ int vt_ioctl(struct tty_struct *tty,
case VT_LOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
- vt_dont_switch = 1;
+ vt_dont_switch = true;
break;
case VT_UNLOCKSWITCH:
if (!capable(CAP_SYS_TTY_CONFIG))
return -EPERM;
- vt_dont_switch = 0;
+ vt_dont_switch = false;
break;
case VT_GETHIFONTMASK:
ret = put_user(vc->vc_hi_font_mask,
@@ -1196,18 +1197,10 @@ long vt_compat_ioctl(struct tty_struct *tty,
{
struct vc_data *vc = tty->driver_data;
struct console_font_op op; /* used in multiple places here */
- unsigned int console;
void __user *up = (void __user *)arg;
int perm;
int ret = 0;
- console = vc->vc_num;
-
- if (!vc_cons_allocated(console)) { /* impossible? */
- ret = -ENOIOCTLCMD;
- goto out;
- }
-
/*
* To have permissions to do most of the vt ioctls, we either have
* to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
@@ -1267,7 +1260,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
arg = (unsigned long)compat_ptr(arg);
goto fallback;
}
-out:
+
return ret;
fallback:
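
The VT_RESIZEX rework copies the whole struct vt_consize in one copy_from_user() and then validates the fields, instead of reading them piecemeal with __get_user(). The sketch below isolates just the validation arithmetic; validate_consize() and its sample values are invented for illustration, not kernel code.

    #include <stdio.h>

    struct consize {
            unsigned short v_rows, v_cols;
            unsigned short v_vlin, v_clin;
            unsigned short v_vcol, v_ccol;
    };

    static int validate_consize(struct consize *v, unsigned short scan_lines)
    {
            if (!v->v_vlin)
                    v->v_vlin = scan_lines;         /* default from current mode */

            if (v->v_clin) {
                    int rows = v->v_vlin / v->v_clin;

                    if (v->v_rows != rows) {
                            if (v->v_rows)          /* parameters don't add up */
                                    return -1;
                            v->v_rows = rows;
                    }
            }

            if (v->v_vcol && v->v_ccol) {
                    int cols = v->v_vcol / v->v_ccol;

                    if (v->v_cols != cols) {
                            if (v->v_cols)
                                    return -1;
                            v->v_cols = cols;
                    }
            }

            if (v->v_clin > 32)
                    return -1;

            return 0;
    }

    int main(void)
    {
            struct consize v = { .v_vlin = 400, .v_clin = 16,
                                 .v_vcol = 640, .v_ccol = 8 };

            if (validate_consize(&v, 480) == 0)
                    printf("rows=%hu cols=%hu\n", v.v_rows, v.v_cols);
            return 0;
    }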
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index e1134a4d97f3..a00b4aee6c79 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -135,11 +135,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
if (irq_on) {
if (test_and_clear_bit(0, &priv->flags))
enable_irq(dev_info->irq);
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
- if (!test_and_set_bit(0, &priv->flags))
+ if (!test_and_set_bit(0, &priv->flags)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
disable_irq(dev_info->irq);
+ }
}
- spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index df67815f74e6..127fcaf13856 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2167,10 +2167,11 @@ resubmit:
/*
* Start the modem : init the data and start kernel thread
*/
-static int uea_boot(struct uea_softc *sc)
+static int uea_boot(struct uea_softc *sc, struct usb_interface *intf)
{
- int ret, size;
struct intr_pkt *intr;
+ int ret = -ENOMEM;
+ int size;
uea_enters(INS_TO_USBDEV(sc));
@@ -2195,6 +2196,11 @@ static int uea_boot(struct uea_softc *sc)
if (UEA_CHIP_VERSION(sc) == ADI930)
load_XILINX_firmware(sc);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ ret = -ENODEV;
+ goto err0;
+ }
+
intr = kmalloc(size, GFP_KERNEL);
if (!intr)
goto err0;
@@ -2206,8 +2212,7 @@ static int uea_boot(struct uea_softc *sc)
usb_fill_int_urb(sc->urb_int, sc->usb_dev,
usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
intr, size, uea_intr, sc,
- sc->usb_dev->actconfig->interface[0]->altsetting[0].
- endpoint[0].desc.bInterval);
+ intf->cur_altsetting->endpoint[0].desc.bInterval);
ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
if (ret < 0) {
@@ -2222,6 +2227,7 @@ static int uea_boot(struct uea_softc *sc)
sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
if (IS_ERR(sc->kthread)) {
uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
+ ret = PTR_ERR(sc->kthread);
goto err2;
}
@@ -2236,7 +2242,7 @@ err1:
kfree(intr);
err0:
uea_leaves(INS_TO_USBDEV(sc));
- return -ENOMEM;
+ return ret;
}
/*
@@ -2597,7 +2603,7 @@ static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
if (ret < 0)
goto error;
- ret = uea_boot(sc);
+ ret = uea_boot(sc, intf);
if (ret < 0)
goto error_rm_grp;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index a9b3795a150b..068d05dea4f7 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -986,8 +986,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
} else if (ci->platdata->usb_phy) {
ci->usb_phy = ci->platdata->usb_phy;
} else {
+ ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys",
+ 0);
ci->phy = devm_phy_get(dev->parent, "usb-phy");
- ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
+
+ /* Fallback to grabbing any registered USB2 PHY */
+ if (IS_ERR(ci->usb_phy) &&
+ PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
+ ci->usb_phy = devm_usb_get_phy(dev->parent,
+ USB_PHY_TYPE_USB2);
/* if both generic PHY and USB PHY layers aren't enabled */
if (PTR_ERR(ci->phy) == -ENOSYS &&
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 4b687bb8db0c..2118fd092543 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -42,6 +42,7 @@ static int (*orig_hub_control)(struct usb_hcd *hcd,
struct ehci_ci_priv {
struct regulator *reg_vbus;
+ bool enabled;
};
/* This function is used to override WKCN, WKDN, and WKOC */
@@ -67,7 +68,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
int ret = 0;
int port = HCS_N_PORTS(ehci->hcs_params);
- if (priv->reg_vbus) {
+ if (priv->reg_vbus && enable != priv->enabled) {
if (port > 1) {
dev_warn(dev,
"Not support multi-port regulator control\n");
@@ -83,6 +84,7 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
enable ? "enable" : "disable", ret);
return ret;
}
+ priv->enabled = enable;
}
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 9a17e04f4bfc..6b2603c963dd 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -286,14 +286,17 @@ static void ci_otg_work(struct work_struct *work)
}
pm_runtime_get_sync(ci->dev);
+
if (ci->id_event) {
ci->id_event = false;
ci_handle_id_switch(ci);
- } else if (ci->b_sess_valid_event) {
+ }
+
+ if (ci->b_sess_valid_event) {
ci->b_sess_valid_event = false;
ci_handle_vbus_change(ci);
- } else
- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
+ }
+
pm_runtime_put_sync(ci->dev);
enable_irq(ci->irq);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index cf24527d24eb..6f1839a260c2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1676,6 +1676,25 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
static int ci_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int ci_udc_stop(struct usb_gadget *gadget);
+
+/* Match ISOC IN from the highest endpoint */
+static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *comp_desc)
+{
+ struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
+ struct usb_ep *ep;
+
+ if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
+ list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
+ if (ep->caps.dir_in && !ep->claimed)
+ return ep;
+ }
+ }
+
+ return NULL;
+}
+
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
@@ -1689,6 +1708,7 @@ static const struct usb_gadget_ops usb_gadget_ops = {
.vbus_draw = ci_udc_vbus_draw,
.udc_start = ci_udc_start,
.udc_stop = ci_udc_stop,
+ .match_ep = ci_udc_match_ep,
};
static int init_eps(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index b7d21b052c19..d8d311987135 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -418,6 +418,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
} else if (data->oc_polarity == 1) {
/* High active */
reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
+ } else {
+ reg &= ~(MX6_BM_OVER_CUR_DIS);
}
writel(reg, usbmisc->base + data->index * 4);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8d4d46f3fd16..5b0bffba4aac 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -828,10 +828,10 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
tmp.flags = ASYNC_LOW_LATENCY;
tmp.xmit_fifo_size = acm->writesize;
tmp.baud_base = le32_to_cpu(acm->line.dwDTERate);
- tmp.close_delay = acm->port.close_delay / 10;
+ tmp.close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
- acm->port.closing_wait / 10;
+ jiffies_to_msecs(acm->port.closing_wait) / 10;
if (copy_to_user(info, &tmp, sizeof(tmp)))
return -EFAULT;
@@ -844,20 +844,28 @@ static int set_serial_info(struct acm *acm,
{
struct serial_struct new_serial;
unsigned int closing_wait, close_delay;
+ unsigned int old_closing_wait, old_close_delay;
int retval = 0;
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
- close_delay = new_serial.close_delay * 10;
+ close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ ASYNC_CLOSING_WAIT_NONE :
+ msecs_to_jiffies(new_serial.closing_wait * 10);
+
+ /* we must redo the rounding here, so that the values match */
+ old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
+ old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+ ASYNC_CLOSING_WAIT_NONE :
+ jiffies_to_msecs(acm->port.closing_wait) / 10;
mutex_lock(&acm->port.mutex);
if (!capable(CAP_SYS_ADMIN)) {
- if ((close_delay != acm->port.close_delay) ||
- (closing_wait != acm->port.closing_wait))
+ if ((new_serial.close_delay != old_close_delay) ||
+ (new_serial.closing_wait != old_closing_wait))
retval = -EPERM;
else
retval = -EOPNOTSUPP;
@@ -1264,10 +1272,6 @@ made_compressed_probe:
if (acm == NULL)
goto alloc_fail;
- minor = acm_alloc_minor(acm);
- if (minor < 0)
- goto alloc_fail1;
-
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1275,6 +1279,13 @@ made_compressed_probe:
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
+
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+ if (minor < 0)
+ goto alloc_fail1;
+
acm->minor = minor;
acm->dev = usb_dev;
if (h.usb_cdc_acm_descriptor)
@@ -1420,7 +1431,6 @@ skip_countries:
usb_driver_claim_interface(&acm_driver, data_interface, acm);
usb_set_intfdata(data_interface, acm);
- usb_get_intf(control_interface);
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
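
The cdc-acm hunks convert between the centisecond units used by TIOCSSERIAL and the jiffies stored in the tty_port, and they re-round the stored values before the "unchanged?" comparison so an unprivileged set of identical values still succeeds. A standalone sketch of that round-trip; HZ and the helper names here are assumptions for the demo, not the kernel implementation.

    #include <stdio.h>

    #define HZ 250                          /* assumed tick rate for the demo */

    static unsigned int msecs_to_jiffies_demo(unsigned int ms)
    {
            return (ms * HZ + 999) / 1000;  /* round up, like the kernel */
    }

    static unsigned int jiffies_to_msecs_demo(unsigned int j)
    {
            return j * (1000 / HZ);
    }

    int main(void)
    {
            unsigned int user_cs = 73;      /* centiseconds from userspace */
            unsigned int stored = msecs_to_jiffies_demo(user_cs * 10);
            unsigned int back_cs = jiffies_to_msecs_demo(stored) / 10;

            /* Comparing user_cs against back_cs (not against 'stored') is
             * what keeps a "set to the same value" request working for an
             * unprivileged caller. */
            printf("user=%u stored(jiffies)=%u reread=%u\n",
                   user_cs, stored, back_cs);
            return 0;
    }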
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 9f001659807a..09337a973335 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -597,10 +597,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
{
struct wdm_device *desc = file->private_data;
- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+ wait_event(desc->wait,
+ /*
+ * needs both flags. We cannot do with one
+ * because resetting it would cause a race
+ * with write() yet we need to signal
+ * a disconnect
+ */
+ !test_bit(WDM_IN_USE, &desc->flags) ||
+ test_bit(WDM_DISCONNECTING, &desc->flags));
/* cannot dereference desc->intf if WDM_DISCONNECTING */
- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+ if (test_bit(WDM_DISCONNECTING, &desc->flags))
+ return -ENODEV;
+ if (desc->werr < 0)
dev_err(&desc->intf->dev, "Error in flush path: %d\n",
desc->werr);
@@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
spin_lock_irqsave(&desc->iuspin, flags);
set_bit(WDM_DISCONNECTING, &desc->flags);
set_bit(WDM_READ, &desc->flags);
- /* to terminate pending flushes */
- clear_bit(WDM_IN_USE, &desc->flags);
spin_unlock_irqrestore(&desc->iuspin, flags);
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
@@ -1090,7 +1098,7 @@ static int wdm_post_reset(struct usb_interface *intf)
rv = recover_from_urb_loss(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
- return 0;
+ return rv;
}
static struct usb_driver wdm_driver = {
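
The cdc-wdm hunks make wdm_flush() wake up on either of two conditions, write completion or disconnect, and return -ENODEV on disconnect before touching desc->intf, instead of having the disconnect path clear WDM_IN_USE behind the writer's back. A condition-variable model of that two-flag wait; the names and the -1 return value are placeholders.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool in_use = true;              /* WDM_IN_USE        */
    static bool disconnecting;              /* WDM_DISCONNECTING */

    static int flush_demo(void)
    {
            pthread_mutex_lock(&lock);
            while (in_use && !disconnecting)
                    pthread_cond_wait(&cond, &lock);
            if (disconnecting) {
                    pthread_mutex_unlock(&lock);
                    return -1;              /* -ENODEV in the driver */
            }
            pthread_mutex_unlock(&lock);
            return 0;
    }

    static void *disconnect_demo(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            disconnecting = true;           /* in_use is left alone on purpose */
            pthread_cond_broadcast(&cond);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, disconnect_demo, NULL);
            printf("flush returned %d\n", flush_demo());
            pthread_join(t, NULL);
            return 0;
    }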
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 071964c7847f..07c3c3449147 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -458,6 +458,7 @@ static void usblp_cleanup(struct usblp *usblp)
kfree(usblp->readbuf);
kfree(usblp->device_id_string);
kfree(usblp->statusbuf);
+ usb_put_intf(usblp->intf);
kfree(usblp);
}
@@ -474,10 +475,12 @@ static int usblp_release(struct inode *inode, struct file *file)
mutex_lock(&usblp_mutex);
usblp->used = 0;
- if (usblp->present) {
+ if (usblp->present)
usblp_unlink_urbs(usblp);
- usb_autopm_put_interface(usblp->intf);
- } else /* finish cleanup from disconnect */
+
+ usb_autopm_put_interface(usblp->intf);
+
+ if (!usblp->present) /* finish cleanup from disconnect */
usblp_cleanup(usblp);
mutex_unlock(&usblp_mutex);
return 0;
@@ -1118,7 +1121,7 @@ static int usblp_probe(struct usb_interface *intf,
init_waitqueue_head(&usblp->wwait);
init_usb_anchor(&usblp->urbs);
usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
- usblp->intf = intf;
+ usblp->intf = usb_get_intf(intf);
/* Malloc device ID string buffer to the largest expected length,
* since we can re-query it on an ioctl and a dynamic string
@@ -1207,6 +1210,7 @@ abort:
kfree(usblp->readbuf);
kfree(usblp->statusbuf);
kfree(usblp->device_id_string);
+ usb_put_intf(usblp->intf);
kfree(usblp);
abort_ret:
return retval;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 5ef8da6e67c3..64c76403a542 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -148,6 +148,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
do {
controller = of_find_node_with_property(controller, "phys");
+ if (!of_device_is_available(controller))
+ continue;
index = 0;
do {
if (arg0 == -1) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c6578b321838..edf66b2f916b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -198,9 +198,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_INT] = 1024,
};
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
- int asnum, struct usb_host_interface *ifp, int num_ep,
- unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+ struct usb_endpoint_descriptor *e2)
+{
+ if (e1->bEndpointAddress == e2->bEndpointAddress)
+ return true;
+
+ if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+ if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+ int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+ struct usb_endpoint_descriptor *epd;
+ struct usb_interface_cache *intfc;
+ struct usb_host_interface *alt;
+ int i, j, k;
+
+ for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+ intfc = config->intf_cache[i];
+
+ for (j = 0; j < intfc->num_altsetting; ++j) {
+ alt = &intfc->altsetting[j];
+
+ if (alt->desc.bInterfaceNumber == inum &&
+ alt->desc.bAlternateSetting != asnum)
+ continue;
+
+ for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+ epd = &alt->endpoint[k].desc;
+
+ if (endpoint_is_duplicate(epd, d))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ struct usb_host_config *config, int inum, int asnum,
+ struct usb_host_interface *ifp, int num_ep,
+ unsigned char *buffer, int size)
{
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
@@ -237,13 +286,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
goto skip_to_next_endpoint_or_interface_descriptor;
/* Check for duplicate endpoint addresses */
- for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
- if (ifp->endpoint[i].desc.bEndpointAddress ==
- d->bEndpointAddress) {
- dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
- cfgno, inum, asnum, d->bEndpointAddress);
- goto skip_to_next_endpoint_or_interface_descriptor;
- }
+ if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
}
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -341,8 +387,17 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
endpoint->desc.wMaxPacketSize = cpu_to_le16(8);
}
- /* Validate the wMaxPacketSize field */
+ /*
+ * Validate the wMaxPacketSize field.
+ * Some devices have isochronous endpoints in altsetting 0;
+ * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
+ * (see the end of section 5.6.3), so don't warn about them.
+ */
maxp = usb_endpoint_maxp(&endpoint->desc);
+ if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ }
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
@@ -512,8 +567,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
if (((struct usb_descriptor_header *) buffer)->bDescriptorType
== USB_DT_INTERFACE)
break;
- retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
- num_ep, buffer, size);
+ retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+ alt, num_ep, buffer, size);
if (retval < 0)
return retval;
++n;
@@ -763,21 +818,18 @@ void usb_destroy_configuration(struct usb_device *dev)
return;
if (dev->rawdescriptors) {
- for (i = 0; i < dev->descriptor.bNumConfigurations &&
- i < USB_MAXCONFIG; i++)
+ for (i = 0; i < dev->descriptor.bNumConfigurations; i++)
kfree(dev->rawdescriptors[i]);
kfree(dev->rawdescriptors);
dev->rawdescriptors = NULL;
}
- for (c = 0; c < dev->descriptor.bNumConfigurations &&
- c < USB_MAXCONFIG; c++) {
+ for (c = 0; c < dev->descriptor.bNumConfigurations; c++) {
struct usb_host_config *cf = &dev->config[c];
kfree(cf->string);
- for (i = 0; i < cf->desc.bNumInterfaces &&
- i < USB_MAXINTERFACES; i++) {
+ for (i = 0; i < cf->desc.bNumInterfaces; i++) {
if (cf->intf_cache[i])
kref_put(&cf->intf_cache[i]->ref,
usb_release_interface_cache);
@@ -923,7 +975,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
struct usb_bos_descriptor *bos;
struct usb_dev_cap_header *cap;
struct usb_ssp_cap_descriptor *ssp_cap;
- unsigned char *buffer;
+ unsigned char *buffer, *buffer0;
int length, total_len, num, i, ssac;
__u8 cap_type;
int ret;
@@ -934,8 +986,8 @@ int usb_get_bos_descriptor(struct usb_device *dev)
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
- if (ret < USB_DT_BOS_SIZE) {
- dev_err(ddev, "unable to get BOS descriptor\n");
+ if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
+ dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
@@ -968,10 +1020,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
ret = -ENOMSG;
goto err;
}
+
+ buffer0 = buffer;
total_len -= length;
+ buffer += length;
for (i = 0; i < num; i++) {
- buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -985,8 +1039,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
break;
}
- total_len -= length;
-
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
@@ -1021,7 +1073,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
default:
break;
}
+
+ total_len -= length;
+ buffer += length;
}
+ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
return 0;
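
The usb/core/config.c hunks extend the duplicate-endpoint check across all interfaces and altsettings, treating two descriptors as clashing when the addresses match or when either one is a control endpoint and the endpoint numbers match (control endpoints are bidirectional, so the direction bit is ignored for them). A self-contained version of just that comparison, with a pared-down descriptor struct invented for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    struct ep_desc {
            unsigned char bEndpointAddress; /* bit 7 = IN, bits 0-3 = number */
            unsigned char bmAttributes;     /* low 2 bits = transfer type    */
    };

    #define XFER_CONTROL 0

    static unsigned char ep_num(const struct ep_desc *e)
    {
            return e->bEndpointAddress & 0x0f;
    }

    static bool ep_is_control(const struct ep_desc *e)
    {
            return (e->bmAttributes & 0x03) == XFER_CONTROL;
    }

    static bool endpoint_is_duplicate_demo(const struct ep_desc *e1,
                                           const struct ep_desc *e2)
    {
            if (e1->bEndpointAddress == e2->bEndpointAddress)
                    return true;

            if (ep_is_control(e1) || ep_is_control(e2))
                    return ep_num(e1) == ep_num(e2);

            return false;
    }

    int main(void)
    {
            struct ep_desc bulk_in   = { .bEndpointAddress = 0x81, .bmAttributes = 2 };
            struct ep_desc bulk_out  = { .bEndpointAddress = 0x01, .bmAttributes = 2 };
            struct ep_desc ctrl_out1 = { .bEndpointAddress = 0x01, .bmAttributes = 0 };

            printf("bulk in vs bulk out duplicate: %d\n",
                   endpoint_is_duplicate_demo(&bulk_in, &bulk_out));
            printf("ctrl 1 vs bulk in duplicate:   %d\n",
                   endpoint_is_duplicate_demo(&ctrl_out1, &bulk_in));
            return 0;
    }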
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 52fc2084b310..059e71d71b66 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -754,8 +754,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -775,7 +782,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
@@ -1810,8 +1823,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return 0;
error:
- if (as && as->usbm)
- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
kfree(isopkt);
kfree(dr);
if (as)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e9d6cf146fcc..654199c6a36c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -470,11 +470,6 @@ static int usb_unbind_interface(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
- /* Undo any residual pm_autopm_get_interface_* calls */
- for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
- usb_autopm_put_interface_no_suspend(intf);
- atomic_set(&intf->pm_usage_cnt, 0);
-
if (!error)
usb_autosuspend_device(udev);
@@ -1625,7 +1620,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1654,7 +1648,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@@ -1676,7 +1669,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@@ -1707,8 +1699,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = pm_runtime_get_sync(&intf->dev);
if (status < 0)
pm_runtime_put_sync(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1742,8 +1732,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = pm_runtime_get(&intf->dev);
if (status < 0 && status != -EINPROGRESS)
pm_runtime_put_noidle(&intf->dev);
- else
- atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@@ -1767,7 +1755,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
- atomic_inc(&intf->pm_usage_cnt);
pm_runtime_get_noresume(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
@@ -1888,14 +1875,11 @@ int usb_runtime_idle(struct device *dev)
return -EBUSY;
}
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret = -EPERM;
- if (enable && !udev->usb2_hw_lpm_allowed)
- return 0;
-
if (hcd->driver->set_usb2_hw_lpm) {
ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
if (!ret)
@@ -1905,6 +1889,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
return ret;
}
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ if (!udev->usb2_hw_lpm_capable ||
+ !udev->usb2_hw_lpm_allowed ||
+ udev->usb2_hw_lpm_enabled)
+ return 0;
+
+ return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ if (!udev->usb2_hw_lpm_enabled)
+ return 0;
+
+ return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
#endif /* CONFIG_PM */
struct bus_type usb_bus_type = {
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 422ce7b20d73..1626abeca8e8 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -191,9 +191,10 @@ int usb_register_dev(struct usb_interface *intf,
intf->minor = minor;
break;
}
- up_write(&minor_rwsem);
- if (intf->minor < 0)
+ if (intf->minor < 0) {
+ up_write(&minor_rwsem);
return -EXFULL;
+ }
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -201,12 +202,11 @@ int usb_register_dev(struct usb_interface *intf,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
- down_write(&minor_rwsem);
usb_minors[minor] = NULL;
intf->minor = -1;
- up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
+ up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -232,12 +232,12 @@ void usb_deregister_dev(struct usb_interface *intf,
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
intf->usb_dev = NULL;
intf->minor = -1;
destroy_usb_class();
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0f7ef2b2e0a6..a4709352e249 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3177,6 +3177,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
+ /* No need for pm_runtime_put(), we're shutting down */
+ pm_runtime_get_sync(&dev->dev);
+
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 92aadb9c1883..eb2625b48ba1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -103,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
+static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
+ u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@@ -953,13 +955,17 @@ int usb_remove_device(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_interface *intf;
+ int ret;
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
- usb_autopm_get_interface(intf);
+ ret = usb_autopm_get_interface(intf);
+ if (ret < 0)
+ return ret;
+
set_bit(udev->portnum, hub->removed_bits);
hub_port_logical_disconnect(hub, udev->portnum);
usb_autopm_put_interface(intf);
@@ -1109,6 +1115,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
+ /* Make sure a warm-reset request is handled by port_event */
+ if (type == HUB_RESUME &&
+ hub_port_warm_reset_required(hub, port1, portstatus))
+ set_bit(port1, hub->event_bits);
+
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after
@@ -1160,6 +1171,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION) ||
(portstatus & USB_PORT_STAT_OVERCURRENT) ||
(portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
@@ -1184,11 +1196,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
- /* Don't set the change_bits when the device
- * was powered off.
- */
- if (test_bit(port1, hub->power_bits))
- set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -3182,8 +3189,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
}
/* disable USB2 hardware LPM */
- if (udev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(udev, 0);
+ usb_disable_usb2_hardware_lpm(udev);
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
@@ -3229,8 +3235,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
usb_enable_ltm(udev);
err_ltm:
/* Try to enable USB2 hardware LPM again */
- if (udev->usb2_hw_lpm_capable == 1)
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
if (udev->do_remote_wakeup)
(void) usb_disable_remote_wakeup(udev);
@@ -3513,8 +3518,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
hub_port_logical_disconnect(hub, port1);
} else {
/* Try to enable USB2 hardware LPM */
- if (udev->usb2_hw_lpm_capable == 1)
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
/* Try to enable USB3 LTM and LPM */
usb_enable_ltm(udev);
@@ -3552,6 +3556,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
struct usb_device *hdev;
struct usb_device *udev;
int connect_change = 0;
+ u16 link_state;
int ret;
hdev = hub->hdev;
@@ -3561,9 +3566,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
return 0;
usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
} else {
+ link_state = portstatus & USB_PORT_STAT_LINK_STATE;
if (!udev || udev->state != USB_STATE_SUSPENDED ||
- (portstatus & USB_PORT_STAT_LINK_STATE) !=
- USB_SS_PORT_LS_U0)
+ (link_state != USB_SS_PORT_LS_U0 &&
+ link_state != USB_SS_PORT_LS_U1 &&
+ link_state != USB_SS_PORT_LS_U2))
return 0;
}
@@ -3893,6 +3900,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
* control transfers to set the hub timeout or enable device-initiated U1/U2
* will be successful.
*
+ * If the control transfer to enable device-initiated U1/U2 entry fails, then
+ * hub-initiated U1/U2 will be disabled.
+ *
* If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
* driver know about it. If that call fails, it should be harmless, and just
* take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
@@ -3947,23 +3957,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
- } else {
- /* Only a configured device will accept the Set Feature
- * U1/U2_ENABLE
- */
- if (udev->actconfig)
- usb_set_device_initiated_lpm(udev, state, true);
+ return;
+ }
- /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
- * hub-initiated LPM is enabled. Thus, LPM is enabled no
- * matter the result of usb_set_device_initiated_lpm().
- * The only difference is whether device is able to initiate
- * LPM.
- */
+ /* Only a configured device will accept the Set Feature
+ * U1/U2_ENABLE
+ */
+ if (udev->actconfig &&
+ usb_set_device_initiated_lpm(udev, state, true) == 0) {
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
+ } else {
+ /* Don't request U1/U2 entry if the device
+ * cannot transition to U1/U2.
+ */
+ usb_set_lpm_timeout(udev, state, 0);
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
}
}
@@ -4351,7 +4362,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
}
}
@@ -5509,8 +5520,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* Disable USB2 hardware LPM.
* It will be re-enabled by the enumeration process.
*/
- if (udev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(udev, 0);
+ usb_disable_usb2_hardware_lpm(udev);
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
@@ -5620,7 +5630,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
- usb_set_usb2_hardware_lpm(udev, 1);
+ usb_enable_usb2_hardware_lpm(udev);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);
@@ -5638,7 +5648,7 @@ re_enumerate_no_bos:
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
- * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+ * @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
@@ -5666,8 +5676,7 @@ int usb_reset_device(struct usb_device *udev)
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state == USB_STATE_SUSPENDED) {
+ if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
@@ -5735,7 +5744,10 @@ int usb_reset_device(struct usb_device *udev)
cintf->needs_binding = 1;
}
}
- usb_unbind_and_rebind_marked_interfaces(udev);
+
+ /* If the reset failed, hub_wq will unbind drivers later */
+ if (ret == 0)
+ usb_unbind_and_rebind_marked_interfaces(udev);
}
usb_autosuspend_device(udev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c0c5d5b3ec40..d6075d45e10a 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -817,9 +817,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
- if (size <= 0 || !buf || !index)
+ if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
+ if (index <= 0 || index >= 256)
+ return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;
@@ -1181,8 +1183,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev->actconfig->interface[i] = NULL;
}
- if (dev->usb2_hw_lpm_enabled == 1)
- usb_set_usb2_hardware_lpm(dev, 0);
+ usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);
@@ -2141,14 +2142,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 460c855be0d0..53c1f6e604b1 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -179,7 +179,10 @@ static int usb_port_runtime_resume(struct device *dev)
if (!port_dev->is_superspeed && peer)
pm_runtime_get_sync(&peer->dev);
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
if (udev && !retval) {
@@ -232,7 +235,10 @@ static int usb_port_runtime_suspend(struct device *dev)
if (usb_port_block_power_off)
return -EBUSY;
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, false);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
if (!port_dev->is_superspeed)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 733479ddf8a7..6c4bb47922ac 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -64,9 +64,15 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Microsoft Surface Dock Ethernet (RTL8153 GigE) */
+ { USB_DEVICE(0x045e, 0x07c6), .driver_info = USB_QUIRK_NO_LPM },
+
/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Webcam C270 */
+ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -80,6 +86,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Logitech Screen Share */
+ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -220,6 +229,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Realtek hub in Dell WD19 (Type-C) */
+ { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
+
+ /* Generic RTL8153 based ethernet adapters */
+ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -285,6 +300,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* novation SoundControl XL */
+ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f1c695..1a232b4ffe71 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -494,7 +494,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
if (!ret) {
udev->usb2_hw_lpm_allowed = value;
- ret = usb_set_usb2_hardware_lpm(udev, value);
+ if (value)
+ ret = usb_enable_usb2_hardware_lpm(udev);
+ else
+ ret = usb_disable_usb2_hardware_lpm(udev);
}
usb_unlock_device(udev);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 5133ab965229..7b81bcfa19cb 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -40,6 +40,7 @@ void usb_init_urb(struct urb *urb)
if (urb) {
memset(urb, 0, sizeof(*urb));
kref_init(&urb->kref);
+ INIT_LIST_HEAD(&urb->urb_list);
INIT_LIST_HEAD(&urb->anchor_list);
}
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 53318126ed91..6b2f11544283 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -84,7 +84,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
extern int usb_runtime_suspend(struct device *dev);
extern int usb_runtime_resume(struct device *dev);
extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
#else
@@ -104,7 +105,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
return 0;
}
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
{
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 0e5435330c07..120c8f716acf 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2552,8 +2552,10 @@ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
return;
/* Restore urb->transfer_buffer from the end of the allocated area */
- memcpy(&stored_xfer_buffer, urb->transfer_buffer +
- urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
+ memcpy(&stored_xfer_buffer,
+ PTR_ALIGN(urb->transfer_buffer + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
+ sizeof(urb->transfer_buffer));
if (usb_urb_dir_in(urb))
memcpy(stored_xfer_buffer, urb->transfer_buffer,
@@ -2580,6 +2582,7 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* DMA
*/
kmalloc_size = urb->transfer_buffer_length +
+ (dma_get_cache_alignment() - 1) +
sizeof(urb->transfer_buffer);
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
@@ -2590,7 +2593,8 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
* Position value of original urb->transfer_buffer pointer to the end
* of allocation for later referencing
*/
- memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+ memcpy(PTR_ALIGN(kmalloc_ptr + urb->transfer_buffer_length,
+ dma_get_cache_alignment()),
&urb->transfer_buffer, sizeof(urb->transfer_buffer));
if (usb_urb_dir_out(urb))
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 09e7bedc5e1d..ecbe1c153f32 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -317,8 +317,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
- if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
- "request value same as default, ignoring\n")) {
+ if (dft != dwc->fladj) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
@@ -1012,6 +1011,9 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
/* do nothing */
break;
}
+
+ /* de-assert DRVVBUS for HOST and OTG mode */
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
}
#define DWC3_ALIGN_MASK (16 - 1)
@@ -1066,7 +1068,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->regs_size = resource_size(res);
/* default to highest possible threshold */
- lpm_nyet_threshold = 0xff;
+ lpm_nyet_threshold = 0xf;
/* default to -3.5dB de-emphasis */
tx_de_emphasis = 1;
@@ -1191,7 +1193,8 @@ static int dwc3_probe(struct platform_device *pdev)
ret = dwc3_core_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize core\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize core: %d\n", ret);
goto err4;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 599b096dd6cc..d5dd9e76d99b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3121,9 +3121,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->gadget.sg_supported = true;
dwc->gadget.name = "dwc3-gadget";
dwc->gadget.is_otg = (dwc->dr_mode == USB_DR_MODE_OTG) &&
- (dwc->otg_caps.hnp_support ||
- dwc->otg_caps.srp_support ||
- dwc->otg_caps.adp_support);
+ (dwc->otg_caps.hnp_support ||
+ dwc->otg_caps.srp_support ||
+ dwc->otg_caps.adp_support);
/*
* FIXME We might be setting max_speed to <SUPER, however versions
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 2c022a08f163..2e545d025030 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -437,12 +437,14 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
val = CONFIG_USB_GADGET_VBUS_DRAW;
if (!val)
return 0;
- switch (speed) {
- case USB_SPEED_SUPER:
- return DIV_ROUND_UP(val, 8);
- default:
- return DIV_ROUND_UP(val, 2);
- }
+ if (speed < USB_SPEED_SUPER)
+ return min(val, 500U) / 2;
+ else
+ /*
+ * USB 3.x supports up to 900mA, but since 900 isn't divisible
+ * by 8 the integral division will effectively cap to 896mA.
+ */
+ return min(val, 900U) / 8;
}
static int config_buf(struct usb_configuration *config,
@@ -835,7 +837,16 @@ static int set_config(struct usb_composite_dev *cdev,
/* when we return, be sure our power usage is valid */
power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+ if (gadget->speed < USB_SPEED_SUPER)
+ power = min(power, 500U);
+ else
+ power = min(power, 900U);
done:
+ if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_set_selfpowered(gadget);
+ else
+ usb_gadget_clear_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
result = USB_GADGET_DELAYED_STATUS;
@@ -2000,6 +2011,7 @@ void composite_disconnect(struct usb_gadget *gadget)
* disconnect callbacks?
*/
spin_lock_irqsave(&cdev->lock, flags);
+ cdev->suspended = 0;
if (cdev->config)
reset_config(cdev);
if (cdev->driver->disconnect)
@@ -2178,14 +2190,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
kfree(cdev->os_desc_req->buf);
+ cdev->os_desc_req->buf = NULL;
usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+ cdev->os_desc_req = NULL;
}
if (cdev->req) {
if (cdev->setup_pending)
usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
kfree(cdev->req->buf);
+ cdev->req->buf = NULL;
usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+ cdev->req = NULL;
}
cdev->next_string_id = 0;
device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
@@ -2262,6 +2278,7 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2269,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
- u16 maxpower;
+ unsigned maxpower;
/* REVISIT: should we have config level
* suspend/resume callbacks?
@@ -2283,10 +2300,17 @@ void composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
- maxpower = cdev->config->MaxPower;
+ maxpower = cdev->config->MaxPower ?
+ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+ if (gadget->speed < USB_SPEED_SUPER)
+ maxpower = min(maxpower, 500U);
+ else
+ maxpower = min(maxpower, 900U);
+
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ usb_gadget_clear_selfpowered(gadget);
- usb_gadget_vbus_draw(gadget, maxpower ?
- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
+ usb_gadget_vbus_draw(gadget, maxpower);
}
cdev->suspended = 0;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a5ca409dc97e..3f1662b64bab 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -60,6 +60,8 @@ struct gadget_info {
bool use_os_desc;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
+ spinlock_t spinlock;
+ bool unbind;
};
static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -1241,6 +1243,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
int ret;
/* the gi->lock is hold by the caller */
+ gi->unbind = 0;
cdev->gadget = gadget;
set_gadget_data(gadget, cdev);
ret = composite_dev_prepare(composite, cdev);
@@ -1373,31 +1376,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
struct gadget_info *gi;
+ unsigned long flags;
/* the gi->lock is hold by the caller */
cdev = get_gadget_data(gadget);
gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ gi->unbind = 1;
+ spin_unlock_irqrestore(&gi->spinlock, flags);
kfree(otg_desc[0]);
otg_desc[0] = NULL;
purge_configs_funcs(gi);
composite_dev_cleanup(cdev);
usb_ep_autoconfig_reset(cdev->gadget);
+ spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
set_gadget_data(gadget, NULL);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static int configfs_composite_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+ int ret;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return 0;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return 0;
+ }
+
+ ret = composite_setup(gadget, ctrl);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return ret;
+}
+
+static void configfs_composite_disconnect(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_disconnect(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_suspend(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_suspend(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_resume(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_resume(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
}
static const struct usb_gadget_driver configfs_driver_template = {
.bind = configfs_composite_bind,
.unbind = configfs_composite_unbind,
- .setup = composite_setup,
- .reset = composite_disconnect,
- .disconnect = composite_disconnect,
+ .setup = configfs_composite_setup,
+ .reset = configfs_composite_disconnect,
+ .disconnect = configfs_composite_disconnect,
- .suspend = composite_suspend,
- .resume = composite_resume,
+ .suspend = configfs_composite_suspend,
+ .resume = configfs_composite_resume,
.max_speed = USB_SPEED_SUPER,
.driver = {
@@ -1441,6 +1541,7 @@ static struct config_group *gadgets_make(
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER;
+ spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 4c488d15b6f6..8e3e44382785 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -56,6 +56,7 @@ struct f_ecm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
/* FIXME is_open needs some irq-ish locking
@@ -384,7 +385,7 @@ static void ecm_do_notify(struct f_ecm *ecm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ecm->notify_count))
return;
event = req->buf;
@@ -424,10 +425,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ecm->ctrl_id);
- ecm->notify_req = NULL;
+ atomic_inc(&ecm->notify_count);
status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
if (status < 0) {
- ecm->notify_req = req;
+ atomic_dec(&ecm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -452,17 +453,19 @@ static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req)
switch (req->status) {
case 0:
/* no fault */
+ atomic_dec(&ecm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ecm->notify_count, 0);
ecm->notify_state = ECM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ecm->notify_count);
break;
}
- ecm->notify_req = req;
ecm_do_notify(ecm);
}
@@ -625,8 +628,12 @@ static void ecm_disable(struct usb_function *f)
DBG(cdev, "ecm deactivated\n");
- if (ecm->port.in_ep->enabled)
+ if (ecm->port.in_ep->enabled) {
gether_disconnect(&ecm->port);
+ } else {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ }
usb_ep_disable(ecm->notify);
ecm->notify->desc = NULL;
@@ -905,6 +912,11 @@ static void ecm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
+ if (atomic_read(&ecm->notify_count)) {
+ usb_ep_dequeue(ecm->notify, ecm->notify_req);
+ atomic_set(&ecm->notify_count, 0);
+ }
+
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 04eb64381d92..223b28cbf3c8 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1008,6 +1008,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* condition with req->complete callback.
*/
usb_ep_dequeue(ep->ep, req);
+ wait_for_completion(&done);
interrupted = ep->status < 0;
}
@@ -1035,6 +1036,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
if (unlikely(ret)) {
+ io_data->req = NULL;
usb_ep_free_request(ep->ep, req);
goto error_lock;
}
@@ -1076,18 +1078,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ unsigned long flags;
int value;
ENTER();
- spin_lock_irq(&epfile->ffs->eps_lock);
+ spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
if (likely(io_data && io_data->ep && io_data->req))
value = usb_ep_dequeue(io_data->ep, io_data->req);
else
value = -EINVAL;
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
return value;
}
@@ -1100,11 +1103,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
ENTER();
if (!is_sync_kiocb(kiocb)) {
- p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
+ memset(p, 0, sizeof(*p));
p->aio = false;
}
@@ -1136,11 +1140,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
ENTER();
if (!is_sync_kiocb(kiocb)) {
- p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
+ memset(p, 0, sizeof(*p));
p->aio = false;
}
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 5815120c0402..8e83649f77ce 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -340,20 +340,20 @@ try_again:
req->complete = f_hidg_req_complete;
req->context = hidg;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- goto release_write_pending_unlocked;
+ goto release_write_pending;
} else {
status = count;
}
- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
return status;
release_write_pending:
spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
hidg->write_pending = 0;
spin_unlock_irqrestore(&hidg->write_spinlock, flags);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 639603722709..6399923239e7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -57,6 +57,7 @@ struct f_ncm {
struct usb_ep *notify;
struct usb_request *notify_req;
u8 notify_state;
+ atomic_t notify_count;
bool is_open;
const struct ndp_parser_opts *parser_opts;
@@ -552,7 +553,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
int status;
/* notification already in flight? */
- if (!req)
+ if (atomic_read(&ncm->notify_count))
return;
event = req->buf;
@@ -592,7 +593,8 @@ static void ncm_do_notify(struct f_ncm *ncm)
event->bmRequestType = 0xA1;
event->wIndex = cpu_to_le16(ncm->ctrl_id);
- ncm->notify_req = NULL;
+ atomic_inc(&ncm->notify_count);
+
/*
* In double buffering if there is a space in FIFO,
* completion callback can be called right after the call,
@@ -602,7 +604,7 @@ static void ncm_do_notify(struct f_ncm *ncm)
status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
spin_lock(&ncm->lock);
if (status < 0) {
- ncm->notify_req = req;
+ atomic_dec(&ncm->notify_count);
DBG(cdev, "notify --> %d\n", status);
}
}
@@ -637,17 +639,19 @@ static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
case 0:
VDBG(cdev, "Notification %02x sent\n",
event->bNotificationType);
+ atomic_dec(&ncm->notify_count);
break;
case -ECONNRESET:
case -ESHUTDOWN:
+ atomic_set(&ncm->notify_count, 0);
ncm->notify_state = NCM_NOTIFY_NONE;
break;
default:
DBG(cdev, "event %02x --> %d\n",
event->bNotificationType, req->status);
+ atomic_dec(&ncm->notify_count);
break;
}
- ncm->notify_req = req;
ncm_do_notify(ncm);
spin_unlock(&ncm->lock);
}
@@ -1639,6 +1643,11 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
+ if (atomic_read(&ncm->notify_count)) {
+ usb_ep_dequeue(ncm->notify, ncm->notify_req);
+ atomic_set(&ncm->notify_count, 0);
+ }
+
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index ba00cdb809d6..865cb070bf8b 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -622,6 +622,7 @@ static void rndis_disable(struct usb_function *f)
gether_disconnect(&rndis->port);
usb_ep_disable(rndis->notify);
+ rndis->notify->desc = NULL;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 5d1bd13a56c1..d5fbc2352029 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -198,11 +198,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
out = dev->port_usb->out_ep;
else
out = NULL;
- spin_unlock_irqrestore(&dev->lock, flags);
if (!out)
+ {
+ spin_unlock_irqrestore(&dev->lock, flags);
return -ENOTCONN;
-
+ }
/* Padding up to RX_EXTRA handles minor disagreements with host.
* Normally we use the USB "terminate on short read" convention;
@@ -223,6 +224,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
if (dev->port_usb->is_fixed)
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+ spin_unlock_irqrestore(&dev->lock, flags);
skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
if (skb == NULL) {
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 2f151e0aa6da..5d7d0f2e80a5 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -715,8 +715,10 @@ static int gs_start_io(struct gs_port *port)
port->n_read = 0;
started = gs_start_rx(port);
- /* unblock any pending writes into our circular buffer */
if (started) {
+ gs_start_tx(port);
+ /* Unblock any pending writes into our circular buffer, in case
+ * we didn't in gs_start_tx() */
tty_wakeup(port->port.tty);
} else {
gs_free_requests(ep, head, &port->read_allocated);
@@ -1391,8 +1393,10 @@ int gserial_alloc_line(unsigned char *line_num)
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
+ mutex_lock(&ports[port_num].lock);
port = ports[port_num].port;
ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index d7dcd39fe12c..3d843e14447b 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -543,6 +543,7 @@ static int uvcg_control_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -584,6 +585,7 @@ static int uvcg_control_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -770,6 +772,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
format_ptr->fmt = target_fmt;
list_add_tail(&format_ptr->entry, &src_hdr->formats);
++src_hdr->num_fmt;
+ ++target_fmt->linked;
out:
mutex_unlock(&opts->lock);
@@ -808,6 +811,8 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
break;
}
+ --target_fmt->linked;
+
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
@@ -2047,6 +2052,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -2091,6 +2097,7 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 0f01c04d7cbd..d6bab12b0b47 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -129,6 +129,21 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
+static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
+{
+ int ret;
+
+ ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ printk(KERN_INFO "Failed to queue request (%d).\n", ret);
+ /* Isochronous endpoints can't be halted. */
+ if (usb_endpoint_xfer_bulk(video->ep->desc))
+ usb_ep_set_halt(video->ep);
+ }
+
+ return ret;
+}
+
/*
* I somehow feel that synchronisation won't be easy to achieve here. We have
* three events that control USB requests submission:
@@ -193,14 +208,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
- printk(KERN_INFO "Failed to queue request (%d).\n", ret);
- usb_ep_set_halt(ep);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ if (ret < 0) {
uvcg_queue_cancel(queue, 0);
goto requeue;
}
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
return;
@@ -320,15 +334,13 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
/* Queue the USB request */
- ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
if (ret < 0) {
- printk(KERN_INFO "Failed to queue request (%d)\n", ret);
- usb_ep_set_halt(video->ep);
- spin_unlock_irqrestore(&queue->irqlock, flags);
uvcg_queue_cancel(queue, 0);
break;
}
- spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 51c08682de84..5ee25beb52f0 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -229,7 +229,7 @@ static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = cdc_bind,
.unbind = cdc_unbind,
};
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 6da7316f8e87..54ee4e31645b 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -153,7 +153,7 @@ static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gfs_bind,
.unbind = gfs_unbind,
};
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index a70a406580ea..3b7fc5c7e9c3 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -486,7 +486,7 @@ static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = multi_bind,
.unbind = multi_unbind,
.needs_serial = 1,
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 0aba68253e3d..2fb4a847dd52 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -203,7 +203,7 @@ static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
- .max_speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_SUPER,
.bind = gncm_bind,
.unbind = gncm_unbind,
};
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 9705bcdbc577..57dd3bad9539 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -403,9 +403,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
next_fifo_transaction(ep, req);
if (req->last_transaction) {
usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
- usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
} else {
- usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+ if (ep_is_control(ep))
+ usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
}
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 53abf7e7875b..d57e927b6fb6 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -106,6 +106,17 @@ int usb_ep_enable(struct usb_ep *ep)
if (ep->enabled)
goto out;
+ /* UDC drivers can't handle endpoints with maxpacket size 0 */
+ if (usb_endpoint_maxp(ep->desc) == 0) {
+ /*
+ * We should log an error message here, but we can't call
+ * dev_err() because there's no way to find the gadget
+ * given only ep.
+ */
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ep->ops->enable(ep, ep->desc);
if (ret)
goto out;
@@ -806,6 +817,8 @@ int usb_gadget_map_request_by_dev(struct device *dev,
dev_err(dev, "failed to map buffer\n");
return -EFAULT;
}
+
+ req->dma_mapped = 1;
}
return 0;
@@ -830,9 +843,10 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
- } else {
+ } else if (req->dma_mapped) {
dma_unmap_single(dev, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->dma_mapped = 0;
}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab89fa3b4118..2f7023a289c9 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -50,6 +50,7 @@
#define DRIVER_VERSION "02 May 2005"
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900 /* in mA */
static const char driver_name[] = "dummy_hcd";
static const char driver_desc[] = "USB Host+Gadget Emulator";
@@ -2433,7 +2434,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
- dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+ dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 95df2b3bb6a1..76e991557116 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -744,7 +744,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
fotg210->ep0_req->length = 2;
spin_unlock(&fotg210->lock);
- fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
+ fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
spin_lock(&fotg210->lock);
}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 8991a4070792..bd98557caa28 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2570,7 +2570,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
dma_pool_destroy(udc_controller->td_pool);
free_irq(udc_controller->irq, udc_controller);
iounmap(dr_regs);
- if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+ if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
release_mem_region(res->start, resource_size(res));
/* free udc --wait for the release() finished */
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 948845c90e47..351012c498c5 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1345,12 +1345,15 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
static int fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
return 0;
@@ -1494,6 +1497,8 @@ clean_up:
if (fusb300->ep0_req)
fusb300_free_request(&fusb300->ep[0]->ep,
fusb300->ep0_req);
+ for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+ kfree(fusb300->ep[i]);
kfree(fusb300);
}
if (reg)
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 39b7136d31d9..9e246d2e55ca 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2200,8 +2200,6 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
}
- spin_lock(&dev->lock);
-
/* Inside lock so that no gadget can use this udc until probe is done */
retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
if (retval) {
@@ -2210,15 +2208,21 @@ static int gr_probe(struct platform_device *pdev)
}
dev->added = 1;
+ spin_lock(&dev->lock);
+
retval = gr_udc_init(dev);
- if (retval)
+ if (retval) {
+ spin_unlock(&dev->lock);
goto out;
-
- gr_dfs_create(dev);
+ }
/* Clear all interrupt enables that might be left on since last boot */
gr_disable_interrupts_and_pullup(dev);
+ spin_unlock(&dev->lock);
+
+ gr_dfs_create(dev);
+
retval = gr_request_irq(dev, dev->irq);
if (retval) {
dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
@@ -2247,8 +2251,6 @@ static int gr_probe(struct platform_device *pdev)
dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
out:
- spin_unlock(&dev->lock);
-
if (retval)
gr_remove(pdev);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 8f32b5ee7734..ac2aa04ca657 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -935,8 +935,7 @@ static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
dma_addr_t dma;
struct lpc32xx_usbd_dd_gad *dd;
- dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
- udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
+ dd = dma_pool_alloc(udc->dd_cache, GFP_ATOMIC | GFP_DMA, &dma);
if (dd)
dd->this_dma = dma;
@@ -1179,11 +1178,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
bl = bytes - n;
- if (bl > 3)
- bl = 3;
+ if (bl > 4)
+ bl = 4;
for (i = 0; i < bl; i++)
- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
}
break;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 40396a265a3f..f57d293a1791 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -958,6 +958,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 7a8c36642293..dfaed8e8cc52 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -870,9 +870,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
(void) readl(&ep->dev->pci->pcimstctl);
writel(BIT(DMA_START), &dma->dmastat);
-
- if (!ep->is_in)
- stop_out_naking(ep);
}
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -911,6 +908,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
writel(BIT(DMA_START), &dma->dmastat);
return;
}
+ stop_out_naking(ep);
}
tmp = dmactl_default;
@@ -1279,9 +1277,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
break;
}
if (&req->req != _req) {
+ ep->stopped = stopped;
spin_unlock_irqrestore(&ep->dev->lock, flags);
- dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
- __func__);
+ ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 27b53f4386bd..c10dd8b747bc 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -40,6 +40,10 @@
/*-------------------------------------------------------------------------*/
+/* PID Codes that are used here, from EHCI specification, Table 3-16. */
+#define PID_CODE_IN 1
+#define PID_CODE_SETUP 2
+
/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
@@ -203,7 +207,7 @@ static int qtd_copy_status (
int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
- if (likely (QTD_PID (token) != 2))
+ if (likely(QTD_PID(token) != PID_CODE_SETUP))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
@@ -219,6 +223,13 @@ static int qtd_copy_status (
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
status = -EOVERFLOW;
+ /*
+ * When MMF is active and PID Code is IN, queue is halted.
+ * EHCI Specification, Table 4-13.
+ */
+ } else if ((token & QTD_STS_MMF) &&
+ (QTD_PID(token) == PID_CODE_IN)) {
+ status = -EPROTO;
/* CERR nonzero + halt --> stall */
} else if (QTD_CERR(token)) {
status = -EPIPE;
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 66efa9a67687..72853020a542 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* see what we found out */
temp = check_reset_complete(fotg210, wIndex, status_reg,
fotg210_readl(fotg210, status_reg));
+
+ /* restart schedule */
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
}
if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 97750f162f01..c14e4a64b0e8 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -173,7 +173,7 @@ out:
return result;
error_set_cluster_id:
- wusb_cluster_id_put(wusbhc->cluster_id);
+ wusb_cluster_id_put(addr);
error_cluster_id_get:
goto out;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1afb76e8b1a5..17f1cf02ce34 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -417,8 +417,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
* other cases where the next software may expect clean state from the
* "firmware". this is bus-neutral, unlike shutdown() methods.
*/
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci;
@@ -434,6 +433,16 @@ ohci_shutdown (struct usb_hcd *hcd)
ohci->rh_state = OHCI_RH_HALTED;
}
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ohci->lock, flags);
+ _ohci_shutdown(hcd);
+ spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
/*-------------------------------------------------------------------------*
* HC functions
*-------------------------------------------------------------------------*/
@@ -752,7 +761,7 @@ static void io_watchdog_func(unsigned long _ohci)
died:
usb_hc_died(ohci_to_hcd(ohci));
ohci_dump(ohci);
- ohci_shutdown(ohci_to_hcd(ohci));
+ _ohci_shutdown(ohci_to_hcd(ohci));
goto done;
} else {
/* No write back because the done queue was empty */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index ee213c5f4107..11b0767ca1ba 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -187,7 +187,7 @@ int usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- int ret;
+ int need_pll_quirk = 0;
spin_lock_irqsave(&amd_lock, flags);
@@ -201,21 +201,28 @@ int usb_amd_find_chipset_info(void)
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
- ret = 0;
goto commit;
}
- /* Below chipset generations needn't enable AMD PLL quirk */
- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
- info.sb_type.gen == AMD_CHIPSET_SB600 ||
- info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
- (info.sb_type.gen == AMD_CHIPSET_SB700 &&
- info.sb_type.rev > 0x3b)) {
+ switch (info.sb_type.gen) {
+ case AMD_CHIPSET_SB700:
+ need_pll_quirk = info.sb_type.rev <= 0x3B;
+ break;
+ case AMD_CHIPSET_SB800:
+ case AMD_CHIPSET_HUDSON2:
+ case AMD_CHIPSET_BOLTON:
+ need_pll_quirk = 1;
+ break;
+ default:
+ need_pll_quirk = 0;
+ break;
+ }
+
+ if (!need_pll_quirk) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
- ret = 0;
goto commit;
}
@@ -234,7 +241,7 @@ int usb_amd_find_chipset_info(void)
}
}
- ret = info.probe_result = 1;
+ need_pll_quirk = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
commit:
@@ -245,7 +252,7 @@ commit:
/* Mark that we where here */
amd_chipset.probe_count++;
- ret = amd_chipset.probe_result;
+ need_pll_quirk = amd_chipset.probe_result;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -259,7 +266,7 @@ commit:
spin_unlock_irqrestore(&amd_lock, flags);
}
- return ret;
+ return need_pll_quirk;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 43618976d68a..3efb7b0e8269 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3208,6 +3208,9 @@ static int __init u132_hcd_init(void)
printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
retval = platform_driver_register(&u132_platform_driver);
+ if (retval)
+ destroy_workqueue(workqueue);
+
return retval;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 03fe5e26fd07..418d27898e03 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -843,7 +843,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
struct xhci_bus_state *bus_state,
__le32 __iomem **port_array,
u16 wIndex, u32 raw_port_status,
- unsigned long flags)
+ unsigned long *flags)
__releases(&xhci->lock)
__acquires(&xhci->lock)
{
@@ -875,6 +875,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_BH_RESET << 16;
if ((raw_port_status & PORT_CEC))
status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
+
+ /* USB3 remote wake resume signaling completed */
+ if (bus_state->port_remote_wakeup & (1 << wIndex) &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME &&
+ (raw_port_status & PORT_PLS_MASK) != XDEV_RECOVERY) {
+ bus_state->port_remote_wakeup &= ~(1 << wIndex);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
+ }
}
if (hcd->speed < HCD_USB3) {
@@ -925,12 +933,12 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irqrestore(&xhci->lock, *flags);
time_left = wait_for_completion_timeout(
&bus_state->rexit_done[wIndex],
msecs_to_jiffies(
XHCI_MAX_REXIT_TIMEOUT_MS));
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
slot_id = xhci_find_slot_id_by_port(hcd,
@@ -1077,7 +1085,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
}
status = xhci_get_port_status(hcd, bus_state, port_array,
- wIndex, temp, flags);
+ wIndex, temp, &flags);
if (status == 0xffffffff)
goto error;
@@ -1097,7 +1105,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
port_li = readl(port_array[wIndex] + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
- put_unaligned_le32(cpu_to_le32(status), &buf[4]);
+ put_unaligned_le32(status, &buf[4]);
}
break;
case SetPortFeature:
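xhci_get_port_status() drops and re-takes xhci->lock around a sleeping wait, so it now receives the caller's irqsave flags by pointer; restoring a flags word saved in a different stack frame would not preserve the interrupt state the caller captured. A minimal sketch of the pattern with a hypothetical lock and helper:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Callee: temporarily releases the lock the caller holds.  The flags word
 * belongs to the caller's spin_lock_irqsave(), so it is passed by pointer. */
static void do_slow_work_unlocked(unsigned long *flags)
{
	spin_unlock_irqrestore(&example_lock, *flags);
	/* ... sleeping work, e.g. wait_for_completion_timeout() ... */
	spin_lock_irqsave(&example_lock, *flags);
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	do_slow_work_unlocked(&flags);
	spin_unlock_irqrestore(&example_lock, flags);
}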
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 436e787d95b4..d8cf3107df53 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1532,9 +1532,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
err_count = 3;
- /* Some devices get this wrong */
- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
- max_packet = 512;
+ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
+ if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (udev->speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ if (udev->speed == USB_SPEED_FULL) {
+ max_packet = rounddown_pow_of_two(max_packet);
+ max_packet = clamp_val(max_packet, 8, 64);
+ }
+ }
/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
@@ -1930,10 +1936,14 @@ no_bw:
kfree(xhci->port_array);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ kfree(xhci->usb2_rhub.psi);
+ kfree(xhci->usb3_rhub.psi);
xhci->usb2_ports = NULL;
xhci->usb3_ports = NULL;
xhci->port_array = NULL;
+ xhci->usb2_rhub.psi = NULL;
+ xhci->usb3_rhub.psi = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
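For full-speed bulk endpoints the hunk above forces a device-reported wMaxPacketSize to a legal value: rounded down to a power of two and clamped to the 8..64 range. A small sketch of that normalization, outside of any xHCI context (the zero check is an added safeguard, not part of the patch):

#include <linux/kernel.h>	/* clamp_val() */
#include <linux/log2.h>		/* rounddown_pow_of_two() */

/* Normalize a possibly bogus full-speed bulk max packet size to
 * one of the legal values 8, 16, 32 or 64. */
static unsigned int fs_bulk_max_packet(unsigned int reported)
{
	unsigned int max_packet = reported ? reported : 8;

	max_packet = rounddown_pow_of_two(max_packet);
	return clamp_val(max_packet, 8, 64);
}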
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 73f763c4f5f5..144674913c78 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -122,7 +122,9 @@ static void setup_sch_info(struct usb_device *udev,
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
- if (esit_pkts <= sch_ep->esit)
+ if (sch_ep->esit == 1)
+ sch_ep->pkts = esit_pkts;
+ else if (esit_pkts <= sch_ep->esit)
sch_ep->pkts = 1;
else
sch_ep->pkts = roundup_pow_of_two(esit_pkts)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b5140555a8d5..4355fbc36fce 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -53,6 +53,7 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
@@ -170,7 +171,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -468,6 +470,18 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
retval = xhci_resume(xhci, hibernated);
return retval;
}
+
+static void xhci_pci_shutdown(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+ xhci_shutdown(hcd);
+
+ /* Yet another workaround for spurious wakeups at shutdown with HSW */
+ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
#endif /* CONFIG_PM */
/*-------------------------------------------------------------------------*/
@@ -505,6 +519,7 @@ static int __init xhci_pci_init(void)
#ifdef CONFIG_PM
xhci_pci_hc_driver.pci_suspend = xhci_pci_suspend;
xhci_pci_hc_driver.pci_resume = xhci_pci_resume;
+ xhci_pci_hc_driver.shutdown = xhci_pci_shutdown;
#endif
return pci_register_driver(&xhci_pci_driver);
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 248cdaa62397..c76b038c8bea 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -398,6 +398,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = &xhci_plat_pm_ops,
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 0e4535e632ec..89ec9f0905ca 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -84,7 +84,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
- of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+ of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
static int xhci_rcar_is_gen3(struct device *dev)
@@ -192,5 +192,6 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
xhci_rcar_is_gen3(hcd->self.controller))
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0cc271969ffb..61aeab4c759c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -678,6 +678,7 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_segment *seg = td->bounce_seg;
struct urb *urb = td->urb;
+ size_t len;
if (!seg || !urb)
return;
@@ -688,11 +689,14 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
return;
}
- /* for in tranfers we need to copy the data from bounce to sg */
- sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
+ /* for in transfers we need to copy the data from bounce to sg */
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
@@ -1605,9 +1609,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
usb_hcd_resume_root_hub(hcd);
}
- if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
- bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
-
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
@@ -1626,6 +1627,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
bus_state->port_remote_wakeup |= 1 << faked_port_index;
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
+ usb_hcd_start_port_resume(&hcd->self, faked_port_index);
xhci_set_link_state(xhci, port_array, faked_port_index,
XDEV_U0);
/* Need to wait until the next link state change
@@ -1645,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
- if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED_ANY(temp)) {
+ if ((temp & PORT_PLC) &&
+ DEV_SUPERSPEED_ANY(temp) &&
+ ((temp & PORT_PLS_MASK) == XDEV_U0 ||
+ (temp & PORT_PLS_MASK) == XDEV_U1 ||
+ (temp & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* We've just brought the device into U0 through either the
+ /* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
@@ -1660,8 +1665,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (slot_id && xhci->devs[slot_id])
xhci_ring_device(xhci, slot_id);
if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
- bus_state->port_remote_wakeup &=
- ~(1 << faked_port_index);
xhci_test_and_clear_bit(xhci, port_array,
faked_port_index, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -3155,6 +3158,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
unsigned int unalign;
unsigned int max_pkt;
u32 new_buff_len;
+ size_t len;
max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
unalign = (enqd_len + *trb_buff_len) % max_pkt;
@@ -3185,8 +3189,12 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci,
+ "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
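Both bounce-buffer paths now pass urb->num_sgs and check that sg_pcopy_{to,from}_buffer() copied the full length, warning on a short copy instead of silently truncating data. Roughly, with illustrative names:

#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Copy 'len' bytes starting at 'offset' out of an sg list into a bounce
 * buffer and report whether the copy was complete. */
static bool copy_out_to_bounce(struct scatterlist *sg, unsigned int num_sgs,
			       void *bounce, size_t len, size_t offset)
{
	size_t copied;

	copied = sg_pcopy_to_buffer(sg, num_sgs, bounce, len, offset);
	if (copied != len) {
		pr_warn("bounce copy short: %zu of %zu bytes\n", copied, len);
		return false;
	}
	return true;
}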
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7c95779c7a0e..eb9535221800 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -21,6 +21,7 @@
*/
#include <linux/pci.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
@@ -47,7 +48,6 @@ static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
-/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
@@ -64,18 +64,16 @@ MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = readl(ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ ret = readl_poll_timeout_atomic(ptr, result,
+ (result & mask) == done ||
+ result == U32_MAX,
+ 1, usec);
+ if (result == U32_MAX) /* card removed */
+ return -ENODEV;
+
+ return ret;
}
/*
@@ -764,11 +762,8 @@ void xhci_shutdown(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_shutdown completed - status = %x",
readl(&xhci->op_regs->status));
-
- /* Yet another workaround for spurious wakeups at shutdown with HSW */
- if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
- pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}
+EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
@@ -939,7 +934,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
int rc = 0;
- unsigned int delay = XHCI_MAX_HALT_USEC;
+ unsigned int delay = XHCI_MAX_HALT_USEC * 2;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
@@ -991,7 +986,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
command |= CMD_CSS;
writel(command, &xhci->op_regs->command);
if (xhci_handshake(&xhci->op_regs->status,
- STS_SAVE, 0, 10 * 1000)) {
+ STS_SAVE, 0, 20 * 1000)) {
xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
@@ -1051,6 +1046,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
hibernated = true;
if (!hibernated) {
+ /*
+ * Some controllers might lose power during suspend, so wait
+ * for controller not ready bit to clear, just as in xHC init.
+ */
+ retval = xhci_handshake(&xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
+ if (retval) {
+ xhci_warn(xhci, "Controller not ready at resume %d\n",
+ retval);
+ spin_unlock_irq(&xhci->lock);
+ return retval;
+ }
/* step 1: restore register */
xhci_restore_registers(xhci);
/* step 2: initialize command ring buffer */
@@ -4192,7 +4199,6 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
pm_addr = port_array[port_num] + PORTPMSC;
pm_val = readl(pm_addr);
hlpm_addr = port_array[port_num] + PORTHLPMC;
- field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
@@ -4204,6 +4210,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
* default one which works with mixed HIRD and BESL
* systems. See XHCI_DEFAULT_BESL definition in xhci.h
*/
+ field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
if ((field & USB_BESL_SUPPORT) &&
(field & USB_BESL_BASELINE_VALID))
hird = USB_GET_BESL_BASELINE(field);
@@ -4530,12 +4537,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
desc, state, timeout);
- /* If we found we can't enable hub-initiated LPM, or
+ /* If we found we can't enable hub-initiated LPM, and
* the U1 or U2 exit latency was too high to allow
- * device-initiated LPM as well, just stop searching.
+ * device-initiated LPM as well, then we will disable LPM
+ * for this device, so stop searching any further.
*/
- if (alt_timeout == USB3_LPM_DISABLED ||
- alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+ if (alt_timeout == USB3_LPM_DISABLED) {
*timeout = alt_timeout;
return -E2BIG;
}
@@ -4646,10 +4653,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
if (intf->dev.driver) {
driver = to_usb_driver(intf->dev.driver);
if (driver && driver->disable_hub_initiated_lpm) {
- dev_dbg(&udev->dev, "Hub-initiated %s disabled "
- "at request of driver %s\n",
- state_name, driver->name);
- return xhci_get_timeout_no_hub_lpm(udev, state);
+ dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+ state_name, driver->name);
+ timeout = xhci_get_timeout_no_hub_lpm(udev,
+ state);
+ if (timeout == USB3_LPM_DISABLED)
+ return timeout;
}
}
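xhci_handshake() drops its hand-rolled udelay() loop in favour of readl_poll_timeout_atomic() from <linux/iopoll.h>, keeping the all-ones "card removed" check. The shape of that conversion, with a made-up register helper:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Poll atomically, 1 us between reads, until (reg & mask) == done, the
 * register reads back all-ones (device gone), or 'usec' expires. */
static int example_handshake(void __iomem *reg, u32 mask, u32 done, int usec)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(reg, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, usec);
	if (result == U32_MAX)	/* card removed */
		return -ENODEV;

	return ret;	/* 0 on success, -ETIMEDOUT on timeout */
}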
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index aff24f0be38e..8640560171cf 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -311,10 +311,12 @@ struct xhci_op_regs {
*/
#define PORT_PLS_MASK (0xf << 5)
#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
@@ -1868,6 +1870,7 @@ int xhci_run(struct usb_hcd *hcd);
void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
int xhci_disable_slot(struct xhci_hcd *xhci,
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index a4dbb0cd80da..0fecc002fa9f 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -724,6 +724,10 @@ static int mts_usb_probe(struct usb_interface *intf,
}
+ if (ep_in_current != &ep_in_set[2]) {
+ MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+ return -ENODEV;
+ }
if ( ep_out == -1 ) {
MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 47b357760afc..2de07a3653a0 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -46,16 +46,6 @@ config USB_SEVSEG
To compile this driver as a module, choose M here: the
module will be called usbsevseg.
-config USB_RIO500
- tristate "USB Diamond Rio500 support"
- help
- Say Y here if you want to connect a USB Rio500 mp3 player to your
- computer's USB port. Please read <file:Documentation/usb/rio.txt>
- for more information.
-
- To compile this driver as a module, choose M here: the
- module will be called rio500.
-
config USB_LEGOTOWER
tristate "USB Lego Infrared Tower support"
help
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 3d1992750da4..2b21872cd733 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LD) += ldusb.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
-obj-$(CONFIG_USB_RIO500) += rio500.o
obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 564268fca07a..7fb0590187d4 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -80,6 +80,7 @@ struct adu_device {
char serial_number[8];
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char *read_buffer_primary;
int read_buffer_length;
@@ -121,7 +122,7 @@ static void adu_abort_transfers(struct adu_device *dev)
{
unsigned long flags;
- if (dev->udev == NULL)
+ if (dev->disconnected)
return;
/* shutdown transfer */
@@ -151,6 +152,7 @@ static void adu_delete(struct adu_device *dev)
kfree(dev->read_buffer_secondary);
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree(dev);
}
@@ -244,7 +246,7 @@ static int adu_open(struct inode *inode, struct file *file)
}
dev = usb_get_intfdata(interface);
- if (!dev || !dev->udev) {
+ if (!dev) {
retval = -ENODEV;
goto exit_no_device;
}
@@ -327,7 +329,7 @@ static int adu_release(struct inode *inode, struct file *file)
}
adu_release_internal(dev);
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
if (!dev->open_count) /* ... and we're the last user */
adu_delete(dev);
@@ -356,7 +358,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -525,7 +527,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
goto exit_nolock;
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto exit;
@@ -679,11 +681,11 @@ static int adu_probe(struct usb_interface *interface,
mutex_init(&dev->mtx);
spin_lock_init(&dev->buflock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- iface_desc = &interface->altsetting[0];
+ iface_desc = &interface->cur_altsetting[0];
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
@@ -789,19 +791,21 @@ error:
static void adu_disconnect(struct usb_interface *interface)
{
struct adu_device *dev;
- int minor;
dev = usb_get_intfdata(interface);
- mutex_lock(&dev->mtx); /* not interruptible */
- dev->udev = NULL; /* poison */
- minor = dev->minor;
usb_deregister_dev(interface, &adu_class);
- mutex_unlock(&dev->mtx);
+
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
mutex_lock(&adutux_mutex);
usb_set_intfdata(interface, NULL);
+ mutex_lock(&dev->mtx); /* not interruptible */
+ dev->disconnected = 1;
+ mutex_unlock(&dev->mtx);
+
/* if the device is not opened, then we clean up right now */
if (!dev->open_count)
adu_delete(dev);
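adutux, like several drivers further down, switches from poisoning dev->udev to a dedicated disconnected flag plus usb_get_dev()/usb_put_dev() reference counting, so the device pointer stays valid for cleanup after unplug while open file handles fail cleanly. A condensed sketch of the lifetime pattern with a hypothetical private struct (mutex init and error paths elided):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct example_dev {
	struct usb_device *udev;
	struct mutex mtx;		/* assumed mutex_init()ed at probe */
	unsigned long disconnected:1;
	int open_count;
};

static void example_delete(struct example_dev *dev)
{
	usb_put_dev(dev->udev);		/* drop the reference taken at probe */
	kfree(dev);
}

static void example_bind(struct example_dev *dev, struct usb_interface *intf)
{
	/* keep the usb_device alive until example_delete() runs */
	dev->udev = usb_get_dev(interface_to_usbdev(intf));
}

static void example_mark_disconnected(struct example_dev *dev)
{
	mutex_lock(&dev->mtx);
	dev->disconnected = 1;		/* I/O paths test this, not udev == NULL */
	mutex_unlock(&dev->mtx);

	if (!dev->open_count)
		example_delete(dev);
}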
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index b8092bcf89a2..32dc0d9f0519 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -160,8 +160,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
mutex_unlock(&pdata->sysfslock);
-
- return retval;
+
+ if (retval < 0)
+ return retval;
+ else
+ return 0;
}
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
@@ -179,7 +182,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
- brightness = pdata->msgdata[1];
+ if (retval < 2) {
+ if (retval >= 0)
+ retval = -EMSGSIZE;
+ } else {
+ brightness = pdata->msgdata[1];
+ }
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
@@ -321,6 +329,7 @@ error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
+ cancel_delayed_work_sync(&pdata->work);
if (pdata->urbdata)
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index efecb87428b1..b694e200a98e 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -108,6 +108,7 @@ static void chaoskey_free(struct chaoskey *dev)
usb_free_urb(dev->urb);
kfree(dev->name);
kfree(dev->buf);
+ usb_put_intf(dev->interface);
kfree(dev);
}
}
@@ -157,6 +158,8 @@ static int chaoskey_probe(struct usb_interface *interface,
if (dev == NULL)
goto out;
+ dev->interface = usb_get_intf(interface);
+
dev->buf = kmalloc(size, GFP_KERNEL);
if (dev->buf == NULL)
@@ -190,8 +193,6 @@ static int chaoskey_probe(struct usb_interface *interface,
strcat(dev->name, udev->serial);
}
- dev->interface = interface;
-
dev->in_ep = in_ep;
if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
@@ -411,13 +412,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
- if (result < 0)
+ if (result < 0) {
+ usb_kill_urb(dev->urb);
goto out;
+ }
- if (result == 0)
+ if (result == 0) {
result = -ETIMEDOUT;
- else
+ usb_kill_urb(dev->urb);
+ } else {
result = dev->valid;
+ }
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
@@ -553,7 +558,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
static int chaoskey_resume(struct usb_interface *interface)
{
+ struct chaoskey *dev;
+ struct usb_device *udev = interface_to_usbdev(interface);
+
usb_dbg(interface, "resume");
+ dev = usb_get_intfdata(interface);
+
+ /*
+ * We may have lost power.
+ * In that case the device that needs a long time
+ * for the first requests needs an extended timeout
+ * again
+ */
+ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
+ dev->reads_started = false;
+
return 0;
}
#else
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 9a67ae39185b..9cf8a9b16336 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -342,7 +342,7 @@ static int idmouse_probe(struct usb_interface *interface,
int result;
/* check if we have gotten the data or the hid interface */
- iface_desc = &interface->altsetting[0];
+ iface_desc = interface->cur_altsetting;
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 0ef29d202263..1b83946bfb18 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -89,6 +89,7 @@ struct iowarrior {
char chip_serial[9]; /* the serial number string of the chip connected */
int report_size; /* number of bytes in a report */
u16 product_id;
+ struct usb_anchor submitted;
};
/*--------------*/
@@ -248,6 +249,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
kfree(dev->int_in_buffer);
usb_free_urb(dev->int_in_urb);
kfree(dev->read_queue);
+ usb_put_intf(dev->interface);
kfree(dev);
}
@@ -434,11 +436,13 @@ static ssize_t iowarrior_write(struct file *file,
retval = -EFAULT;
goto error;
}
+ usb_anchor_urb(int_out_urb, &dev->submitted);
retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
if (retval) {
dev_dbg(&dev->interface->dev,
"submit error %d for urb nr.%d\n",
retval, atomic_read(&dev->write_busy));
+ usb_unanchor_urb(int_out_urb);
goto error;
}
/* submit was ok */
@@ -776,11 +780,13 @@ static int iowarrior_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->write_wait);
dev->udev = udev;
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ init_usb_anchor(&dev->submitted);
+
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
@@ -888,8 +894,9 @@ static void iowarrior_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
minor = dev->minor;
+ mutex_unlock(&iowarrior_open_disc_lock);
+ /* give back our minor - this will call close() locks need to be dropped at this point*/
- /* give back our minor */
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
@@ -897,19 +904,19 @@ static void iowarrior_disconnect(struct usb_interface *interface)
/* prevent device read, write and ioctl */
dev->present = 0;
- mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_open_disc_lock);
-
if (dev->opened) {
/* There is a process that holds a filedescriptor to the device ,
so we only shutdown read-/write-ops going on.
Deleting the device is postponed until close() was called.
*/
usb_kill_urb(dev->int_in_urb);
+ usb_kill_anchored_urbs(&dev->submitted);
wake_up_interruptible(&dev->read_wait);
wake_up_interruptible(&dev->write_wait);
+ mutex_unlock(&dev->mutex);
} else {
/* no process is using the device, cleanup now */
+ mutex_unlock(&dev->mutex);
iowarrior_delete(dev);
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index c5e3032a4d6b..0f0cdb7b7bf8 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -158,6 +158,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
struct ld_usb {
struct mutex mutex; /* locks this structure */
struct usb_interface* intf; /* save off the usb interface pointer */
+ unsigned long disconnected:1;
int open_count; /* number of times this port has been opened */
@@ -197,12 +198,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
/* shutdown transfer */
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
- if (dev->intf)
- usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
if (dev->interrupt_out_busy)
- if (dev->intf)
- usb_kill_urb(dev->interrupt_out_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
}
/**
@@ -210,8 +209,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
*/
static void ld_usb_delete(struct ld_usb *dev)
{
- ld_usb_abort_transfers(dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
@@ -267,7 +264,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+ if (dev->interrupt_in_running && !dev->buffer_overflow) {
retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->intf->dev,
@@ -387,16 +384,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
goto exit;
}
- if (mutex_lock_interruptible(&dev->mutex)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
+ mutex_lock(&dev->mutex);
if (dev->open_count != 1) {
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->mutex);
/* unlock here as ld_usb_delete frees dev */
@@ -427,7 +421,7 @@ static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->intf)
+ if (dev->disconnected)
return POLLERR | POLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -466,7 +460,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -474,7 +468,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
/* wait for data */
spin_lock_irq(&dev->rbsl);
- if (dev->ring_head == dev->ring_tail) {
+ while (dev->ring_head == dev->ring_tail) {
dev->interrupt_in_done = 0;
spin_unlock_irq(&dev->rbsl);
if (file->f_flags & O_NONBLOCK) {
@@ -484,12 +478,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
if (retval < 0)
goto unlock_exit;
- } else {
- spin_unlock_irq(&dev->rbsl);
+
+ spin_lock_irq(&dev->rbsl);
}
+ spin_unlock_irq(&dev->rbsl);
/* actual_buffer contains actual_length + interrupt_in_buffer */
actual_buffer = (size_t*)(dev->ring_buffer + dev->ring_tail*(sizeof(size_t)+dev->interrupt_in_endpoint_size));
+ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+ retval = -EIO;
+ goto unlock_exit;
+ }
bytes_to_read = min(count, *actual_buffer);
if (bytes_to_read < *actual_buffer)
dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
@@ -500,11 +499,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = -EFAULT;
goto unlock_exit;
}
- dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
-
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
+ dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
+
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
@@ -546,7 +545,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
}
/* verify that the device wasn't unplugged */
- if (dev->intf == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -585,7 +584,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
- USB_CTRL_SET_TIMEOUT * HZ);
+ USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",
@@ -705,7 +704,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
- dev->ring_buffer = kmalloc(ring_buffer_size*(sizeof(size_t)+dev->interrupt_in_endpoint_size), GFP_KERNEL);
+ dev->ring_buffer = kcalloc(ring_buffer_size,
+ sizeof(size_t) + dev->interrupt_in_endpoint_size,
+ GFP_KERNEL);
if (!dev->ring_buffer)
goto error;
dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -768,6 +769,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
/* give back our minor */
usb_deregister_dev(intf, &ld_usb_class);
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->mutex);
/* if the device is not opened, then we clean up right now */
@@ -775,7 +779,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mutex);
ld_usb_delete(dev);
} else {
- dev->intf = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
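ldusb also replaces an open-coded kmalloc(n * size) for its ring buffer with kcalloc(), which zero-fills the memory and fails cleanly if the multiplication would overflow. Stripped down, with placeholder names:

#include <linux/slab.h>

/* Allocate 'slots' ring-buffer entries of 'entry_size' bytes each;
 * kcalloc() checks slots * entry_size for overflow and zeroes the result. */
static void *alloc_ring_buffer(size_t slots, size_t entry_size)
{
	return kcalloc(slots, entry_size, GFP_KERNEL);
}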
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c2e2b2ea32d8..7cac3ee09b09 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -185,7 +185,6 @@ static const struct usb_device_id tower_table[] = {
};
MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -197,6 +196,7 @@ struct lego_usb_tower {
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
+ unsigned long disconnected:1;
char* read_buffer;
size_t read_buffer_length; /* this much came in */
@@ -296,14 +296,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
*/
static inline void tower_delete (struct lego_usb_tower *dev)
{
- tower_abort_transfers (dev);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree (dev->read_buffer);
kfree (dev->interrupt_in_buffer);
kfree (dev->interrupt_out_buffer);
+ usb_put_dev(dev->udev);
kfree (dev);
}
@@ -338,18 +337,14 @@ static int tower_open (struct inode *inode, struct file *file)
goto exit;
}
- mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
-
if (!dev) {
- mutex_unlock(&open_disc_mutex);
retval = -ENODEV;
goto exit;
}
/* lock this device */
if (mutex_lock_interruptible(&dev->lock)) {
- mutex_unlock(&open_disc_mutex);
retval = -ERESTARTSYS;
goto exit;
}
@@ -357,12 +352,9 @@ static int tower_open (struct inode *inode, struct file *file)
/* allow opening only once */
if (dev->open_count) {
- mutex_unlock(&open_disc_mutex);
retval = -EBUSY;
goto unlock_exit;
}
- dev->open_count = 1;
- mutex_unlock(&open_disc_mutex);
/* reset the tower */
result = usb_control_msg (dev->udev,
@@ -402,13 +394,14 @@ static int tower_open (struct inode *inode, struct file *file)
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
dev->interrupt_in_running = 0;
- dev->open_count = 0;
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
+ dev->open_count = 1;
+
unlock_exit:
mutex_unlock(&dev->lock);
@@ -429,22 +422,19 @@ static int tower_release (struct inode *inode, struct file *file)
if (dev == NULL) {
retval = -ENODEV;
- goto exit_nolock;
- }
-
- mutex_lock(&open_disc_mutex);
- if (mutex_lock_interruptible(&dev->lock)) {
- retval = -ERESTARTSYS;
goto exit;
}
+ mutex_lock(&dev->lock);
+
if (dev->open_count != 1) {
dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
__func__);
retval = -ENODEV;
goto unlock_exit;
}
- if (dev->udev == NULL) {
+
+ if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
@@ -462,10 +452,7 @@ static int tower_release (struct inode *inode, struct file *file)
unlock_exit:
mutex_unlock(&dev->lock);
-
exit:
- mutex_unlock(&open_disc_mutex);
-exit_nolock:
return retval;
}
@@ -483,10 +470,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
mb();
- if (dev->udev)
- usb_kill_urb (dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_in_urb);
}
- if (dev->interrupt_out_busy && dev->udev)
+ if (dev->interrupt_out_busy)
usb_kill_urb(dev->interrupt_out_urb);
}
@@ -522,7 +508,7 @@ static unsigned int tower_poll (struct file *file, poll_table *wait)
dev = file->private_data;
- if (!dev->udev)
+ if (dev->disconnected)
return POLLERR | POLLHUP;
poll_wait(file, &dev->read_wait, wait);
@@ -569,7 +555,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -655,7 +641,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
}
/* verify that the device wasn't unplugged */
- if (dev->udev == NULL) {
+ if (dev->disconnected) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
@@ -764,7 +750,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
resubmit:
/* resubmit if we're still running */
- if (dev->interrupt_in_running && dev->udev) {
+ if (dev->interrupt_in_running) {
retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
if (retval)
dev_err(&dev->udev->dev,
@@ -830,8 +816,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
mutex_init(&dev->lock);
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->open_count = 0;
+ dev->disconnected = 0;
dev->read_buffer = NULL;
dev->read_buffer_length = 0;
@@ -911,8 +898,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
- if (result < 0) {
- dev_err(idev, "LEGO USB Tower get version control request failed\n");
+ if (result != sizeof(*get_version_reply)) {
+ if (result >= 0)
+ result = -EIO;
+ dev_err(idev, "get version request failed: %d\n", result);
retval = result;
goto error;
}
@@ -930,7 +919,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
- usb_set_intfdata (interface, NULL);
goto error;
}
dev->minor = interface->minor;
@@ -962,23 +950,24 @@ static void tower_disconnect (struct usb_interface *interface)
int minor;
dev = usb_get_intfdata (interface);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata (interface, NULL);
minor = dev->minor;
- /* give back our minor */
+ /* give back our minor and prevent further open() */
usb_deregister_dev (interface, &tower_class);
+ /* stop I/O */
+ usb_poison_urb(dev->interrupt_in_urb);
+ usb_poison_urb(dev->interrupt_out_urb);
+
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
tower_delete (dev);
} else {
- dev->udev = NULL;
+ dev->disconnected = 1;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
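The tower probe now treats a short usb_control_msg() transfer as an error rather than accepting any non-negative return, mapping a partial read to -EIO. In general terms, with placeholder request parameters:

#include <linux/errno.h>
#include <linux/usb.h>

/* usb_control_msg() returns the number of bytes transferred or a negative
 * errno; callers that need the whole reply must also check the length. */
static int read_full_reply(struct usb_device *udev, void *reply, int len)
{
	int result;

	result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 0x01,		/* placeholder bRequest */
				 USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
				 0, 0, reply, len, 1000);
	if (result != len)
		return result < 0 ? result : -EIO;

	return 0;
}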
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644
index 13731d512624..000000000000
--- a/drivers/usb/misc/rio500.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/* -*- linux-c -*- */
-
-/*
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- *
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003 replaced lock/unlock kernel with up/down
- * Daniele Bellucci bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.1"
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR 64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
- struct usb_device *rio_dev; /* init: probe_rio */
- unsigned int ifnum; /* Interface number of the USB device */
- int isopen; /* nz if open */
- int present; /* Device is present on the bus */
- char *obuf, *ibuf; /* transfer buffers */
- char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
- wait_queue_head_t wait_q; /* for timeouts */
- struct mutex lock; /* general race avoidance */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- /* against disconnect() */
- mutex_lock(&rio500_mutex);
- mutex_lock(&(rio->lock));
-
- if (rio->isopen || !rio->present) {
- mutex_unlock(&(rio->lock));
- mutex_unlock(&rio500_mutex);
- return -EBUSY;
- }
- rio->isopen = 1;
-
- init_waitqueue_head(&rio->wait_q);
-
- mutex_unlock(&(rio->lock));
-
- dev_info(&rio->rio_dev->dev, "Rio opened.\n");
- mutex_unlock(&rio500_mutex);
-
- return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- rio->isopen = 0;
-
- dev_info(&rio->rio_dev->dev, "Rio closed.\n");
- return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct RioCommand rio_cmd;
- struct rio_usb_data *rio = &rio_instance;
- void __user *data;
- unsigned char *buffer;
- int result, requesttype;
- int retries;
- int retval=0;
-
- mutex_lock(&(rio->lock));
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- retval = -ENODEV;
- goto err_out;
- }
-
- switch (cmd) {
- case RIO_RECV_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- retval = -EFAULT;
- free_page((unsigned long) buffer);
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_IN |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_rcvctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d (data=%02x)\n",
- result, buffer[0]);
- if (copy_to_user(rio_cmd.buffer, buffer,
- rio_cmd.length)) {
- free_page((unsigned long) buffer);
- retval = -EFAULT;
- goto err_out;
- }
- retries = 0;
- }
-
- /* rio_cmd.buffer contains a raw stream of single byte
- data which has been returned from rio. Data is
- interpreted at application level. For data that
- will be cast to data types longer than 1 byte, data
- will be little_endian and will potentially need to
- be swapped at the app level */
-
- }
- free_page((unsigned long) buffer);
- break;
-
- case RIO_SEND_COMMAND:
- data = (void __user *) arg;
- if (data == NULL)
- break;
- if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
- retval = -EFAULT;
- goto err_out;
- }
- if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
- retval = -EINVAL;
- goto err_out;
- }
- buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (buffer == NULL) {
- retval = -ENOMEM;
- goto err_out;
- }
- if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
- free_page((unsigned long)buffer);
- retval = -EFAULT;
- goto err_out;
- }
-
- requesttype = rio_cmd.requesttype | USB_DIR_OUT |
- USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- dev_dbg(&rio->rio_dev->dev,
- "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
- requesttype, rio_cmd.request, rio_cmd.value,
- rio_cmd.index, rio_cmd.length);
- /* Send rio control message */
- retries = 3;
- while (retries) {
- result = usb_control_msg(rio->rio_dev,
- usb_sndctrlpipe(rio-> rio_dev, 0),
- rio_cmd.request,
- requesttype,
- rio_cmd.value,
- rio_cmd.index, buffer,
- rio_cmd.length,
- jiffies_to_msecs(rio_cmd.timeout));
- if (result == -ETIMEDOUT)
- retries--;
- else if (result < 0) {
- dev_err(&rio->rio_dev->dev,
- "Error executing ioctrl. code = %d\n",
- result);
- retries = 0;
- } else {
- dev_dbg(&rio->rio_dev->dev,
- "Executed ioctl. Result = %d\n", result);
- retries = 0;
-
- }
-
- }
- free_page((unsigned long) buffer);
- break;
-
- default:
- retval = -ENOTTY;
- break;
- }
-
-
-err_out:
- mutex_unlock(&(rio->lock));
- return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
- size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
-
- unsigned long copy_size;
- unsigned long bytes_written = 0;
- unsigned int partial;
-
- int result = 0;
- int maxretry;
- int errn = 0;
- int intr;
-
- intr = mutex_lock_interruptible(&(rio->lock));
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&(rio->lock));
- return -ENODEV;
- }
-
-
-
- do {
- unsigned long thistime;
- char *obuf = rio->obuf;
-
- thistime = copy_size =
- (count >= OBUF_SIZE) ? OBUF_SIZE : count;
- if (copy_from_user(rio->obuf, buffer, copy_size)) {
- errn = -EFAULT;
- goto error;
- }
- maxretry = 5;
- while (thistime) {
- if (!rio->rio_dev) {
- errn = -ENODEV;
- goto error;
- }
- if (signal_pending(current)) {
- mutex_unlock(&(rio->lock));
- return bytes_written ? bytes_written : -EINTR;
- }
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_sndbulkpipe(rio->rio_dev, 2),
- obuf, thistime, &partial, 5000);
-
- dev_dbg(&rio->rio_dev->dev,
- "write stats: result:%d thistime:%lu partial:%u\n",
- result, thistime, partial);
-
- if (result == -ETIMEDOUT) { /* NAK - so hold for a while */
- if (!maxretry--) {
- errn = -ETIME;
- goto error;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (!result && partial) {
- obuf += partial;
- thistime -= partial;
- } else
- break;
- }
- if (result) {
- dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
- result);
- errn = -EIO;
- goto error;
- }
- bytes_written += copy_size;
- count -= copy_size;
- buffer += copy_size;
- } while (count > 0);
-
- mutex_unlock(&(rio->lock));
-
- return bytes_written ? bytes_written : -EIO;
-
-error:
- mutex_unlock(&(rio->lock));
- return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
- DEFINE_WAIT(wait);
- struct rio_usb_data *rio = &rio_instance;
- ssize_t read_count;
- unsigned int partial;
- int this_read;
- int result;
- int maxretry = 10;
- char *ibuf;
- int intr;
-
- intr = mutex_lock_interruptible(&(rio->lock));
- if (intr)
- return -EINTR;
- /* Sanity check to make sure rio is connected, powered, etc */
- if (rio->present == 0 || rio->rio_dev == NULL) {
- mutex_unlock(&(rio->lock));
- return -ENODEV;
- }
-
- ibuf = rio->ibuf;
-
- read_count = 0;
-
-
- while (count > 0) {
- if (signal_pending(current)) {
- mutex_unlock(&(rio->lock));
- return read_count ? read_count : -EINTR;
- }
- if (!rio->rio_dev) {
- mutex_unlock(&(rio->lock));
- return -ENODEV;
- }
- this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
- result = usb_bulk_msg(rio->rio_dev,
- usb_rcvbulkpipe(rio->rio_dev, 1),
- ibuf, this_read, &partial,
- 8000);
-
- dev_dbg(&rio->rio_dev->dev,
- "read stats: result:%d this_read:%u partial:%u\n",
- result, this_read, partial);
-
- if (partial) {
- count = this_read = partial;
- } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
- if (!maxretry--) {
- mutex_unlock(&(rio->lock));
- dev_err(&rio->rio_dev->dev,
- "read_rio: maxretry timeout\n");
- return -ETIME;
- }
- prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
- schedule_timeout(NAK_TIMEOUT);
- finish_wait(&rio->wait_q, &wait);
- continue;
- } else if (result != -EREMOTEIO) {
- mutex_unlock(&(rio->lock));
- dev_err(&rio->rio_dev->dev,
- "Read Whoops - result:%u partial:%u this_read:%u\n",
- result, partial, this_read);
- return -EIO;
- } else {
- mutex_unlock(&(rio->lock));
- return (0);
- }
-
- if (this_read) {
- if (copy_to_user(buffer, ibuf, this_read)) {
- mutex_unlock(&(rio->lock));
- return -EFAULT;
- }
- count -= this_read;
- read_count += this_read;
- buffer += this_read;
- }
- }
- mutex_unlock(&(rio->lock));
- return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
- .owner = THIS_MODULE,
- .read = read_rio,
- .write = write_rio,
- .unlocked_ioctl = ioctl_rio,
- .open = open_rio,
- .release = close_rio,
- .llseek = noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
- .name = "rio500%d",
- .fops = &usb_rio_fops,
- .minor_base = RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct rio_usb_data *rio = &rio_instance;
- int retval;
-
- dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-
- retval = usb_register_dev(intf, &usb_rio_class);
- if (retval) {
- dev_err(&dev->dev,
- "Not able to get a minor for this device.\n");
- return -ENOMEM;
- }
-
- rio->rio_dev = dev;
-
- if (!(rio->obuf = kmalloc(OBUF_SIZE, GFP_KERNEL))) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the output buffer\n");
- usb_deregister_dev(intf, &usb_rio_class);
- return -ENOMEM;
- }
- dev_dbg(&intf->dev, "obuf address:%p\n", rio->obuf);
-
- if (!(rio->ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL))) {
- dev_err(&dev->dev,
- "probe_rio: Not enough memory for the input buffer\n");
- usb_deregister_dev(intf, &usb_rio_class);
- kfree(rio->obuf);
- return -ENOMEM;
- }
- dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
-
- mutex_init(&(rio->lock));
-
- usb_set_intfdata (intf, rio);
- rio->present = 1;
-
- return 0;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
- struct rio_usb_data *rio = usb_get_intfdata (intf);
-
- usb_set_intfdata (intf, NULL);
- mutex_lock(&rio500_mutex);
- if (rio) {
- usb_deregister_dev(intf, &usb_rio_class);
-
- mutex_lock(&(rio->lock));
- if (rio->isopen) {
- rio->isopen = 0;
- /* better let it finish - the release will do whats needed */
- rio->rio_dev = NULL;
- mutex_unlock(&(rio->lock));
- mutex_unlock(&rio500_mutex);
- return;
- }
- kfree(rio->ibuf);
- kfree(rio->obuf);
-
- dev_info(&intf->dev, "USB Rio disconnected.\n");
-
- rio->present = 0;
- mutex_unlock(&(rio->lock));
- }
- mutex_unlock(&rio500_mutex);
-}
-
-static const struct usb_device_id rio_table[] = {
- { USB_DEVICE(0x0841, 1) }, /* Rio 500 */
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
- .name = "rio500",
- .probe = probe_rio,
- .disconnect = disconnect_rio,
- .id_table = rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644
index 359abc98e706..000000000000
--- a/drivers/usb/misc/rio500_usb.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* ----------------------------------------------------------------------
-
- Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- ---------------------------------------------------------------------- */
-
-
-
-#define RIO_SEND_COMMAND 0x1
-#define RIO_RECV_COMMAND 0x2
-
-#define RIO_DIR_OUT 0x0
-#define RIO_DIR_IN 0x1
-
-struct RioCommand {
- short length;
- int request;
- int requesttype;
- int value;
- int index;
- void __user *buffer;
- int timeout;
-};
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 05bd39d62568..b7a4ba020893 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3041,6 +3041,13 @@ static int sisusb_probe(struct usb_interface *intf,
mutex_init(&(sisusb->lock));
+ sisusb->sisusb_dev = dev;
+ sisusb->vrambase = SISUSB_PCI_MEMBASE;
+ sisusb->mmiobase = SISUSB_PCI_MMIOBASE;
+ sisusb->mmiosize = SISUSB_PCI_MMIOSIZE;
+ sisusb->ioportbase = SISUSB_PCI_IOPORTBASE;
+ /* Everything else is zero */
+
/* Register device */
retval = usb_register_dev(intf, &usb_sisusb_class);
if (retval) {
@@ -3051,13 +3058,7 @@ static int sisusb_probe(struct usb_interface *intf,
goto error_1;
}
- sisusb->sisusb_dev = dev;
- sisusb->minor = intf->minor;
- sisusb->vrambase = SISUSB_PCI_MEMBASE;
- sisusb->mmiobase = SISUSB_PCI_MMIOBASE;
- sisusb->mmiosize = SISUSB_PCI_MMIOSIZE;
- sisusb->ioportbase = SISUSB_PCI_IOPORTBASE;
- /* Everything else is zero */
+ sisusb->minor = intf->minor;
/* Allocate buffers */
sisusb->ibufsize = SISUSB_IBUF_SIZE;
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 9f48419abc46..35a736eaf864 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -56,6 +57,8 @@ struct usb_lcd {
using up all RAM */
struct usb_anchor submitted; /* URBs to wait for
before suspend */
+ struct rw_semaphore io_rwsem;
+ unsigned long disconnected:1;
};
#define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
@@ -141,6 +144,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
dev = file->private_data;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto out_up_io;
+ }
+
/* do a blocking bulk read to get data from the device */
retval = usb_bulk_msg(dev->udev,
usb_rcvbulkpipe(dev->udev,
@@ -157,6 +167,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
retval = bytes_read;
}
+out_up_io:
+ up_read(&dev->io_rwsem);
+
return retval;
}
@@ -236,11 +249,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
if (r < 0)
return -EINTR;
+ down_read(&dev->io_rwsem);
+
+ if (dev->disconnected) {
+ retval = -ENODEV;
+ goto err_up_io;
+ }
+
/* create a urb, and a buffer for it, and copy the data to the urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
retval = -ENOMEM;
- goto err_no_buf;
+ goto err_up_io;
}
buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -277,6 +297,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
the USB core will eventually free it entirely */
usb_free_urb(urb);
+ up_read(&dev->io_rwsem);
exit:
return count;
error_unanchor:
@@ -284,7 +305,8 @@ error_unanchor:
error:
usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+ up_read(&dev->io_rwsem);
up(&dev->limit_sem);
return retval;
}
@@ -325,6 +347,7 @@ static int lcd_probe(struct usb_interface *interface,
goto error;
kref_init(&dev->kref);
sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+ init_rwsem(&dev->io_rwsem);
init_usb_anchor(&dev->submitted);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -432,6 +455,12 @@ static void lcd_disconnect(struct usb_interface *interface)
/* give back our minor */
usb_deregister_dev(interface, &lcd_class);
+ down_write(&dev->io_rwsem);
+ dev->disconnected = 1;
+ up_write(&dev->io_rwsem);
+
+ usb_kill_anchored_urbs(&dev->submitted);
+
/* decrement our usage count */
kref_put(&dev->kref, lcd_delete);
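usblcd guards its read and write paths with a read-write semaphore: I/O takes it for reading and checks a disconnected flag, while disconnect takes it for writing to flip the flag and then kills any anchored URBs. Schematically, with a hypothetical private struct:

#include <linux/errno.h>
#include <linux/rwsem.h>

struct example_dev {
	struct rw_semaphore io_rwsem;	/* init_rwsem() at probe time */
	unsigned long disconnected:1;
};

static int example_io(struct example_dev *dev)
{
	int retval = 0;

	down_read(&dev->io_rwsem);	/* concurrent I/O paths may run */
	if (dev->disconnected) {
		retval = -ENODEV;
		goto out;
	}
	/* ... talk to the hardware ... */
out:
	up_read(&dev->io_rwsem);
	return retval;
}

static void example_disconnect(struct example_dev *dev)
{
	down_write(&dev->io_rwsem);	/* waits for in-flight I/O to drain */
	dev->disconnected = 1;
	up_write(&dev->io_rwsem);
}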
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 1e672343bcd6..2350502f9054 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -64,6 +64,7 @@ struct usb_yurex {
struct kref kref;
struct mutex io_mutex;
+ unsigned long disconnected:1;
struct fasync_struct *async_queue;
wait_queue_head_t waitq;
@@ -96,7 +97,6 @@ static void yurex_delete(struct kref *kref)
dev_dbg(&dev->interface->dev, "%s\n", __func__);
- usb_put_dev(dev->udev);
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
kfree(dev->cntl_req);
@@ -112,6 +112,8 @@ static void yurex_delete(struct kref *kref)
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
+ usb_put_intf(dev->interface);
+ usb_put_dev(dev->udev);
kfree(dev);
}
@@ -136,6 +138,7 @@ static void yurex_interrupt(struct urb *urb)
switch (status) {
case 0: /*success*/
break;
+ /* The device is terminated or messed up, give up */
case -EOVERFLOW:
dev_err(&dev->interface->dev,
"%s - overflow with length %d, actual length is %d\n",
@@ -144,12 +147,13 @@ static void yurex_interrupt(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
case -EILSEQ:
- /* The device is terminated, clean up */
+ case -EPROTO:
+ case -ETIME:
return;
default:
dev_err(&dev->interface->dev,
"%s - unknown status received: %d\n", __func__, status);
- goto exit;
+ return;
}
/* handle received message */
@@ -181,7 +185,6 @@ static void yurex_interrupt(struct urb *urb)
break;
}
-exit:
retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
if (retval) {
dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -208,7 +211,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
init_waitqueue_head(&dev->waitq);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
iface_desc = interface->cur_altsetting;
@@ -324,8 +327,10 @@ static void yurex_disconnect(struct usb_interface *interface)
usb_deregister_dev(interface, &yurex_class);
/* prevent more I/O from starting */
+ usb_poison_urb(dev->urb);
+ usb_poison_urb(dev->cntl_urb);
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
/* wakeup waiters */
@@ -413,7 +418,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
dev = file->private_data;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
return -ENODEV;
}
@@ -448,7 +453,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
goto error;
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* already disconnected */
+ if (dev->disconnected) { /* already disconnected */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
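The yurex hunks apply the same disconnected-flag rule and also fix object lifetime: probe now pins the interface with usb_get_intf() and the kref release callback drops it last, so everything the release path touches stays valid until the final kref_put(). A rough stand-alone sketch of that get/put discipline using C11 atomics (obj_get()/obj_put() are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative refcounted object; kref_get()/kref_put() work the same way. */
struct obj {
	atomic_int refcount;
	const char *name;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* the release step runs only when the last reference is dropped */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("releasing %s\n", o->name);
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* probe's initial reference */
	o->name = "yurex-like device";

	obj_get(o);	/* e.g. the reference pinned for the release path */
	obj_put(o);	/* disconnect drops its reference ... */
	obj_put(o);	/* ... and the final put triggers the release */
	return 0;
}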
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 80b37d214beb..bd1a8dc285f5 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1036,12 +1036,18 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
mutex_lock(&rp->fetch_lock);
spin_lock_irqsave(&rp->b_lock, flags);
- mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
- kfree(rp->b_vec);
- rp->b_vec = vec;
- rp->b_size = size;
- rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
- rp->cnt_lost = 0;
+ if (rp->mmap_active) {
+ mon_free_buff(vec, size/CHUNK_SIZE);
+ kfree(vec);
+ ret = -EBUSY;
+ } else {
+ mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
+ kfree(rp->b_vec);
+ rp->b_vec = vec;
+ rp->b_size = size;
+ rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
+ rp->cnt_lost = 0;
+ }
spin_unlock_irqrestore(&rp->b_lock, flags);
mutex_unlock(&rp->fetch_lock);
}
@@ -1213,13 +1219,21 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait)
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
struct mon_reader_bin *rp = vma->vm_private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active++;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
static void mon_bin_vma_close(struct vm_area_struct *vma)
{
+ unsigned long flags;
+
struct mon_reader_bin *rp = vma->vm_private_data;
+ spin_lock_irqsave(&rp->b_lock, flags);
rp->mmap_active--;
+ spin_unlock_irqrestore(&rp->b_lock, flags);
}
/*
@@ -1231,16 +1245,12 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long offset, chunk_idx;
struct page *pageptr;
- mutex_lock(&rp->fetch_lock);
offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= rp->b_size) {
- mutex_unlock(&rp->fetch_lock);
+ if (offset >= rp->b_size)
return VM_FAULT_SIGBUS;
- }
chunk_idx = offset / CHUNK_SIZE;
pageptr = rp->b_vec[chunk_idx].pg;
get_page(pageptr);
- mutex_unlock(&rp->fetch_lock);
vmf->page = pageptr;
return 0;
}
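In mon_bin the resize ioctl now refuses with -EBUSY while the buffer is mmapped, and mmap_active is counted under b_lock, which is what lets the fault handler drop fetch_lock. A small userspace analogue of "no resize while mapped" (struct ring and its helpers are made-up names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative ring buffer; mon_bin keeps a vector of chunks instead. */
struct ring {
	pthread_mutex_t lock;
	char *buf;
	size_t size;
	int mmap_active;	/* number of live mappings */
};

/* Resize must not pull the buffer out from under an existing mapping. */
static int ring_resize(struct ring *r, size_t new_size)
{
	char *nbuf = malloc(new_size);
	int ret = 0;

	if (!nbuf)
		return -ENOMEM;

	pthread_mutex_lock(&r->lock);
	if (r->mmap_active) {
		free(nbuf);		/* give back the unused new buffer */
		ret = -EBUSY;
	} else {
		free(r->buf);
		r->buf = nbuf;
		r->size = new_size;
	}
	pthread_mutex_unlock(&r->lock);
	return ret;
}

static void ring_map(struct ring *r)
{
	pthread_mutex_lock(&r->lock);
	r->mmap_active++;
	pthread_mutex_unlock(&r->lock);
}

static void ring_unmap(struct ring *r)
{
	pthread_mutex_lock(&r->lock);
	r->mmap_active--;
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct ring r = { .size = 64, .mmap_active = 0 };

	pthread_mutex_init(&r.lock, NULL);
	r.buf = malloc(r.size);

	ring_map(&r);
	printf("resize while mapped: %d\n", ring_resize(&r, 128));	/* -EBUSY */
	ring_unmap(&r);
	printf("resize after unmap:  %d\n", ring_resize(&r, 128));	/* 0 */
	free(r.buf);
	return 0;
}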
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 579aa9accafc..c232229162e0 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1832,6 +1832,9 @@ static const struct attribute_group musb_attr_group = {
#define MUSB_QUIRK_B_INVALID_VBUS_91 (MUSB_DEVCTL_BDEVICE | \
(2 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
+#define MUSB_QUIRK_B_DISCONNECT_99 (MUSB_DEVCTL_BDEVICE | \
+ (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
+ MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19 ((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
MUSB_DEVCTL_SESSION)
@@ -1854,6 +1857,11 @@ static void musb_pm_runtime_check_session(struct musb *musb)
s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
+ case MUSB_QUIRK_B_DISCONNECT_99:
+ musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries--) {
musb_dbg(musb,
@@ -2309,6 +2317,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_platform_disable(musb);
musb_generic_disable(musb);
+ /* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
+ musb_writeb(musb->mregs, MUSB_POWER, 0);
+
/* Init IRQ workqueue before request_irq */
INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 19b5f08cb423..01ef25bd37e3 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1494,10 +1494,7 @@ done:
* We need to map sg if the transfer_buffer is
* NULL.
*/
- if (!urb->transfer_buffer)
- qh->use_sg = true;
-
- if (qh->use_sg) {
+ if (!urb->transfer_buffer) {
/* sg_miter_start is already done in musb_ep_program */
if (!sg_miter_next(&qh->sg_miter)) {
dev_err(musb->controller, "error: sg list empty\n");
@@ -1505,9 +1502,8 @@ done:
status = -EINVAL;
goto done;
}
- urb->transfer_buffer = qh->sg_miter.addr;
length = min_t(u32, length, qh->sg_miter.length);
- musb_write_fifo(hw_ep, length, urb->transfer_buffer);
+ musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
qh->sg_miter.consumed = length;
sg_miter_stop(&qh->sg_miter);
} else {
@@ -1516,11 +1512,6 @@ done:
qh->segsize = length;
- if (qh->use_sg) {
- if (offset + length >= urb->transfer_buffer_length)
- qh->use_sg = false;
- }
-
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
@@ -2040,8 +2031,10 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
- if (qh->use_sg)
+ if (qh->use_sg) {
qh->use_sg = false;
+ urb->transfer_buffer = NULL;
+ }
if (urb->status == -EINPROGRESS)
urb->status = status;
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 512108e22d2b..1dc35ab31275 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -399,7 +399,7 @@ struct dma_controller *musbhs_dma_controller_create(struct musb *musb,
controller->controller.channel_abort = dma_channel_abort;
if (request_irq(irq, dma_controller_irq, 0,
- dev_name(musb->controller), &controller->controller)) {
+ dev_name(musb->controller), controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
musb_dma_controller_destroy(&controller->controller);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index e8be8e39ab8f..457ad33f4caa 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -388,8 +388,6 @@ static const struct musb_platform_ops omap2430_ops = {
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_vbus = omap2430_musb_set_vbus,
-
.enable = omap2430_musb_enable,
.disable = omap2430_musb_disable,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 20bcb2cf6a76..db391301a99f 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,7 +19,7 @@ config AB8500_USB
in host mode, low speed.
config FSL_USB2_OTG
- bool "Freescale USB OTG Transceiver Driver"
+ tristate "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index a72e8d670adc..cf0b67433ac9 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -422,7 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
- cancel_delayed_work(&twl->get_status_work);
+ cancel_delayed_work_sync(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8c5fc12ad778..8424c165f732 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -163,11 +163,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
@@ -213,6 +214,7 @@ struct usbhs_priv;
/* DCPCTR */
#define BSTS (1 << 15) /* Buffer Status */
#define SUREQ (1 << 14) /* Sending SETUP Token */
+#define INBUFM (1 << 14) /* (PIPEnCTR) Transfer Buffer Monitor */
#define CSSTS (1 << 12) /* CSSTS Status */
#define ACLRM (1 << 9) /* Buffer Auto-Clear Mode */
#define SQCLR (1 << 8) /* Toggle Bit Clear */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 968ade5a35f5..f6a1ae8abb21 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -98,7 +98,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
list_del_init(&pkt->node);
}
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
if (list_empty(&pipe->list))
return NULL;
@@ -821,9 +821,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
}
static void usbhsf_dma_complete(void *arg);
-static void xfer_work(struct work_struct *work)
+static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
- struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_fifo *fifo;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -831,12 +830,10 @@ static void xfer_work(struct work_struct *work)
struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
- unsigned long flags;
- usbhs_lock(priv, flags);
fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)
- goto xfer_work_end;
+ return;
chan = usbhsf_dma_chan_get(fifo, pkt);
dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
@@ -845,7 +842,7 @@ static void xfer_work(struct work_struct *work)
pkt->trans, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
- goto xfer_work_end;
+ return;
desc->callback = usbhsf_dma_complete;
desc->callback_param = pipe;
@@ -853,7 +850,7 @@ static void xfer_work(struct work_struct *work)
pkt->cookie = dmaengine_submit(desc);
if (pkt->cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
- goto xfer_work_end;
+ return;
}
dev_dbg(dev, " %s %d (%d/ %d)\n",
@@ -864,8 +861,17 @@ static void xfer_work(struct work_struct *work)
dma_async_issue_pending(chan);
usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
+}
+
+static void xfer_work(struct work_struct *work)
+{
+ struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ unsigned long flags;
-xfer_work_end:
+ usbhs_lock(priv, flags);
+ usbhsf_dma_xfer_preparing(pkt);
usbhs_unlock(priv, flags);
}
@@ -918,8 +924,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
usbhsf_tx_irq_ctrl(pipe, 0);
- INIT_WORK(&pkt->work, xfer_work);
- schedule_work(&pkt->work);
+ /* FIXME: Workaround so the USB-DMAC driver can be used in atomic context */
+ if (usbhs_get_dparam(priv, has_usb_dmac)) {
+ usbhsf_dma_xfer_preparing(pkt);
+ } else {
+ INIT_WORK(&pkt->work, xfer_work);
+ schedule_work(&pkt->work);
+ }
return 0;
@@ -1025,8 +1036,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
pkt->trans = pkt->length;
- INIT_WORK(&pkt->work, xfer_work);
- schedule_work(&pkt->work);
+ usbhsf_dma_xfer_preparing(pkt);
return 0;
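The fifo.c refactor splits the work function into a body that assumes the caller already holds the driver lock (usbhsf_dma_xfer_preparing()) and a thin locking wrapper (xfer_work()), so the has_usb_dmac path can run the body directly in atomic context. A generic sketch of that locked-body/wrapper split using a pthread mutex (names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Body that assumes the caller already holds `lock` (like usbhsf_dma_xfer_preparing()). */
static void prepare_transfer_locked(int pkt)
{
	/* ... program the DMA descriptor for `pkt` here ... */
	printf("prepared packet %d\n", pkt);
}

/* Wrapper used from the deferred-work path (like xfer_work()). */
static void prepare_transfer(int pkt)
{
	pthread_mutex_lock(&lock);
	prepare_transfer_locked(pkt);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	/* Path A: caller does not hold the lock yet. */
	prepare_transfer(1);

	/* Path B: caller already holds the lock and calls the body directly. */
	pthread_mutex_lock(&lock);
	prepare_transfer_locked(2);
	pthread_mutex_unlock(&lock);
	return 0;
}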
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 8b98507d7abc..c1fb39252b23 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -106,5 +106,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
#endif /* RENESAS_USB_FIFO_H */
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5984fb134cf4..b0397bcfe1f6 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -465,12 +465,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
@@ -729,8 +735,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
unsigned long flags;
-
- usbhsg_pipe_disable(uep);
+ int ret = 0;
dev_dbg(dev, "set halt %d (pipe %d)\n",
halt, usbhs_pipe_number(pipe));
@@ -738,6 +743,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
/******************** spin lock ********************/
usbhs_lock(priv, flags);
+ /*
+ * According to usb_ep_set_halt()'s description, this function should
+ * return -EAGAIN if the IN endpoint has queued packets or buffered data.
+ * Note that usbhs_pipe_is_dir_in() returns false if the pipe is an
+ * IN endpoint in gadget mode.
+ */
+ if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+ usbhs_pipe_contains_transmittable_data(pipe))) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
if (halt)
usbhs_pipe_stall(pipe);
else
@@ -748,10 +765,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
else
usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
+out:
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- return 0;
+ return ret;
}
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
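__usbhsg_ep_set_halt_wedge() now honours the usb_ep_set_halt() contract: if the IN endpoint still has queued packets or data in the FIFO it returns -EAGAIN instead of stalling, and the check and the stall happen under one lock. A compact sketch of that rule (struct endpoint and ep_set_halt() are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative endpoint state; the driver checks its packet list and INBUFM. */
struct endpoint {
	pthread_mutex_t lock;
	int queued_packets;	/* data not yet sent to the host */
	int halted;
};

static int ep_set_halt(struct endpoint *ep)
{
	int ret = 0;

	pthread_mutex_lock(&ep->lock);
	if (ep->queued_packets) {
		ret = -EAGAIN;	/* per usb_ep_set_halt(): don't stall with data pending */
		goto out;
	}
	ep->halted = 1;
out:
	pthread_mutex_unlock(&ep->lock);
	return ret;
}

int main(void)
{
	struct endpoint ep = { .queued_packets = 1, .halted = 0 };

	pthread_mutex_init(&ep.lock, NULL);
	printf("halt with pending data: %d\n", ep_set_halt(&ep));	/* -EAGAIN */
	ep.queued_packets = 0;
	printf("halt when drained:      %d\n", ep_set_halt(&ep));	/* 0 */
	return 0;
}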
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 9396a8c14af8..8db4ca7d5d45 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -286,6 +286,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
return -EBUSY;
}
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+ u16 val;
+
+ /* Do not support for DCP pipe */
+ if (usbhs_pipe_is_dcp(pipe))
+ return false;
+
+ val = usbhsp_pipectrl_get(pipe);
+ if (val & INBUFM)
+ return true;
+
+ return false;
+}
+
/*
* PID ctrl
*/
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 95185fdb29b1..e4144704ee02 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -90,6 +90,7 @@ void usbhs_pipe_init(struct usbhs_priv *priv,
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
void usbhs_pipe_clear(struct usbhs_pipe *pipe);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 9a2c0c76d11b..0291dc7cd284 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -555,9 +555,13 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
- struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct ch341_private *priv;
int ret;
+ priv = usb_get_serial_port_data(port);
+ if (!priv)
+ return 0;
+
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7bbf2ca73f68..613544d25fad 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -77,6 +77,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
@@ -121,6 +122,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index bbeeb2bd55a8..2c915be1db4c 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -773,7 +773,7 @@ send:
usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
- port->interrupt_out_buffer, port->interrupt_out_size,
+ port->interrupt_out_buffer, actual_size,
cypress_write_int_callback, port, priv->write_urb_interval);
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (result) {
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 972f5a5fe577..1f20fa0a67c0 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -560,9 +560,12 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
static void f81232_close(struct usb_serial_port *port)
{
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+
f81232_port_disable(port);
usb_serial_generic_close(port);
usb_kill_urb(port->interrupt_in_urb);
+ flush_work(&port_priv->interrupt_work);
}
static void f81232_dtr_rts(struct usb_serial_port *port, int on)
@@ -656,6 +659,40 @@ static int f81232_port_remove(struct usb_serial_port *port)
return 0;
}
+static int f81232_suspend(struct usb_serial *serial, pm_message_t message)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct f81232_private *port_priv = usb_get_serial_port_data(port);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ usb_kill_urb(port->interrupt_in_urb);
+
+ if (port_priv)
+ flush_work(&port_priv->interrupt_work);
+
+ return 0;
+}
+
+static int f81232_resume(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ int result;
+
+ if (tty_port_initialized(&port->port)) {
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (result) {
+ dev_err(&port->dev, "submit interrupt urb failed: %d\n",
+ result);
+ return result;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
+}
+
static struct usb_serial_driver f81232_device = {
.driver = {
.owner = THIS_MODULE,
@@ -679,6 +716,8 @@ static struct usb_serial_driver f81232_device = {
.read_int_callback = f81232_read_int_callback,
.port_probe = f81232_port_probe,
.port_remove = f81232_port_remove,
+ .suspend = f81232_suspend,
+ .resume = f81232_resume,
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b88a72220acd..a7cb0968259e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -604,6 +604,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1022,6 +1024,13 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
/* EZPrototypes devices */
{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
+ { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+ /* Sienna devices */
+ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+ { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* U-Blox devices */
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ddf5ab983dc9..32a40ab9a385 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,9 @@
#define FTDI_LUMEL_PD12_PID 0x6002
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID 0x8348
+
/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
#define CYBER_CORTEX_AV_PID 0x8698
@@ -566,7 +569,9 @@
/*
* NovaTech product ids (FTDI_VID)
*/
-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
/*
* Synapse Wireless product ids (FTDI_VID)
@@ -686,6 +691,12 @@
#define BANDB_ZZ_PROG1_USB_PID 0xBA02
/*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID 0x0920
+#define ECHELON_U20_PID 0x7500
+
+/*
* Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
*/
#define INTREPID_VID 0x093C
@@ -1540,3 +1551,16 @@
#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
+
+/*
+ * Unjo AB
+ */
+#define UNJO_VID 0x22B7
+#define UNJO_ISODEBUG_V1_PID 0x150D
+
+/*
+ * U-Blox products (http://www.u-blox.com).
+ */
+#define UBLOX_VID 0x1546
+#define UBLOX_C099F9P_ZED_PID 0x0502
+#define UBLOX_C099F9P_ODIN_PID 0x0503
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 944de657a07a..f1e74f50642f 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -350,6 +350,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
+ bool stopped = false;
int status = urb->status;
int i;
@@ -357,33 +358,51 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
if (urb == port->read_urbs[i])
break;
}
- set_bit(i, &port->read_urbs_free);
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
switch (status) {
case 0:
+ usb_serial_debug_data(&port->dev, __func__, urb->actual_length,
+ data);
+ port->serial->type->process_read_urb(urb);
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
- return;
+ stopped = true;
+ break;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
__func__, status);
- return;
+ stopped = true;
+ break;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, status);
- goto resubmit;
+ break;
}
- usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
- port->serial->type->process_read_urb(urb);
+ /*
+ * Make sure URB processing is done before marking as free to avoid
+ * racing with unthrottle() on another CPU. Matches the barriers
+ * implied by the test_and_clear_bit() in
+ * usb_serial_generic_submit_read_urb().
+ */
+ smp_mb__before_atomic();
+ set_bit(i, &port->read_urbs_free);
+ /*
+ * Make sure URB is marked as free before checking the throttled flag
+ * to avoid racing with unthrottle() on another CPU. Matches the
+ * smp_mb() in unthrottle().
+ */
+ smp_mb__after_atomic();
+
+ if (stopped)
+ return;
-resubmit:
/* Throttle the device if requested by tty */
spin_lock_irqsave(&port->lock, flags);
port->throttled = port->throttle_req;
@@ -458,6 +477,12 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
port->throttled = port->throttle_req = 0;
spin_unlock_irq(&port->lock);
+ /*
+ * Matches the smp_mb__after_atomic() in
+ * usb_serial_generic_read_bulk_callback().
+ */
+ smp_mb();
+
if (was_throttled)
usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
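The generic.c rework hinges on the barrier pairing spelled out in its comments: the completion handler publishes the free bit before reading the throttled flag, unthrottle() clears throttled before reading the free bits, and the full barriers guarantee at least one side observes the other's store, so a read URB can never be lost. The same store-buffering pattern, approximated in userspace with C11 sequentially-consistent atomics (function and variable names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Store-buffering pattern behind the smp_mb() pairing above (illustrative names). */
static atomic_int urb_free;		/* set by the completion handler */
static atomic_int throttled = 1;	/* cleared by unthrottle()       */

static void *completion_handler(void *arg)
{
	(void)arg;
	atomic_store(&urb_free, 1);		/* publish: URB is free */
	/* seq_cst ordering plays the role of smp_mb() here */
	if (!atomic_load(&throttled))
		puts("completion: resubmitting read URB");
	return NULL;
}

static void *unthrottle(void *arg)
{
	(void)arg;
	atomic_store(&throttled, 0);		/* clear throttled first */
	if (atomic_load(&urb_free))
		puts("unthrottle: resubmitting read URB");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, completion_handler, NULL);
	pthread_create(&b, NULL, unthrottle, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With seq_cst on both sides, at least one thread resubmits the URB. */
	return 0;
}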
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index de61271f2ba3..77555242e259 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -572,6 +572,7 @@ static void edge_interrupt_callback(struct urb *urb)
struct usb_serial_port *port;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
+ unsigned long flags;
int bytes_avail;
int position;
int txCredits;
@@ -603,7 +604,7 @@ static void edge_interrupt_callback(struct urb *urb)
if (length > 1) {
bytes_avail = data[0] | (data[1] << 8);
if (bytes_avail) {
- spin_lock(&edge_serial->es_lock);
+ spin_lock_irqsave(&edge_serial->es_lock, flags);
edge_serial->rxBytesAvail += bytes_avail;
dev_dbg(dev,
"%s - bytes_avail=%d, rxBytesAvail=%d, read_in_progress=%d\n",
@@ -626,22 +627,25 @@ static void edge_interrupt_callback(struct urb *urb)
edge_serial->read_in_progress = false;
}
}
- spin_unlock(&edge_serial->es_lock);
+ spin_unlock_irqrestore(&edge_serial->es_lock,
+ flags);
}
}
/* grab the txcredits for the ports if available */
position = 2;
portNumber = 0;
- while ((position < length) &&
+ while ((position < length - 1) &&
(portNumber < edge_serial->serial->num_ports)) {
txCredits = data[position] | (data[position+1] << 8);
if (txCredits) {
port = edge_serial->serial->port[portNumber];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
- spin_lock(&edge_port->ep_lock);
+ if (edge_port && edge_port->open) {
+ spin_lock_irqsave(&edge_port->ep_lock,
+ flags);
edge_port->txCredits += txCredits;
- spin_unlock(&edge_port->ep_lock);
+ spin_unlock_irqrestore(&edge_port->ep_lock,
+ flags);
dev_dbg(dev, "%s - txcredits for port%d = %d\n",
__func__, portNumber,
edge_port->txCredits);
@@ -682,6 +686,7 @@ static void edge_bulk_in_callback(struct urb *urb)
int retval;
__u16 raw_data_length;
int status = urb->status;
+ unsigned long flags;
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero read bulk status received: %d\n",
@@ -701,7 +706,7 @@ static void edge_bulk_in_callback(struct urb *urb)
usb_serial_debug_data(dev, __func__, raw_data_length, data);
- spin_lock(&edge_serial->es_lock);
+ spin_lock_irqsave(&edge_serial->es_lock, flags);
/* decrement our rxBytes available by the number that we just got */
edge_serial->rxBytesAvail -= raw_data_length;
@@ -725,7 +730,7 @@ static void edge_bulk_in_callback(struct urb *urb)
edge_serial->read_in_progress = false;
}
- spin_unlock(&edge_serial->es_lock);
+ spin_unlock_irqrestore(&edge_serial->es_lock, flags);
}
@@ -1662,7 +1667,8 @@ static void edge_break(struct tty_struct *tty, int break_state)
static void process_rcvd_data(struct edgeport_serial *edge_serial,
unsigned char *buffer, __u16 bufferLength)
{
- struct device *dev = &edge_serial->serial->dev->dev;
+ struct usb_serial *serial = edge_serial->serial;
+ struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
__u16 lastBufferLength;
@@ -1767,11 +1773,10 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
/* spit this data back into the tty driver if this
port is open */
- if (rxLen) {
- port = edge_serial->serial->port[
- edge_serial->rxPort];
+ if (rxLen && edge_serial->rxPort < serial->num_ports) {
+ port = serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
- if (edge_port->open) {
+ if (edge_port && edge_port->open) {
dev_dbg(dev, "%s - Sending %d bytes to TTY for port %d\n",
__func__, rxLen,
edge_serial->rxPort);
@@ -1779,8 +1784,8 @@ static void process_rcvd_data(struct edgeport_serial *edge_serial,
rxLen);
edge_port->port->icount.rx += rxLen;
}
- buffer += rxLen;
}
+ buffer += rxLen;
break;
case EXPECT_HDR3: /* Expect 3rd byte of status header */
@@ -1815,6 +1820,8 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
__u8 code = edge_serial->rxStatusCode;
/* switch the port pointer to the one being currently talked about */
+ if (edge_serial->rxPort >= edge_serial->serial->num_ports)
+ return;
port = edge_serial->serial->port[edge_serial->rxPort];
edge_port = usb_get_serial_port_data(port);
if (edge_port == NULL) {
@@ -2852,16 +2859,18 @@ static int edge_startup(struct usb_serial *serial)
response = 0;
if (edge_serial->is_epic) {
+ struct usb_host_interface *alt;
+
+ alt = serial->interface->cur_altsetting;
+
/* EPIC thing, set up our interrupt polling now and our read
* urb, so that the device knows it really is connected. */
interrupt_in_found = bulk_in_found = bulk_out_found = false;
- for (i = 0; i < serial->interface->altsetting[0]
- .desc.bNumEndpoints; ++i) {
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
struct usb_endpoint_descriptor *endpoint;
int buffer_size;
- endpoint = &serial->interface->altsetting[0].
- endpoint[i].desc;
+ endpoint = &alt->endpoint[i].desc;
buffer_size = usb_endpoint_maxp(endpoint);
if (!interrupt_in_found &&
(usb_endpoint_is_int_in(endpoint))) {
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 73956d48a0c5..1347c77facd0 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -49,9 +49,10 @@ static int buffer_size;
static int xbof = -1;
static int ir_startup (struct usb_serial *serial);
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port);
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+static int ir_write_room(struct tty_struct *tty);
+static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios);
@@ -81,8 +82,9 @@ static struct usb_serial_driver ir_device = {
.num_ports = 1,
.set_termios = ir_set_termios,
.attach = ir_startup,
- .open = ir_open,
- .prepare_write_buffer = ir_prepare_write_buffer,
+ .write = ir_write,
+ .write_room = ir_write_room,
+ .write_bulk_callback = ir_write_bulk_callback,
.process_read_urb = ir_process_read_urb,
};
@@ -198,6 +200,9 @@ static int ir_startup(struct usb_serial *serial)
{
struct usb_irda_cs_descriptor *irda_desc;
+ if (serial->num_bulk_in < 1 || serial->num_bulk_out < 1)
+ return -ENODEV;
+
irda_desc = irda_usb_find_class_desc(serial, 0);
if (!irda_desc) {
dev_err(&serial->dev->dev,
@@ -252,35 +257,102 @@ static int ir_startup(struct usb_serial *serial)
return 0;
}
-static int ir_open(struct tty_struct *tty, struct usb_serial_port *port)
+static int ir_write(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count)
{
- int i;
+ struct urb *urb = NULL;
+ unsigned long flags;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
- port->write_urbs[i]->transfer_flags = URB_ZERO_PACKET;
+ if (port->bulk_out_size == 0)
+ return -EINVAL;
- /* Start reading from the device */
- return usb_serial_generic_open(tty, port);
-}
+ if (count == 0)
+ return 0;
-static int ir_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size)
-{
- unsigned char *buf = dest;
- int count;
+ count = min(count, port->bulk_out_size - 1);
+
+ spin_lock_irqsave(&port->lock, flags);
+ if (__test_and_clear_bit(0, &port->write_urbs_free)) {
+ urb = port->write_urbs[0];
+ port->tx_bytes += count;
+ }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (!urb)
+ return 0;
/*
* The first byte of the packet we send to the device contains an
- * inbound header which indicates an additional number of BOFs and
+ * outbound header which indicates an additional number of BOFs and
* a baud rate change.
*
* See section 5.4.2.2 of the USB IrDA spec.
*/
- *buf = ir_xbof | ir_baud;
+ *(u8 *)urb->transfer_buffer = ir_xbof | ir_baud;
+
+ memcpy(urb->transfer_buffer + 1, buf, count);
+
+ urb->transfer_buffer_length = count + 1;
+ urb->transfer_flags = URB_ZERO_PACKET;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit write urb: %d\n", ret);
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= count;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return ret;
+ }
+
+ return count;
+}
+
+static void ir_write_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ __set_bit(0, &port->write_urbs_free);
+ port->tx_bytes -= urb->transfer_buffer_length - 1;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&port->dev, "write urb stopped: %d\n", status);
+ return;
+ default:
+ dev_err(&port->dev, "nonzero write-urb status: %d\n", status);
+ break;
+ }
+
+ usb_serial_port_softint(port);
+}
+
+static int ir_write_room(struct tty_struct *tty)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ int count = 0;
+
+ if (port->bulk_out_size == 0)
+ return 0;
+
+ if (test_bit(0, &port->write_urbs_free))
+ count = port->bulk_out_size - 1;
- count = kfifo_out_locked(&port->write_fifo, buf + 1, size - 1,
- &port->lock);
- return count + 1;
+ return count;
}
static void ir_process_read_urb(struct urb *urb)
@@ -333,34 +405,34 @@ static void ir_set_termios(struct tty_struct *tty,
switch (baud) {
case 2400:
- ir_baud = USB_IRDA_BR_2400;
+ ir_baud = USB_IRDA_LS_2400;
break;
case 9600:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
break;
case 19200:
- ir_baud = USB_IRDA_BR_19200;
+ ir_baud = USB_IRDA_LS_19200;
break;
case 38400:
- ir_baud = USB_IRDA_BR_38400;
+ ir_baud = USB_IRDA_LS_38400;
break;
case 57600:
- ir_baud = USB_IRDA_BR_57600;
+ ir_baud = USB_IRDA_LS_57600;
break;
case 115200:
- ir_baud = USB_IRDA_BR_115200;
+ ir_baud = USB_IRDA_LS_115200;
break;
case 576000:
- ir_baud = USB_IRDA_BR_576000;
+ ir_baud = USB_IRDA_LS_576000;
break;
case 1152000:
- ir_baud = USB_IRDA_BR_1152000;
+ ir_baud = USB_IRDA_LS_1152000;
break;
case 4000000:
- ir_baud = USB_IRDA_BR_4000000;
+ ir_baud = USB_IRDA_LS_4000000;
break;
default:
- ir_baud = USB_IRDA_BR_9600;
+ ir_baud = USB_IRDA_LS_9600;
baud = 9600;
}
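The ir-usb rewrite gives the driver its own write path: claim the single write URB under the port lock, put the one-byte IrDA header (extra BOFs plus link-speed code) in front of at most bulk_out_size - 1 payload bytes, and release the URB from the completion callback. A sketch of just the framing step (IR_HEADER and ir_frame() are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Hypothetical header value standing in for ir_xbof | ir_baud. */
#define IR_HEADER 0x01	/* e.g. USB_IRDA_LS_9600 with no extra BOFs */

/*
 * Build one outbound packet: header byte first, then at most out_size - 1
 * bytes of payload (mirrors the count = min(count, bulk_out_size - 1) clamp).
 * Returns the number of payload bytes consumed.
 */
static size_t ir_frame(unsigned char *out, size_t out_size,
		       const unsigned char *data, size_t len)
{
	if (out_size < 2)
		return 0;
	if (len > out_size - 1)
		len = out_size - 1;

	out[0] = IR_HEADER;
	memcpy(out + 1, data, len);
	return len;
}

int main(void)
{
	unsigned char urb_buf[8];
	size_t sent = ir_frame(urb_buf, sizeof(urb_buf),
			       (const unsigned char *)"hello irda", 10);

	printf("queued %zu payload bytes, %zu on the wire\n", sent, sent + 1);
	return 0;
}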
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 1f9414bdd649..e08755e9b612 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -567,6 +567,8 @@ static void usa49_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -968,6 +970,8 @@ static void usa67_glocont_callback(struct urb *urb)
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ continue;
if (p_priv->resend_cont) {
dev_dbg(&port->dev, "%s - sending setup\n", __func__);
@@ -1250,8 +1254,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
ep_desc = find_ep(serial, endpoint);
if (!ep_desc) {
- /* leak the urb, something's wrong and the callers don't care */
- return urb;
+ usb_free_urb(urb);
+ return NULL;
}
if (usb_endpoint_xfer_int(ep_desc)) {
ep_type_name = "INT";
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 135eb04368f9..14b45f3e6388 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -368,8 +368,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
if (!urbtrack)
return -ENOMEM;
- kref_get(&mos_parport->ref_count);
- urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urbtrack->urb) {
kfree(urbtrack);
@@ -390,6 +388,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
usb_sndctrlpipe(usbdev, 0),
(unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
@@ -1941,10 +1941,6 @@ static int mos7720_startup(struct usb_serial *serial)
}
}
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
if (product == MOSCHIP_DEVICE_ID_7715) {
ret_val = mos7715_parport_init(serial);
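The mos7720 change reorders reference counting so kref_get() happens only after every allocation that can fail, letting the early error paths simply free the tracker without dropping a reference they never took. A small sketch of that acquire-late discipline (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative refcount helpers; kref_get()/kref_put() behave the same way. */
struct parent { int refcount; };
static void parent_get(struct parent *p) { p->refcount++; }

struct tracker {
	struct parent *parent;
	char *payload;
};

/*
 * Acquire resources first, take the reference last: the error paths
 * below never have to undo a get they did not perform yet.
 */
static struct tracker *tracker_create(struct parent *p, size_t payload_len)
{
	struct tracker *t = malloc(sizeof(*t));

	if (!t)
		return NULL;

	t->payload = malloc(payload_len);
	if (!t->payload) {
		free(t);	/* no reference taken yet, nothing to drop */
		return NULL;
	}

	parent_get(p);		/* only now does the tracker pin its parent */
	t->parent = p;
	return t;
}

int main(void)
{
	struct parent p = { .refcount = 1 };
	struct tracker *t = tracker_create(&p, 32);

	printf("parent refcount after create: %d\n", p.refcount);
	if (t) {
		free(t->payload);
		free(t);
	}
	return 0;
}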
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 03d63bad6be4..0c92252c9316 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -131,11 +131,15 @@
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
* ATEN UC2322 device using Moschip MCS7820
+ * MOXA UPort 2210 device using Moschip MCS7820
*/
#define USB_VENDOR_ID_ATENINTL 0x0557
#define ATENINTL_DEVICE_ID_UC2324 0x2011
#define ATENINTL_DEVICE_ID_UC2322 0x7820
+#define USB_VENDOR_ID_MOXA 0x110a
+#define MOXA_DEVICE_ID_2210 0x2210
+
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
@@ -206,6 +210,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+ {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -2089,6 +2094,7 @@ static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
u8 *buf;
int device_type;
@@ -2098,6 +2104,11 @@ static int mos7840_probe(struct usb_serial *serial,
goto out;
}
+ if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
+ device_type = MOSCHIP_DEVICE_ID_7820;
+ goto out;
+ }
+
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -2350,11 +2361,6 @@ out:
goto error;
} else
dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
-
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
- MOS_WDR_TIMEOUT);
}
return 0;
error:
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 64bf258e7e00..9606dde3194c 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -116,7 +116,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
requesttype,
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, 0);
+ 0, 0, buffer, 1, USB_CTRL_SET_TIMEOUT);
kfree(buffer);
if (retval < 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index b2b7c12e5c86..737b6652a0b5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -200,6 +200,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -421,6 +422,7 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+#define CINTERION_PRODUCT_CLS8 0x00b0
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -564,6 +566,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
+/* Device needs ZLP */
+#define ZLP BIT(17)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -967,6 +972,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+ /* Motorola devices */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1037,6 +1047,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1066,7 +1078,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1143,12 +1156,24 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff), /* Telit FN980 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff), /* Telit FN980 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff), /* Telit FN980 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+ .driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff), /* Telit ME910G1 (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1165,10 +1190,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1333,6 +1364,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
@@ -1538,6 +1570,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1766,6 +1799,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
+ .driver_info = RSVD(7) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1828,6 +1863,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
@@ -1941,10 +1978,22 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */
+ .driver_info = RSVD(1) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */
+ .driver_info = RSVD(1) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
@@ -1956,6 +2005,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
@@ -2057,6 +2110,9 @@ static int option_attach(struct usb_serial *serial)
if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
+ if (device_flags & ZLP)
+ data->use_zlp = 1;
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9706d214c409..4fcded2971d1 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -88,6 +88,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
@@ -101,6 +102,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) },
+ { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index d84c3b3d477b..54d2fb974a41 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,7 @@
#define HP_LM920_PRODUCT_ID 0x026b
#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
+#define HP_LD381_PRODUCT_ID 0x0f7f
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
@@ -159,3 +160,6 @@
#define SMART_VENDOR_ID 0x0b8c
#define SMART_PRODUCT_ID 0x2303
+/* Allied Telesis VT-Kit3 */
+#define AT_VENDOR_ID 0x0caa
+#define AT_VTKIT3_PRODUCT_ID 0x3001
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 1d17779b2203..19952ccd028a 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -872,7 +872,10 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
u8 newMSR = (u8) *ch;
unsigned long flags;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
spin_lock_irqsave(&port_priv->lock, flags);
port_priv->shadowMSR = newMSR;
@@ -900,7 +903,10 @@ static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch)
unsigned long flags;
u8 newLSR = (u8) *ch;
+ /* May be called from qt2_process_read_urb() for an unbound port. */
port_priv = usb_get_serial_port_data(port);
+ if (!port_priv)
+ return;
if (newLSR & UART_LSR_BI)
newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 836cb93ba49e..a7e41723c34c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -778,7 +778,6 @@ static void ti_close(struct usb_serial_port *port)
struct ti_port *tport;
int port_number;
int status;
- int do_unlock;
unsigned long flags;
tdev = usb_get_serial_data(port->serial);
@@ -802,16 +801,13 @@ static void ti_close(struct usb_serial_port *port)
"%s - cannot send close port command, %d\n"
, __func__, status);
- /* if mutex_lock is interrupted, continue anyway */
- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
+ mutex_lock(&tdev->td_open_close_lock);
--tport->tp_tdev->td_open_port_count;
- if (tport->tp_tdev->td_open_port_count <= 0) {
+ if (tport->tp_tdev->td_open_port_count == 0) {
/* last port is closed, shut down interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
- tport->tp_tdev->td_open_port_count = 0;
}
- if (do_unlock)
- mutex_unlock(&tdev->td_open_close_lock);
+ mutex_unlock(&tdev->td_open_close_lock);
}
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 511242111403..15e05ebf37ac 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -89,6 +89,8 @@ DEVICE(moto_modem, MOTO_IDS);
#define MOTOROLA_TETRA_IDS() \
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9013) }, /* MTP3xxx */ \
+ { USB_DEVICE(0x0cad, 0x9015) }, /* MTP85xx */ \
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 4a037b4a79cf..8a24a11b7ffc 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -315,10 +315,7 @@ static void serial_cleanup(struct tty_struct *tty)
serial = port->serial;
owner = serial->type->driver.owner;
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- mutex_unlock(&serial->disc_mutex);
+ usb_autopm_put_interface(serial->interface);
usb_serial_put(serial);
module_put(owner);
@@ -1354,6 +1351,9 @@ static int usb_serial_register(struct usb_serial_driver *driver)
return -EINVAL;
}
+ /* Prevent individual ports from being unbound. */
+ driver->driver.suppress_bind_attrs = true;
+
usb_serial_operations_init(driver);
/* Add this device to our list of devices */
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 44b25c08c68a..1d0e28538346 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -35,6 +35,7 @@ struct usb_wwan_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
unsigned int use_send_setup:1;
+ unsigned int use_zlp:1;
int in_flight;
unsigned int open_ports;
void *private;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 3dfdfc81254b..93c696e2131f 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -495,6 +495,7 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
void (*callback) (struct urb *))
{
struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
@@ -505,6 +506,9 @@ static struct urb *usb_wwan_setup_urb(struct usb_serial_port *port,
usb_sndbulkpipe(serial->dev, endpoint) | dir,
buf, len, callback, ctx);
+ if (intfdata->use_zlp && dir == USB_DIR_OUT)
+ urb->transfer_flags |= URB_ZERO_PACKET;
+
return urb;
}
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index d3ea90bef84d..345211f1a491 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -604,6 +604,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
+
+ if (command_port->bulk_out_size < datasize + 1)
+ return -EIO;
+
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
@@ -677,6 +681,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
+ speed_t baud;
port_settings.port = port->port_number + 1;
@@ -737,11 +742,13 @@ static void firm_setup_port(struct tty_struct *tty)
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
- port_settings.baud = tty_get_baud_rate(tty);
- dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
+ baud = tty_get_baud_rate(tty);
+ port_settings.baud = cpu_to_le32(baud);
+ dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
- tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
+ tty_encode_baud_rate(tty, baud, baud);
+
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 38065df4d2d8..30169c859a74 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -91,7 +91,7 @@ struct whiteheat_simple {
struct whiteheat_port_settings {
__u8 port; /* port number (1 to N) */
- __u32 baud; /* any value 7 - 460800, firmware calculates
+ __le32 baud; /* any value 7 - 460800, firmware calculates
best fit; arrives little endian */
__u8 bits; /* 5, 6, 7, or 8 */
__u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
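
The whiteheat change above stores the baud rate as __le32 and converts with cpu_to_le32() before the structure reaches the firmware, instead of copying a host-endian value into a wire-format field. A minimal userspace sketch of the same boundary conversion, assuming glibc's htole32()/le32toh() in place of the kernel helpers (the struct and field names below are illustrative, not the driver's):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct port_settings {
	uint8_t  port;
	uint32_t baud_le;              /* kept little endian, as the firmware expects */
} __attribute__((packed));

int main(void)
{
	struct port_settings s = { .port = 1 };
	uint32_t baud = 115200;

	s.baud_le = htole32(baud);     /* convert once, at the host/firmware boundary */

	/* Convert back explicitly when reading the field, e.g. for debugging. */
	printf("baud on the wire: %u\n", (unsigned int)le32toh(s.baud_le));
	return 0;
}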
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 4176d1af9bf2..d955761fce6f 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -51,7 +51,7 @@ MODULE_VERSION("1.03");
static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
#ifdef CONFIG_REALTEK_AUTOPM
static int ss_en = 1;
@@ -776,18 +776,16 @@ static void rts51x_suspend_timer_fn(unsigned long data)
break;
case RTS51X_STAT_IDLE:
case RTS51X_STAT_SS:
- usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
usb_stor_dbg(us, "Ready to enter SS state\n");
rts51x_set_stat(chip, RTS51X_STAT_SS);
/* ignore mass storage interface's children */
pm_suspend_ignore_children(&us->pusb_intf->dev, true);
usb_autopm_put_interface_async(us->pusb_intf);
- usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
}
break;
@@ -820,11 +818,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int ret;
if (working_scsi(srb)) {
- usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
- atomic_read(&us->pusb_intf->pm_usage_cnt),
+ usb_stor_dbg(us, "working scsi, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
- if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+ if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
ret = usb_autopm_get_interface(us->pusb_intf);
usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
}
@@ -1013,12 +1010,15 @@ static int init_realtek_cr(struct us_data *us)
goto INIT_FAIL;
}
- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
- CHECK_FW_VER(chip, 0x5901))
- SET_AUTO_DELINK(chip);
- if (STATUS_LEN(chip) == 16) {
- if (SUPPORT_AUTO_DELINK(chip))
+ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+ CHECK_PID(chip, 0x0159)) {
+ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+ CHECK_FW_VER(chip, 0x5901))
SET_AUTO_DELINK(chip);
+ if (STATUS_LEN(chip) == 16) {
+ if (SUPPORT_AUTO_DELINK(chip))
+ SET_AUTO_DELINK(chip);
+ }
}
#ifdef CONFIG_REALTEK_AUTOPM
if (ss_en)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 13f2c051dbf2..fd5398efce41 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -90,20 +90,8 @@ static int slave_alloc (struct scsi_device *sdev)
sdev->inquiry_len = 36;
/*
- * USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
- * the length of each element except the last must be divisible
- * by the Bulk maxpacket value. There's currently no way to
- * express this by block-layer constraints, so we'll cop out
- * and simply require addresses to be aligned at 512-byte
- * boundaries. This is okay since most block I/O involves
- * hardware sectors that are multiples of 512 bytes in length,
- * and since host controllers up through USB 2.0 have maxpacket
- * values no larger than 512.
- *
- * But it doesn't suffice for Wireless USB, where Bulk maxpacket
- * values can be as large as 2048. To make that work properly
- * will require changes to the block layer.
+ * Some host controllers may have alignment requirements.
+ * We'll play it safe by requiring 512-byte alignment always.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 64af88977b03..d022b5ff4cd0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -46,6 +46,7 @@ struct uas_dev_info {
struct scsi_cmnd *cmnd[MAX_CMNDS];
spinlock_t lock;
struct work_struct work;
+ struct work_struct scan_work; /* for async scanning */
};
enum {
@@ -115,6 +116,17 @@ out:
spin_unlock_irqrestore(&devinfo->lock, flags);
}
+static void uas_scan_work(struct work_struct *work)
+{
+ struct uas_dev_info *devinfo =
+ container_of(work, struct uas_dev_info, scan_work);
+ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
+
+ dev_dbg(&devinfo->intf->dev, "starting scan\n");
+ scsi_scan_host(shost);
+ dev_dbg(&devinfo->intf->dev, "scan complete\n");
+}
+
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
@@ -800,20 +812,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
sdev->hostdata = devinfo;
/*
- * USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
- * the length of each element except the last must be divisible
- * by the Bulk maxpacket value. There's currently no way to
- * express this by block-layer constraints, so we'll cop out
- * and simply require addresses to be aligned at 512-byte
- * boundaries. This is okay since most block I/O involves
- * hardware sectors that are multiples of 512 bytes in length,
- * and since host controllers up through USB 2.0 have maxpacket
- * values no larger than 512.
- *
- * But it doesn't suffice for Wireless USB, where Bulk maxpacket
- * values can be as large as 2048. To make that work properly
- * will require changes to the block layer.
+ * The protocol has no requirements on alignment in the strict sense.
+ * Controllers may or may not have alignment restrictions.
+ * As this is not exported, we use an extremely conservative guess.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
@@ -843,6 +844,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->wce_default_on = 1;
}
+ /* Some disks cannot handle READ_CAPACITY_16 */
+ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ sdev->no_read_capacity_16 = 1;
+
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -852,6 +857,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->fix_capacity = 1;
/*
+ * in some cases we have to guess
+ */
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
@@ -990,6 +1001,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
INIT_WORK(&devinfo->work, uas_do_work);
+ INIT_WORK(&devinfo->scan_work, uas_scan_work);
result = uas_configure_endpoints(devinfo);
if (result)
@@ -1006,7 +1018,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto free_streams;
- scsi_scan_host(shost);
+ /* Submit the delayed_work for SCSI-device scanning */
+ schedule_work(&devinfo->scan_work);
+
return result;
free_streams:
@@ -1174,6 +1188,12 @@ static void uas_disconnect(struct usb_interface *intf)
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_pending(devinfo, DID_NO_CONNECT);
+ /*
+ * Prevent SCSI scanning (if it hasn't started yet)
+ * or wait for the SCSI-scanning routine to stop.
+ */
+ cancel_work_sync(&devinfo->scan_work);
+
scsi_remove_host(shost);
uas_free_streams(devinfo);
scsi_host_put(shost);
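
The uas changes above move scsi_scan_host() into a dedicated work item scheduled from probe() and use cancel_work_sync() in disconnect() to stop the scan or wait for it, so a slow scan can no longer race device removal. A rough userspace analogue of that lifecycle, sketched with POSIX threads rather than the kernel workqueue API (the probe/disconnect names are only illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct dev_info {
	pthread_t scan_thread;
	bool scan_started;
};

static void *scan_work(void *arg)
{
	(void)arg;
	puts("starting scan");
	sleep(1);                        /* stands in for the slow device scan */
	puts("scan complete");
	return NULL;
}

static int probe(struct dev_info *d)
{
	d->scan_started = pthread_create(&d->scan_thread, NULL, scan_work, d) == 0;
	return d->scan_started ? 0 : -1;
}

static void disconnect(struct dev_info *d)
{
	/* Like cancel_work_sync(): do not tear down until the scan has finished. */
	if (d->scan_started)
		pthread_join(d->scan_thread, NULL);
	puts("device removed");
}

int main(void)
{
	struct dev_info d = { 0 };

	if (probe(&d) == 0)
		disconnect(&d);
	return 0;
}

Built with cc -lpthread; the analogy is loose, since pthread_join() can only wait for the work while cancel_work_sync() can also drop it before it ever runs.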
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c802aabcc58c..a52ae34fb1c3 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1277,6 +1277,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
+UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
+ "Samsung",
+ "Flash Drive FIT",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
@@ -2119,7 +2125,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
US_FL_IGNORE_RESIDUE ),
/* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 7ca779493671..dee100dd02e1 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -29,6 +29,11 @@ UNUSUAL_DEV(0x0bda, 0x0138, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+UNUSUAL_DEV(0x0bda, 0x0153, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
UNUSUAL_DEV(0x0bda, 0x0158, 0x0000, 0x9999,
"Realtek",
"USB Card Reader",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index f15aa47c54a9..0eb8c67ee138 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -163,12 +163,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+/*
+ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
+ * UAS Ignore Reported by Steven Ellis <sellis@redhat.com>
+ */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
"JMS566",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 5133a0792eb0..f24374486623 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -63,6 +63,7 @@ struct usb_skel {
spinlock_t err_lock; /* lock for errors */
struct kref kref;
struct mutex io_mutex; /* synchronize I/O with disconnect */
+ unsigned long disconnected:1;
wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -75,6 +76,7 @@ static void skel_delete(struct kref *kref)
struct usb_skel *dev = to_skel_dev(kref);
usb_free_urb(dev->bulk_in_urb);
+ usb_put_intf(dev->interface);
usb_put_dev(dev->udev);
kfree(dev->bulk_in_buffer);
kfree(dev);
@@ -126,10 +128,7 @@ static int skel_release(struct inode *inode, struct file *file)
return -ENODEV;
/* allow the device to be autosuspended */
- mutex_lock(&dev->io_mutex);
- if (dev->interface)
- usb_autopm_put_interface(dev->interface);
- mutex_unlock(&dev->io_mutex);
+ usb_autopm_put_interface(dev->interface);
/* decrement the count on our device */
kref_put(&dev->kref, skel_delete);
@@ -241,7 +240,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
if (rv < 0)
return rv;
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
rv = -ENODEV;
goto exit;
}
@@ -422,7 +421,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
/* this lock makes sure we don't submit URBs to gone devices */
mutex_lock(&dev->io_mutex);
- if (!dev->interface) { /* disconnect() was called */
+ if (dev->disconnected) { /* disconnect() was called */
mutex_unlock(&dev->io_mutex);
retval = -ENODEV;
goto error;
@@ -509,7 +508,7 @@ static int skel_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->bulk_in_wait);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ dev->interface = usb_get_intf(interface);
/* set up the endpoint information */
/* use only the first bulk-in and bulk-out endpoints */
@@ -582,7 +581,7 @@ static void skel_disconnect(struct usb_interface *interface)
/* prevent more I/O from starting */
mutex_lock(&dev->io_mutex);
- dev->interface = NULL;
+ dev->disconnected = 1;
mutex_unlock(&dev->io_mutex);
usb_kill_anchored_urbs(&dev->submitted);
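
The usb-skeleton changes above keep dev->interface valid for the lifetime of the device structure by taking a reference with usb_get_intf() and dropping it in skel_delete(), while a separate disconnected flag tells the I/O paths to bail out. A simplified sketch of that ownership pattern, assuming C11 atomics for the refcount and flag (the structure and helpers below are illustrative only, not the driver's API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct skel_dev {
	atomic_int  refcount;
	atomic_bool disconnected;          /* set once by disconnect(), read by I/O paths */
};

static struct skel_dev *dev_get(struct skel_dev *d)
{
	atomic_fetch_add(&d->refcount, 1);
	return d;
}

static void dev_put(struct skel_dev *d)
{
	if (atomic_fetch_sub(&d->refcount, 1) == 1) {
		puts("last reference dropped, freeing");
		free(d);
	}
}

static int do_io(struct skel_dev *d)
{
	if (atomic_load(&d->disconnected))
		return -1;                 /* the driver returns -ENODEV here */
	puts("I/O submitted");
	return 0;
}

int main(void)
{
	struct skel_dev *d = malloc(sizeof(*d));
	struct skel_dev *opener;

	atomic_init(&d->refcount, 1);      /* probe() owns the initial reference */
	atomic_init(&d->disconnected, false);

	opener = dev_get(d);               /* open() takes its own reference */
	do_io(opener);

	atomic_store(&d->disconnected, true);  /* disconnect(): no more I/O */
	do_io(opener);

	dev_put(opener);                   /* release() */
	dev_put(d);                        /* disconnect() drops probe's reference */
	return 0;
}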
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 8e629b6a6f3f..a5a6a114219a 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -315,9 +315,17 @@ static int stub_probe(struct usb_device *udev)
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
int rc = 0;
+ char save_status;
dev_dbg(&udev->dev, "Enter probe\n");
+ /* Not sure if this is our device. Allocate here to avoid
+ * calling alloc while holding busid_table lock.
+ */
+ sdev = stub_device_alloc(udev);
+ if (!sdev)
+ return -ENOMEM;
+
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
@@ -332,6 +340,9 @@ static int stub_probe(struct usb_device *udev)
* See driver_probe_device() in driver/base/dd.c
*/
rc = -ENODEV;
+ if (!busid_priv)
+ goto sdev_free;
+
goto call_put_busid_priv;
}
@@ -351,12 +362,6 @@ static int stub_probe(struct usb_device *udev)
goto call_put_busid_priv;
}
- /* ok, this is my device */
- sdev = stub_device_alloc(udev);
- if (!sdev) {
- rc = -ENOMEM;
- goto call_put_busid_priv;
- }
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
@@ -366,9 +371,16 @@ static int stub_probe(struct usb_device *udev)
/* set private data to usb_device */
dev_set_drvdata(&udev->dev, sdev);
+
busid_priv->sdev = sdev;
busid_priv->udev = udev;
+ save_status = busid_priv->status;
+ busid_priv->status = STUB_BUSID_ALLOC;
+
+ /* release the busid_lock */
+ put_busid_priv(busid_priv);
+
/*
* Claim this hub port.
* It doesn't matter what value we pass as owner
@@ -386,10 +398,8 @@ static int stub_probe(struct usb_device *udev)
dev_err(&udev->dev, "stub_add_files for %s\n", udev_busid);
goto err_files;
}
- busid_priv->status = STUB_BUSID_ALLOC;
- rc = 0;
- goto call_put_busid_priv;
+ return 0;
err_files:
usb_hub_release_port(udev->parent, udev->portnum,
@@ -398,23 +408,30 @@ err_port:
dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(udev);
+ /* we already have busid_priv, just lock busid_lock */
+ spin_lock(&busid_priv->busid_lock);
busid_priv->sdev = NULL;
- stub_device_free(sdev);
+ busid_priv->status = save_status;
+ spin_unlock(&busid_priv->busid_lock);
+ /* lock is released - go to free */
+ goto sdev_free;
call_put_busid_priv:
+ /* release the busid_lock */
put_busid_priv(busid_priv);
+
+sdev_free:
+ stub_device_free(sdev);
+
return rc;
}
static void shutdown_busid(struct bus_id_priv *busid_priv)
{
- if (busid_priv->sdev && !busid_priv->shutdown_busid) {
- busid_priv->shutdown_busid = 1;
- usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED);
+ usbip_event_add(&busid_priv->sdev->ud, SDEV_EVENT_REMOVED);
- /* wait for the stop of the event handler */
- usbip_stop_eh(&busid_priv->sdev->ud);
- }
+ /* wait for the stop of the event handler */
+ usbip_stop_eh(&busid_priv->sdev->ud);
}
/*
@@ -441,11 +458,16 @@ static void stub_disconnect(struct usb_device *udev)
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
- goto call_put_busid_priv;
+ /* release busid_lock */
+ put_busid_priv(busid_priv);
+ return;
}
dev_set_drvdata(&udev->dev, NULL);
+ /* release busid_lock before call to remove device files */
+ put_busid_priv(busid_priv);
+
/*
* NOTE: rx/tx threads are invoked for each usb_device.
*/
@@ -456,27 +478,36 @@ static void stub_disconnect(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
- goto call_put_busid_priv;
+ return;
}
/* If usb reset is called from event handler */
if (usbip_in_eh(current))
- goto call_put_busid_priv;
+ return;
+
+ /* we already have busid_priv, just lock busid_lock */
+ spin_lock(&busid_priv->busid_lock);
+ if (!busid_priv->shutdown_busid)
+ busid_priv->shutdown_busid = 1;
+ /* release busid_lock */
+ spin_unlock(&busid_priv->busid_lock);
/* shutdown the current connection */
shutdown_busid(busid_priv);
usb_put_dev(sdev->udev);
+ /* we already have busid_priv, just lock busid_lock */
+ spin_lock(&busid_priv->busid_lock);
/* free sdev */
busid_priv->sdev = NULL;
stub_device_free(sdev);
if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
-
-call_put_busid_priv:
- put_busid_priv(busid_priv);
+ /* release busid_lock */
+ spin_unlock(&busid_priv->busid_lock);
+ return;
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 5b807185f79e..d47176f9c310 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -353,14 +353,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
epd = &ep->desc;
- /* validate transfer_buffer_length */
- if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
- dev_err(&sdev->udev->dev,
- "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
- pdu->u.cmd_submit.transfer_buffer_length);
- return -1;
- }
-
if (usb_endpoint_xfer_control(epd)) {
if (dir == USBIP_DIR_OUT)
return usb_sndctrlpipe(udev, epnum);
@@ -383,16 +375,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
}
if (usb_endpoint_xfer_isoc(epd)) {
- /* validate packet size and number of packets */
- unsigned int maxp, packets, bytes;
-
- maxp = usb_endpoint_maxp(epd);
- maxp *= usb_endpoint_maxp_mult(epd);
- bytes = pdu->u.cmd_submit.transfer_buffer_length;
- packets = DIV_ROUND_UP(bytes, maxp);
-
+ /* validate number of packets */
if (pdu->u.cmd_submit.number_of_packets < 0 ||
- pdu->u.cmd_submit.number_of_packets > packets) {
+ pdu->u.cmd_submit.number_of_packets >
+ USBIP_MAX_ISO_PACKETS) {
dev_err(&sdev->udev->dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);
@@ -493,8 +479,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
/* allocate urb transfer buffer, if needed */
- if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
- pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
+ if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
priv->urb->transfer_buffer =
kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
GFP_KERNEL);
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 109e65ba01a0..0b199a2664c0 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -136,6 +136,13 @@ extern struct device_attribute dev_attr_usbip_debug;
#define USBIP_DIR_OUT 0x00
#define USBIP_DIR_IN 0x01
+/*
+ * Arbitrary limit for the maximum number of isochronous packets in an URB,
+ * compare for example the uhci_submit_isochronous function in
+ * drivers/usb/host/uhci-q.c
+ */
+#define USBIP_MAX_ISO_PACKETS 1024
+
/**
* struct usbip_header_basic - data pertinent to every request
* @command: the usbip request type
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 9936a2f199b1..8bda6455dfcb 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -318,6 +318,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
break;
}
+ break;
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
@@ -465,13 +466,14 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
- struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+ struct vhci_hcd *vhci;
unsigned long flags;
if (!vdev) {
pr_err("could not get virtual device");
return;
}
+ vhci = vdev_to_vhci(vdev);
priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
if (!priv) {
@@ -512,8 +514,10 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
}
vdev = &vhci->vdev[portnum-1];
- /* patch to usb_sg_init() is in 2.5.60 */
- BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
+ if (!urb->transfer_buffer && urb->transfer_buffer_length) {
+ dev_dbg(dev, "Null URB transfer buffer\n");
+ return -EINVAL;
+ }
spin_lock_irqsave(&vhci->lock, flags);
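
The vhci_hcd change above downgrades a malformed submission from a BUG_ON() crash to a debug message and -EINVAL, so a bad request from the remote side is simply rejected instead of taking the host down. A tiny sketch of the validate-and-refuse pattern (the function below is a stand-in, not the driver code):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Reject inconsistent input instead of asserting on it. */
static int submit(const void *buf, size_t len)
{
	if (!buf && len) {
		fprintf(stderr, "null transfer buffer with non-zero length\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", submit(NULL, 16));  /* -22: the request is refused */
	printf("%d\n", submit(NULL, 0));   /* 0: a zero-length request is fine */
	return 0;
}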
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 5943deeec115..a4f65aacf335 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -90,16 +90,21 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_pack_pdu(pdu, urb, USBIP_RET_SUBMIT, 0);
/* recv transfer buffer */
- if (usbip_recv_xbuff(ud, urb) < 0)
- return;
+ if (usbip_recv_xbuff(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* recv iso_packet_descriptor */
- if (usbip_recv_iso(ud, urb) < 0)
- return;
+ if (usbip_recv_iso(ud, urb) < 0) {
+ urb->status = -EPROTO;
+ goto error;
+ }
/* restore the padding in iso packets */
usbip_pad_iso(ud, urb);
+error:
if (usbip_dbg_flag_vhci_rx)
usbip_dump_urb(urb);
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7338e43faa17..c94167d87178 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -356,11 +356,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
/*
- * Try to reset the device. The success of this is dependent on
- * being able to lock the device, which is not always possible.
+ * Try to get the locks ourselves to prevent a deadlock. The
+ * success of this is dependent on being able to lock the device,
+ * which is not always possible.
+ * We can not use the "try" reset interface here, which will
+ * overwrite the previously restored configuration information.
*/
- if (vdev->reset_works && !pci_try_reset_function(pdev))
- vdev->needs_reset = false;
+ if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+ if (device_trylock(&pdev->dev)) {
+ if (!__pci_reset_function_locked(pdev))
+ vdev->needs_reset = false;
+ device_unlock(&pdev->dev);
+ }
+ pci_cfg_access_unlock(pdev);
+ }
pci_restore_state(pdev);
out:
@@ -417,10 +426,14 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) ||
+ vdev->nointx || vdev->pdev->is_virtfn)
+ return 0;
+
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
- return 1;
+ return pin ? 1 : 0;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
@@ -716,6 +729,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
+ u16 orig_cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -731,15 +745,23 @@ static long vfio_pci_ioctl(void *device_data,
break;
}
- /* Is it really there? */
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig_cmd | PCI_COMMAND_MEMORY);
+
io = pci_map_rom(pdev, &size);
- if (!io || !size) {
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
info.size = 0;
- break;
}
- pci_unmap_rom(pdev, io);
- info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -1467,11 +1489,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 7b8a957b008d..84905d074c4f 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -1182,8 +1182,10 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
return -ENOMEM;
ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
- if (ret)
+ if (ret) {
+ kfree(vdev->msi_perm);
return ret;
+ }
return len;
}
@@ -1607,6 +1609,15 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
}
/*
+ * Nag about hardware bugs, hopefully to have vendors fix them, but at least
+ * to collect a list of dependencies for the VF INTx pin quirk below.
+ */
+static const struct pci_device_id known_bogus_vf_intx_pin[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x270c) },
+ {}
+};
+
+/*
* For each device we allocate a pci_config_map that indicates the
* capability occupying each dword and thus the struct perm_bits we
* use for read and write. We also allocate a virtualized config
@@ -1671,6 +1682,24 @@ int vfio_config_init(struct vfio_pci_device *vdev)
if (pdev->is_virtfn) {
*(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
+
+ /*
+ * Per SR-IOV spec rev 1.1, 3.4.1.18 the interrupt pin register
+ * does not apply to VFs and VFs must implement this register
+ * as read-only with value zero. Userspace is not readily able
+ * to identify whether a device is a VF and thus that the pin
+ * definition on the device is bogus should it violate this
+ * requirement. We already virtualize the pin register for
+ * other purposes, so we simply need to replace the bogus value
+ * and consider VFs when we determine INTx IRQ count.
+ */
+ if (vconfig[PCI_INTERRUPT_PIN] &&
+ !pci_match_id(known_bogus_vf_intx_pin, pdev))
+ pci_warn(pdev,
+ "Hardware bug: VF reports bogus INTx pin %d\n",
+ vconfig[PCI_INTERRUPT_PIN]);
+
+ vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 1c46045b0e7f..94594dc63c41 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -297,8 +297,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
irq = pci_irq_vector(pdev, vector);
if (vdev->ctx[vector].trigger) {
- free_irq(irq, vdev->ctx[vector].trigger);
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+ free_irq(irq, vdev->ctx[vector].trigger);
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 70c748a5fbcc..a8e25f9409fa 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -406,6 +406,7 @@ static void tce_iommu_release(void *iommu_data)
{
struct tce_container *container = iommu_data;
struct tce_iommu_group *tcegrp;
+ struct tce_iommu_prereg *tcemem, *tmtmp;
long i;
while (tce_groups_attached(container)) {
@@ -428,13 +429,8 @@ static void tce_iommu_release(void *iommu_data)
tce_iommu_free_table(container, tbl);
}
- while (!list_empty(&container->prereg_list)) {
- struct tce_iommu_prereg *tcemem;
-
- tcemem = list_first_entry(&container->prereg_list,
- struct tce_iommu_prereg, next);
- WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
- }
+ list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
+ WARN_ON(tce_iommu_prereg_free(container, tcemem));
tce_iommu_disable(container);
if (container->mm)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 1d48e62f4f52..0d5b667c0e65 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -53,10 +53,16 @@ module_param_named(disable_hugepages,
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+ "Maximum number of user DMA mappings per container (65535).");
+
struct vfio_iommu {
struct list_head domain_list;
struct mutex lock;
struct rb_root dma_list;
+ unsigned int dma_avail;
bool v2;
bool nesting;
};
@@ -384,6 +390,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unmap_unpin(iommu, dma);
vfio_unlink_dma(iommu, dma);
kfree(dma);
+ iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -584,12 +591,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
return -EEXIST;
}
+ if (!iommu->dma_avail) {
+ mutex_unlock(&iommu->lock);
+ return -ENOSPC;
+ }
+
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
mutex_unlock(&iommu->lock);
return -ENOMEM;
}
+ iommu->dma_avail--;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
@@ -905,6 +918,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
+ iommu->dma_avail = dma_entry_limit;
mutex_init(&iommu->lock);
return iommu;
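
The type1 IOMMU changes above give each container a mapping budget: dma_avail starts at dma_entry_limit, every new mapping decrements it and fails with -ENOSPC once it reaches zero, and vfio_remove_dma() gives the slot back. A hedged userspace sketch of the counting scheme (the limit and names below are illustrative; the real module parameter defaults to 65535):

#include <errno.h>
#include <stdio.h>

#define ENTRY_LIMIT 3                  /* stand-in for the real 65535 default */

struct container {
	unsigned int avail;
};

static int map_entry(struct container *c)
{
	if (!c->avail)
		return -ENOSPC;            /* budget exhausted: refuse new mappings */
	c->avail--;
	return 0;
}

static void unmap_entry(struct container *c)
{
	c->avail++;                        /* removing a mapping returns its slot */
}

int main(void)
{
	struct container c = { .avail = ENTRY_LIMIT };
	int i;

	for (i = 0; i < ENTRY_LIMIT + 1; i++)
		printf("map %d -> %d\n", i, map_entry(&c));   /* the last one fails */

	unmap_entry(&c);
	printf("after unmap -> %d\n", map_entry(&c));         /* succeeds again */
	return 0;
}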
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 681d0eade82f..861f43f8f9ce 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -30,7 +30,7 @@
#include "vhost.h"
-static int experimental_zcopytx = 1;
+static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
" 1 -Enable; 0 - Disable");
@@ -39,6 +39,12 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
+
/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
@@ -372,6 +378,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock;
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
+ int sent_pkts = 0;
mutex_lock(&vq->mutex);
sock = vq->private_data;
@@ -386,7 +393,7 @@ static void handle_tx(struct vhost_net *net)
hdr_size = nvq->vhost_hlen;
zcopy = nvq->ubufs;
- for (;;) {
+ do {
/* Release DMAs done buffers first */
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
@@ -474,11 +481,7 @@ static void handle_tx(struct vhost_net *net)
vhost_zerocopy_signal_used(net, vq);
total_len += len;
vhost_net_tx_packet(net);
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
out:
mutex_unlock(&vq->mutex);
}
@@ -656,6 +659,7 @@ static void handle_rx(struct vhost_net *net)
struct socket *sock;
struct iov_iter fixup;
__virtio16 num_buffers;
+ int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, 0);
sock = vq->private_data;
@@ -675,7 +679,10 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -754,12 +761,10 @@ static void handle_rx(struct vhost_net *net)
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
- goto out;
- }
- }
- vhost_net_enable_vq(net, vq);
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
+ if (!sock_len)
+ vhost_net_enable_vq(net, vq);
out:
mutex_unlock(&vq->mutex);
}
@@ -828,7 +833,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
@@ -908,11 +914,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
static struct socket *get_raw_socket(int fd)
{
- struct {
- struct sockaddr_ll sa;
- char buf[MAX_ADDR_LEN];
- } uaddr;
- int uaddr_len = sizeof uaddr, r;
+ int r;
struct socket *sock = sockfd_lookup(fd, &r);
if (!sock)
@@ -924,12 +926,7 @@ static struct socket *get_raw_socket(int fd)
goto err;
}
- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
- &uaddr_len, 0);
- if (r)
- goto err;
-
- if (uaddr.sa.sll_family != AF_PACKET) {
+ if (sock->sk->sk_family != AF_PACKET) {
r = -EPFNOSUPPORT;
goto err;
}
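
The vhost-net changes above bound the handle_tx()/handle_rx() loops by both a packet weight and a byte weight via vhost_exceeds_weight(), requeueing the poll work once either budget is spent instead of looping indefinitely. A minimal sketch of that loop shape under the two budgets; the reschedule the real helper performs is only noted in a comment:

#include <stdbool.h>
#include <stdio.h>

#define PKT_WEIGHT   256               /* max packets handled per pass */
#define BYTE_WEIGHT  0x80000           /* max bytes handled per pass */

static bool exceeds_weight(int pkts, long total_len)
{
	/* The kernel helper also calls vhost_poll_queue() to reschedule the work. */
	return total_len >= BYTE_WEIGHT || pkts >= PKT_WEIGHT;
}

/* Pretend every call hands us one 1500-byte packet. */
static int fetch_packet(void)
{
	return 1500;
}

int main(void)
{
	long total_len = 0;
	int sent = 0;

	do {
		total_len += fetch_packet();
	} while (!exceeds_weight(++sent, total_len));

	printf("processed %d packets (%ld bytes) before yielding\n", sent, total_len);
	return 0;
}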
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 17f9ad6fdfa5..dad90f6cc3e1 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -58,6 +58,12 @@
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * requests.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp;
@@ -845,7 +851,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
u64 tag;
u32 exp_data_len, data_direction;
unsigned out, in;
- int head, ret, prot_bytes;
+ int head, ret, prot_bytes, c = 0;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
u16 lun;
@@ -864,7 +870,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
@@ -1080,7 +1086,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
INIT_WORK(&cmd->work, vhost_scsi_submission_work);
queue_work(vhost_scsi_workqueue, &cmd->work);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1433,7 +1439,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ,
+ VHOST_SCSI_WEIGHT, 0);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 3cc98c07dcd3..682fc58e1f75 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -23,6 +23,12 @@
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
enum {
VHOST_TEST_VQ = 0,
VHOST_TEST_VQ_MAX = 1,
@@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
}
vhost_add_used_and_signal(&n->dev, vq, head, 0);
total_len += len;
- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
- vhost_poll_queue(&vq->poll);
+ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
break;
- }
}
mutex_unlock(&vq->mutex);
@@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX,
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2383caf88b67..b14e62f11075 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -393,8 +393,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
void vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs)
+ struct vhost_virtqueue **vqs, int nvqs,
+ int weight, int byte_weight)
{
struct vhost_virtqueue *vq;
int i;
@@ -408,6 +424,8 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iotlb = NULL;
dev->mm = NULL;
dev->worker = NULL;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
@@ -863,8 +881,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
{
- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ struct vhost_umem_node *tmp, *node;
+
+ if (!size)
+ return -EFAULT;
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
@@ -1965,7 +1987,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
/* If this is an input descriptor, increment that count. */
if (access == VHOST_ACCESS_WO) {
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
@@ -2101,7 +2123,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
- if (unlikely(log)) {
+ if (unlikely(log && ret)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index e8efe1af7487..7932ad8d071a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -164,9 +164,13 @@ struct vhost_dev {
struct list_head read_list;
struct list_head pending_list;
wait_queue_head_t wait;
+ int weight;
+ int byte_weight;
};
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+ int nvqs, int weight, int byte_weight);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3cefd602b5b1..2e37097916b5 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -391,7 +399,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
len = pkt->len;
/* Only accept correctly addressed packets */
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
+ le64_to_cpu(pkt->hdr.dst_cid) ==
+ vhost_transport_get_local_cid())
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);
@@ -529,7 +539,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 60d6c2ac87aa..1771220b2437 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -200,7 +200,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access\n");
@@ -277,7 +277,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
if (ret < 0)
goto out_i2c_err;
- return bl->props.brightness;
+ return 0;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index cd50df5807ea..086611c7bc03 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client)
regmap_write(pchip->regmap, REG_ENABLE, 0x00);
- if (&pchip->cdev_torch)
- led_classdev_unregister(&pchip->cdev_torch);
- if (&pchip->cdev_flash)
- led_classdev_unregister(&pchip->cdev_flash);
+ led_classdev_unregister(&pchip->cdev_torch);
+ led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
return 0;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index dda1c4b3a229..42c0a26646f6 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1323,6 +1323,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height, unsigned int user)
{
+ if ((width << 1) * height > vga_vram_size)
+ return -EINVAL;
+
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
c->vc_font.height)
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 59abdc6a97f6..314b7eceb81c 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -350,7 +350,7 @@ static void init_chips(struct fb_info *p, unsigned long addr)
static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
struct fb_info *p;
- unsigned long addr, size;
+ unsigned long addr;
unsigned short cmd;
int rc = -ENODEV;
@@ -362,7 +362,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
goto err_disable;
addr = pci_resource_start(dp, 0);
- size = pci_resource_len(dp, 0);
if (addr == 0)
goto err_disable;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 68a113594808..2811c4afde01 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
int size = len * sizeof(u16);
int ret = -ENOMEM;
+ flags |= __GFP_NOWARN;
+
if (cmap->len != len) {
fb_dealloc_cmap(cmap);
if (!len)
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index a1d93151c059..ae623cd04d6c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -425,6 +425,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
{
unsigned int x;
+ if (image->width > info->var.xres || image->height > info->var.yres)
+ return;
+
if (rotate == FB_ROTATE_UR) {
for (x = 0;
x < num && image->dx + image->width <= info->var.xres;
@@ -1129,7 +1132,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
case FBIOGET_FSCREENINFO:
if (!lock_fb_info(info))
return -ENODEV;
- fix = info->fix;
+ memcpy(&fix, &info->fix, sizeof(fix));
unlock_fb_info(info);
ret = copy_to_user(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 62c0cf79674f..63ec9c71fe84 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -997,97 +997,6 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
DPRINTK("========================================\n");
}
-/**
- * fb_edid_add_monspecs() - add monitor video modes from E-EDID data
- * @edid: 128 byte array with an E-EDID block
- * @spacs: monitor specs to be extended
- */
-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
-{
- unsigned char *block;
- struct fb_videomode *m;
- int num = 0, i;
- u8 svd[64], edt[(128 - 4) / DETAILED_TIMING_DESCRIPTION_SIZE];
- u8 pos = 4, svd_n = 0;
-
- if (!edid)
- return;
-
- if (!edid_checksum(edid))
- return;
-
- if (edid[0] != 0x2 ||
- edid[2] < 4 || edid[2] > 128 - DETAILED_TIMING_DESCRIPTION_SIZE)
- return;
-
- DPRINTK(" Short Video Descriptors\n");
-
- while (pos < edid[2]) {
- u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;
- pr_debug("Data block %u of %u bytes\n", type, len);
- if (type == 2) {
- for (i = pos; i < pos + len; i++) {
- u8 idx = edid[pos + i] & 0x7f;
- svd[svd_n++] = idx;
- pr_debug("N%sative mode #%d\n",
- edid[pos + i] & 0x80 ? "" : "on-n", idx);
- }
- } else if (type == 3 && len >= 3) {
- /* Check Vendor Specific Data Block. For HDMI,
- it is always 00-0C-03 for HDMI Licensing, LLC. */
- if (edid[pos + 1] == 3 && edid[pos + 2] == 0xc &&
- edid[pos + 3] == 0)
- specs->misc |= FB_MISC_HDMI;
- }
- pos += len + 1;
- }
-
- block = edid + edid[2];
-
- DPRINTK(" Extended Detailed Timings\n");
-
- for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
- i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
- if (PIXEL_CLOCK)
- edt[num++] = block - edid;
-
- /* Yikes, EDID data is totally useless */
- if (!(num + svd_n))
- return;
-
- m = kzalloc((specs->modedb_len + num + svd_n) *
- sizeof(struct fb_videomode), GFP_KERNEL);
-
- if (!m)
- return;
-
- memcpy(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));
-
- for (i = specs->modedb_len; i < specs->modedb_len + num; i++) {
- get_detailed_timing(edid + edt[i - specs->modedb_len], &m[i]);
- if (i == specs->modedb_len)
- m[i].flag |= FB_MODE_IS_FIRST;
- pr_debug("Adding %ux%u@%u\n", m[i].xres, m[i].yres, m[i].refresh);
- }
-
- for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
- int idx = svd[i - specs->modedb_len - num];
- if (!idx || idx >= ARRAY_SIZE(cea_modes)) {
- pr_warning("Reserved SVD code %d\n", idx);
- } else if (!cea_modes[idx].xres) {
- pr_warning("Unimplemented SVD code %d\n", idx);
- } else {
- memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
- pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
- m[i].xres, m[i].yres, m[i].refresh);
- }
- }
-
- kfree(specs->modedb);
- specs->modedb = m;
- specs->modedb_len = specs->modedb_len + num + svd_n;
-}
-
/*
* VESA Generalized Timing Formula (GTF)
*/
@@ -1497,9 +1406,6 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
}
-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
-{
-}
void fb_destroy_modedb(struct fb_videomode *modedb)
{
}
@@ -1607,7 +1513,6 @@ EXPORT_SYMBOL(fb_firmware_edid);
EXPORT_SYMBOL(fb_parse_edid);
EXPORT_SYMBOL(fb_edid_to_monspecs);
-EXPORT_SYMBOL(fb_edid_add_monspecs);
EXPORT_SYMBOL(fb_get_mode);
EXPORT_SYMBOL(fb_validate_mode);
EXPORT_SYMBOL(fb_destroy_modedb);
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index de119f11b78f..a9d76e1b4378 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -289,63 +289,6 @@ static const struct fb_videomode modedb[] = {
};
#ifdef CONFIG_FB_MODE_HELPERS
-const struct fb_videomode cea_modes[65] = {
- /* #1: 640x480p@59.94/60Hz */
- [1] = {
- NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #3: 720x480p@59.94/60Hz */
- [3] = {
- NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #5: 1920x1080i@59.94/60Hz */
- [5] = {
- NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #7: 720(1440)x480iH@59.94/60Hz */
- [7] = {
- NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
- FB_VMODE_INTERLACED, 0,
- },
- /* #9: 720(1440)x240pH@59.94/60Hz */
- [9] = {
- NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #18: 720x576pH@50Hz */
- [18] = {
- NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #19: 1280x720p@50Hz */
- [19] = {
- NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #20: 1920x1080i@50Hz */
- [20] = {
- NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_INTERLACED, 0,
- },
- /* #32: 1920x1080p@23.98/24Hz */
- [32] = {
- NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- FB_VMODE_NONINTERLACED, 0,
- },
- /* #35: (2880)x480p4x@59.94/60Hz */
- [35] = {
- NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
- FB_VMODE_NONINTERLACED, 0,
- },
-};
-
const struct fb_videomode vesa_modes[] = {
/* 0 640x350-85 VESA */
{ NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3,
@@ -933,6 +876,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
if (var->vmode & FB_VMODE_DOUBLE)
vtotal *= 2;
+ if (!htotal || !vtotal)
+ return;
+
hfreq = pixclock/htotal;
mode->refresh = hfreq/vtotal;
}
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 14a93cb21310..66d58e93bc32 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -234,7 +234,7 @@ static int goldfish_fb_probe(struct platform_device *pdev)
fb->fb.var.activate = FB_ACTIVATE_NOW;
fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
- fb->fb.var.pixclock = 10000;
+ fb->fb.var.pixclock = 0;
fb->fb.var.red.offset = 11;
fb->fb.var.red.length = 5;
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 463028543173..59e1cae57948 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -285,6 +285,8 @@ static int hga_card_detect(void)
hga_vram_len = 0x08000;
hga_vram = ioremap(0xb0000, hga_vram_len);
+ if (!hga_vram)
+ goto error;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 4363c64d74e8..4ef9dc94e813 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1516,6 +1516,11 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
+ if (!info->screen_base) {
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
par->cmap_regs_phys = addr + 0x840000;
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index d059d04c63ac..20195d3dbf08 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -769,8 +769,8 @@ failed_free_cmap:
failed_free_clk:
clk_disable_unprepare(fbi->clk);
failed_free_fbmem:
- dma_free_coherent(fbi->dev, info->fix.smem_len,
- info->screen_base, fbi->fb_start_dma);
+ dma_free_wc(fbi->dev, info->fix.smem_len,
+ info->screen_base, fbi->fb_start_dma);
failed_free_info:
kfree(info);
@@ -804,7 +804,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
- dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, info->fix.smem_start);
clk_disable_unprepare(fbi->clk);
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 31c301d6be62..52e161dbd204 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -105,11 +105,11 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
struct fbtype __user *f = (struct fbtype __user *) arg;
if (put_user(type, &f->fb_type) ||
- __put_user(info->var.yres, &f->fb_height) ||
- __put_user(info->var.xres, &f->fb_width) ||
- __put_user(fb_depth, &f->fb_depth) ||
- __put_user(0, &f->fb_cmsize) ||
- __put_user(fb_size, &f->fb_cmsize))
+ put_user(info->var.yres, &f->fb_height) ||
+ put_user(info->var.xres, &f->fb_width) ||
+ put_user(fb_depth, &f->fb_depth) ||
+ put_user(0, &f->fb_cmsize) ||
+ put_user(fb_size, &f->fb_cmsize))
return -EFAULT;
return 0;
}
@@ -124,10 +124,10 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
unsigned int index, count, i;
if (get_user(index, &c->index) ||
- __get_user(count, &c->count) ||
- __get_user(ured, &c->red) ||
- __get_user(ugreen, &c->green) ||
- __get_user(ublue, &c->blue))
+ get_user(count, &c->count) ||
+ get_user(ured, &c->red) ||
+ get_user(ugreen, &c->green) ||
+ get_user(ublue, &c->blue))
return -EFAULT;
cmap.len = 1;
@@ -164,13 +164,13 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
u8 red, green, blue;
if (get_user(index, &c->index) ||
- __get_user(count, &c->count) ||
- __get_user(ured, &c->red) ||
- __get_user(ugreen, &c->green) ||
- __get_user(ublue, &c->blue))
+ get_user(count, &c->count) ||
+ get_user(ured, &c->red) ||
+ get_user(ugreen, &c->green) ||
+ get_user(ublue, &c->blue))
return -EFAULT;
- if (index + count > cmap->len)
+ if (index > cmap->len || count > cmap->len - index)
return -EINVAL;
for (i = 0; i < count; i++) {
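
The sbuslib change above rewrites the colormap bounds check as index > cmap->len || count > cmap->len - index, which cannot wrap the way index + count can when both values come from userspace. A small demonstration of why the additive form is unsafe for unsigned arithmetic (the values are chosen only to trigger the wraparound):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool in_range_unsafe(unsigned int index, unsigned int count, unsigned int len)
{
	return index + count <= len;       /* index + count can wrap to a small value */
}

static bool in_range_safe(unsigned int index, unsigned int count, unsigned int len)
{
	return index <= len && count <= len - index;
}

int main(void)
{
	unsigned int len = 256, index = 16, count = UINT_MAX - 8;

	printf("unsafe check accepts the bogus request: %d\n",
	       in_range_unsafe(index, count, len));   /* 1: the sum wrapped */
	printf("safe check rejects it: %d\n",
	       in_range_safe(index, count, len));     /* 0 */
	return 0;
}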
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 20f7234e809e..c43b951cfb25 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -522,9 +522,7 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
SiS_DDC2Delay(SiS_Pr, 0x4000);
}
- } else if((SiS_Pr->SiS_IF_DEF_LVDS == 1) /* ||
- (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) ||
- (SiS_Pr->SiS_CustomT == CUT_CLEVO1400) */ ) { /* 315 series, LVDS; Special */
+ } else if (SiS_Pr->SiS_IF_DEF_LVDS == 1) { /* 315 series, LVDS; Special */
if(SiS_Pr->SiS_IF_DEF_CH70xx == 0) {
PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36);
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
index aad1cc4be34a..c7ebf03b8d53 100644
--- a/drivers/video/fbdev/sm712.h
+++ b/drivers/video/fbdev/sm712.h
@@ -15,14 +15,10 @@
#define FB_ACCEL_SMI_LYNX 88
-#define SCREEN_X_RES 1024
-#define SCREEN_Y_RES 600
-#define SCREEN_BPP 16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE 0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE 0x00800000
+#define SCREEN_X_RES 1024
+#define SCREEN_Y_RES_PC 768
+#define SCREEN_Y_RES_NETBOOK 600
+#define SCREEN_BPP 16
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 73cb4ffff3c5..0d92ff366a7b 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -530,6 +530,65 @@ static const struct modeinit vgamode[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
},
},
+ { /* 1024 x 768 16Bpp 60Hz */
+ 1024, 768, 16, 60,
+ /* Init_MISC */
+ 0xEB,
+ { /* Init_SR0_SR4 */
+ 0x03, 0x01, 0x0F, 0x03, 0x0E,
+ },
+ { /* Init_SR10_SR24 */
+ 0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+ 0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC4, 0x30, 0x02, 0x01, 0x01,
+ },
+ { /* Init_SR30_SR75 */
+ 0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+ 0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+ 0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+ 0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+ 0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+ 0x0F, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+ 0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+ 0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+ 0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+ },
+ { /* Init_SR80_SR93 */
+ 0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+ 0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+ 0x00, 0x00, 0x00, 0x00,
+ },
+ { /* Init_SRA0_SRAF */
+ 0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+ 0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+ },
+ { /* Init_GR00_GR08 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+ 0xFF,
+ },
+ { /* Init_AR00_AR14 */
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x41, 0x00, 0x0F, 0x00, 0x00,
+ },
+ { /* Init_CR00_CR18 */
+ 0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+ 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+ 0xFF,
+ },
+ { /* Init_CR30_CR4D */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+ 0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+ 0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+ },
+ { /* Init_CR90_CRA7 */
+ 0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+ 0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+ },
+ },
{ /* mode#5: 1024 x 768 24Bpp 60Hz */
1024, 768, 24, 60,
/* Init_MISC */
@@ -827,67 +886,80 @@ static inline unsigned int chan_to_field(unsigned int chan,
static int smtc_blank(int blank_mode, struct fb_info *info)
{
+ struct smtcfb_info *sfb = info->par;
+
/* clear DPMS setting */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
/* Screen On: HSync: On, VSync : On */
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ smtc_seqw(0x6a, 0x16);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ case 0x720:
+ smtc_seqw(0x6a, 0x0d);
+ smtc_seqw(0x6b, 0x02);
+ break;
+ }
+
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
- smtc_seqw(0x6a, 0x16);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
break;
case FB_BLANK_NORMAL:
/* Screen Off: HSync: On, VSync : On Soft blank */
+ smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+ smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
smtc_seqw(0x6a, 0x16);
smtc_seqw(0x6b, 0x02);
- smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
- smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
- smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
break;
case FB_BLANK_VSYNC_SUSPEND:
/* Screen On: HSync: On, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_HSYNC_SUSPEND:
/* Screen On: HSync: Off, VSync : On */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
case FB_BLANK_POWERDOWN:
/* Screen On: HSync: Off, VSync : Off */
+ smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+ smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+ smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
- smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
- smtc_seqw(0x6a, 0x0c);
- smtc_seqw(0x6b, 0x02);
smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+ smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
- smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
- smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
- smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+ smtc_seqw(0x6a, 0x0c);
+ smtc_seqw(0x6b, 0x02);
break;
default:
return -EINVAL;
@@ -1144,8 +1216,10 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
/* init SEQ register SR30 - SR75 */
for (i = 0; i < SIZE_SR30_SR75; i++)
- if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
- (i + 0x30) != 0x6b)
+ if ((i + 0x30) != 0x30 && (i + 0x30) != 0x62 &&
+ (i + 0x30) != 0x6a && (i + 0x30) != 0x6b &&
+ (i + 0x30) != 0x70 && (i + 0x30) != 0x71 &&
+ (i + 0x30) != 0x74 && (i + 0x30) != 0x75)
smtc_seqw(i + 0x30,
vgamode[j].init_sr30_sr75[i]);
@@ -1170,8 +1244,12 @@ static void sm7xx_set_timing(struct smtcfb_info *sfb)
smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
/* init CRTC register CR30 - CR4D */
- for (i = 0; i < SIZE_CR30_CR4D; i++)
+ for (i = 0; i < SIZE_CR30_CR4D; i++) {
+ if ((i + 0x30) >= 0x3B && (i + 0x30) <= 0x3F)
+ /* side-effect, don't write to CR3B-CR3F */
+ continue;
smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+ }
/* init CRTC register CR90 - CRA7 */
for (i = 0; i < SIZE_CR90_CRA7; i++)
@@ -1322,6 +1400,11 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
{
sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
+ if (sfb->chip_id == 0x720)
+ /* on SM720, the framebuffer starts at the 1 MB offset */
+ sfb->fb->fix.smem_start += 0x00200000;
+
+ /* XXX: is it safe for SM720 on Big-Endian? */
if (sfb->fb->var.bits_per_pixel == 32)
sfb->fb->fix.smem_start += big_addr;
@@ -1359,12 +1442,82 @@ static inline void sm7xx_init_hw(void)
outb_p(0x11, 0x3c5);
}
+static u_long sm7xx_vram_probe(struct smtcfb_info *sfb)
+{
+ u8 vram;
+
+ switch (sfb->chip_id) {
+ case 0x710:
+ case 0x712:
+ /*
+ * Assume SM712 graphics chip has 4MB VRAM.
+ *
+ * FIXME: SM712 can have 2MB VRAM, which is used on earlier
+ * laptops, such as IBM Thinkpad 240X. This driver would
+ * probably crash on those machines. If anyone gets one of
+ * those and is willing to help, run "git blame" and send me
+ * an E-mail.
+ */
+ return 0x00400000;
+ case 0x720:
+ outb_p(0x76, 0x3c4);
+ vram = inb_p(0x3c5) >> 6;
+
+ if (vram == 0x00)
+ return 0x00800000; /* 8 MB */
+ else if (vram == 0x01)
+ return 0x01000000; /* 16 MB */
+ else if (vram == 0x02)
+ return 0x00400000; /* illegal, fallback to 4 MB */
+ else if (vram == 0x03)
+ return 0x00400000; /* 4 MB */
+ }
+ return 0; /* unknown hardware */
+}
+
+static void sm7xx_resolution_probe(struct smtcfb_info *sfb)
+{
+ /* get mode parameter from smtc_scr_info */
+ if (smtc_scr_info.lfb_width != 0) {
+ sfb->fb->var.xres = smtc_scr_info.lfb_width;
+ sfb->fb->var.yres = smtc_scr_info.lfb_height;
+ sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+ goto final;
+ }
+
+ /*
+ * No parameter, default resolution is 1024x768-16.
+ *
+ * FIXME: earlier laptops, such as the IBM Thinkpad 240X, have an 800x600
+ * panel; see also the comments about the Thinkpad 240X above.
+ */
+ sfb->fb->var.xres = SCREEN_X_RES;
+ sfb->fb->var.yres = SCREEN_Y_RES_PC;
+ sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+
+#ifdef CONFIG_MIPS
+ /*
+ * Loongson MIPS netbooks use 1024x600 LCD panels, which are the original
+ * target platform of this driver, but nearly all old x86 laptops have
+ * 1024x768. Lighting 768 panels using 600's timings would partially
+ * garble the display, so we don't want that. But it's not possible to
+ * distinguish them reliably.
+ *
+ * So we change the default to 768, but keep 600 as-is on MIPS.
+ */
+ sfb->fb->var.yres = SCREEN_Y_RES_NETBOOK;
+#endif
+
+final:
+ big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+}
+
static int smtcfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct smtcfb_info *sfb;
struct fb_info *info;
- u_long smem_size = 0x00800000; /* default 8MB */
+ u_long smem_size;
int err;
unsigned long mmio_base;
@@ -1404,29 +1557,19 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
sm7xx_init_hw();
- /* get mode parameter from smtc_scr_info */
- if (smtc_scr_info.lfb_width != 0) {
- sfb->fb->var.xres = smtc_scr_info.lfb_width;
- sfb->fb->var.yres = smtc_scr_info.lfb_height;
- sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
- } else {
- /* default resolution 1024x600 16bit mode */
- sfb->fb->var.xres = SCREEN_X_RES;
- sfb->fb->var.yres = SCREEN_Y_RES;
- sfb->fb->var.bits_per_pixel = SCREEN_BPP;
- }
-
- big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
/* Map address and memory detection */
mmio_base = pci_resource_start(pdev, 0);
pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+ smem_size = sm7xx_vram_probe(sfb);
+ dev_info(&pdev->dev, "%lu MiB of VRAM detected.\n",
+ smem_size / 1048576);
+
switch (sfb->chip_id) {
case 0x710:
case 0x712:
sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
sfb->fb->fix.mmio_len = 0x00400000;
- smem_size = SM712_VIDEOMEMORYSIZE;
sfb->lfb = ioremap(mmio_base, mmio_addr);
if (!sfb->lfb) {
dev_err(&pdev->dev,
@@ -1458,8 +1601,7 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
case 0x720:
sfb->fb->fix.mmio_start = mmio_base;
sfb->fb->fix.mmio_len = 0x00200000;
- smem_size = SM722_VIDEOMEMORYSIZE;
- sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+ sfb->dp_regs = ioremap(mmio_base, 0x00200000 + smem_size);
sfb->lfb = sfb->dp_regs + 0x00200000;
sfb->mmio = (smtc_regbaseaddress =
sfb->dp_regs + 0x000c0000);
@@ -1476,6 +1618,9 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
goto failed_fb;
}
+ /* probe and decide resolution */
+ sm7xx_resolution_probe(sfb);
+
/* can support 32 bpp */
if (15 == sfb->fb->var.bits_per_pixel)
sfb->fb->var.bits_per_pixel = 16;
@@ -1486,7 +1631,11 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
if (err)
goto failed;
- smtcfb_setmode(sfb);
+ /*
+ * The screen would be temporarily garbled when sm712fb takes over
+ * vesafb or VGA text mode. Zero the framebuffer.
+ */
+ memset_io(sfb->lfb, 0, sfb->fb->fix.smem_len);
err = register_framebuffer(info);
if (err < 0)
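The new sm7xx_vram_probe() above derives the SM720 VRAM size from bits 7:6 of sequencer register 0x76 instead of hard-coding 8 MB. A standalone sketch of the same decode, with the register value passed in directly (the 0x3c4/0x3c5 port accesses are deliberately not reproduced here; the sample values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the SM720 VRAM size from the top two bits of SR76,
     * mirroring the switch in sm7xx_vram_probe() above. */
    static unsigned long sm720_vram_size(uint8_t sr76)
    {
            switch (sr76 >> 6) {
            case 0x00: return 0x00800000;   /* 8 MB  */
            case 0x01: return 0x01000000;   /* 16 MB */
            case 0x02: return 0x00400000;   /* illegal encoding, fall back to 4 MB */
            default:   return 0x00400000;   /* 4 MB  */
            }
    }

    int main(void)
    {
            /* Hypothetical register readings, just to exercise the decode. */
            uint8_t samples[] = { 0x00, 0x40, 0x80, 0xc0 };

            for (unsigned int i = 0; i < sizeof(samples); i++)
                    printf("SR76=0x%02x -> %lu MiB\n", samples[i],
                           sm720_vram_size(samples[i]) / 1048576);
            return 0;
    }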
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 2925d5ce8d3e..1267b93c03bd 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -430,7 +430,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
if (ret < 0)
return ret;
- ret = ssd1307fb_write_cmd(par->client, 0x0);
+ ret = ssd1307fb_write_cmd(par->client, par->page_offset);
if (ret < 0)
return ret;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 28b1e325536f..343c02e1ed62 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1174,12 +1174,12 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (ptr[0] & 0x10)
frame->active_aspect = ptr[1] & 0xf;
if (ptr[0] & 0x8) {
- frame->top_bar = (ptr[5] << 8) + ptr[6];
- frame->bottom_bar = (ptr[7] << 8) + ptr[8];
+ frame->top_bar = (ptr[6] << 8) | ptr[5];
+ frame->bottom_bar = (ptr[8] << 8) | ptr[7];
}
if (ptr[0] & 0x4) {
- frame->left_bar = (ptr[9] << 8) + ptr[10];
- frame->right_bar = (ptr[11] << 8) + ptr[12];
+ frame->left_bar = (ptr[10] << 8) | ptr[9];
+ frame->right_bar = (ptr[12] << 8) | ptr[11];
}
frame->scan_mode = ptr[0] & 0x3;
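The hdmi.c hunk fixes the bar-info fields: in the AVI InfoFrame they are stored little-endian (low byte first), so the low byte must come from the earlier offset and the high byte from the later one, combined with a shift and OR. A tiny self-contained illustration of the corrected extraction; the byte offsets follow the hunk above, while the sample payload is made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine two consecutive little-endian payload bytes into a 16-bit value,
     * as the corrected hdmi_avi_infoframe_unpack() now does. */
    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)((p[1] << 8) | p[0]);
    }

    int main(void)
    {
            /* Hypothetical InfoFrame payload fragment: bytes 5..8 hold
             * top_bar and bottom_bar as little-endian 16-bit values. */
            uint8_t ptr[13] = { 0 };

            ptr[5] = 0x2c; ptr[6] = 0x01;   /* top_bar    = 0x012c = 300 */
            ptr[7] = 0x58; ptr[8] = 0x02;   /* bottom_bar = 0x0258 = 600 */

            printf("top_bar    = %u\n", get_le16(&ptr[5]));
            printf("bottom_bar = %u\n", get_le16(&ptr[7]));
            return 0;
    }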
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 150ce2abf6c8..732e9abdcf96 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -215,6 +215,9 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
* hypervisor.
*/
lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+ if (param.count == 0 ||
+ param.count > U64_MAX - lb_offset - PAGE_SIZE + 1)
+ return -EINVAL;
num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* Allocate the buffers we need */
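The added check rejects a zero-length request and any count large enough that "param.count + lb_offset + PAGE_SIZE - 1" would wrap before the shift that computes num_pages. A minimal userspace sketch of the same guard, with PAGE_SIZE/PAGE_SHIFT defined locally purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)

    /* Returns the number of pages covering [offset, offset + count), or 0 if
     * the request is empty or the rounding below would overflow a u64 --
     * the same condition the ioctl_memcpy() hunk above now rejects. */
    static uint64_t count_to_pages(uint64_t count, uint64_t lb_offset)
    {
            if (count == 0 || count > UINT64_MAX - lb_offset - PAGE_SIZE + 1)
                    return 0;
            return (count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            /* A sane request, then a hostile one that would wrap without the check. */
            printf("%llu\n", (unsigned long long)count_to_pages(8192, 100));            /* 3 pages */
            printf("%llu\n", (unsigned long long)count_to_pages(UINT64_MAX - 10, 100)); /* 0: rejected */
            return 0;
    }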
@@ -334,8 +337,8 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
struct fsl_hv_ioctl_prop param;
char __user *upath, *upropname;
void __user *upropval;
- char *path = NULL, *propname = NULL;
- void *propval = NULL;
+ char *path, *propname;
+ void *propval;
int ret = 0;
/* Get the parameters from the user. */
@@ -347,32 +350,30 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
upropval = (void __user *)(uintptr_t)param.propval;
path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
- if (IS_ERR(path)) {
- ret = PTR_ERR(path);
- goto out;
- }
+ if (IS_ERR(path))
+ return PTR_ERR(path);
propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
if (IS_ERR(propname)) {
ret = PTR_ERR(propname);
- goto out;
+ goto err_free_path;
}
if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
ret = -EINVAL;
- goto out;
+ goto err_free_propname;
}
propval = kmalloc(param.proplen, GFP_KERNEL);
if (!propval) {
ret = -ENOMEM;
- goto out;
+ goto err_free_propname;
}
if (set) {
if (copy_from_user(propval, upropval, param.proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
param.ret = fh_partition_set_dtprop(param.handle,
@@ -391,7 +392,7 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (copy_to_user(upropval, propval, param.proplen) ||
put_user(param.proplen, &p->proplen)) {
ret = -EFAULT;
- goto out;
+ goto err_free_propval;
}
}
}
@@ -399,10 +400,12 @@ static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
if (put_user(param.ret, &p->ret))
ret = -EFAULT;
-out:
- kfree(path);
+err_free_propval:
kfree(propval);
+err_free_propname:
kfree(propname);
+err_free_path:
+ kfree(path);
return ret;
}
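The reworked error handling in ioctl_dtprop() replaces the single catch-all "out:" label with a ladder of labels that free only what has already been allocated, in reverse order of allocation. A compact userspace sketch of that idiom; the three malloc() calls merely stand in for path/propname/propval and are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate three buffers; on any failure, unwind only what already
     * exists, in reverse order -- the same shape as the reworked
     * ioctl_dtprop() error path above. */
    static int do_work(size_t n)
    {
            char *path, *propname, *propval;
            int ret = 0;

            path = malloc(n);
            if (!path)
                    return -1;              /* nothing else to free yet */

            propname = malloc(n);
            if (!propname) {
                    ret = -1;
                    goto err_free_path;
            }

            propval = malloc(n);
            if (!propval) {
                    ret = -1;
                    goto err_free_propname;
            }

            memset(propval, 0, n);          /* ... real work would go here ... */

            free(propval);
    err_free_propname:
            free(propname);
    err_free_path:
            free(path);
            return ret;
    }

    int main(void)
    {
            printf("do_work: %d\n", do_work(64));
            return 0;
    }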
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 30076956a096..4e64a3befa35 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -482,6 +482,17 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
get_page(newpage); /* balloon reference */
+ /*
+ * When we migrate a page to a different zone and adjusted the
+ * managed page count when inflating, we have to fixup the count of
+ * both involved zones.
+ */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
+ page_zone(page) != page_zone(newpage)) {
+ adjust_managed_page_count(page, 1);
+ adjust_managed_page_count(newpage, -1);
+ }
+
/* balloon's page migration 1st step -- inflate "newpage" */
spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
balloon_page_insert(vb_dev_info, newpage);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 8977f40ea441..e459cd7302e2 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -427,7 +427,7 @@ unmap_release:
kfree(desc);
END_USE(vq);
- return -EIO;
+ return -ENOMEM;
}
/**
@@ -1040,6 +1040,8 @@ struct virtqueue *vring_create_virtqueue(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
+ if (!may_reduce_num)
+ return NULL;
}
if (!num)
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 30b3acc93833..e81ec763b555 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -418,8 +418,9 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
}
}
-static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u8 fake_vmeread8(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u8 retval = 0xff;
int i;
@@ -450,8 +451,9 @@ static u8 fake_vmeread8(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u16 fake_vmeread16(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u16 retval = 0xffff;
int i;
@@ -482,8 +484,9 @@ static u16 fake_vmeread16(struct fake_driver *bridge, unsigned long long addr,
return retval;
}
-static u32 fake_vmeread32(struct fake_driver *bridge, unsigned long long addr,
- u32 aspace, u32 cycle)
+static noinline_for_stack u32 fake_vmeread32(struct fake_driver *bridge,
+ unsigned long long addr,
+ u32 aspace, u32 cycle)
{
u32 retval = 0xffffffff;
int i;
@@ -613,8 +616,9 @@ out:
return retval;
}
-static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite8(struct fake_driver *bridge,
+ u8 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -643,8 +647,9 @@ static void fake_vmewrite8(struct fake_driver *bridge, u8 *buf,
}
-static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite16(struct fake_driver *bridge,
+ u16 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
@@ -673,8 +678,9 @@ static void fake_vmewrite16(struct fake_driver *bridge, u16 *buf,
}
-static void fake_vmewrite32(struct fake_driver *bridge, u32 *buf,
- unsigned long long addr, u32 aspace, u32 cycle)
+static noinline_for_stack void fake_vmewrite32(struct fake_driver *bridge,
+ u32 *buf, unsigned long long addr,
+ u32 aspace, u32 cycle)
{
int i;
unsigned long long start, end, offset;
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 59d74d1b47a8..2287e1be0e55 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1039,15 +1039,15 @@ static int ds_probe(struct usb_interface *intf,
/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
alt = 3;
err = usb_set_interface(dev->udev,
- intf->altsetting[alt].desc.bInterfaceNumber, alt);
+ intf->cur_altsetting->desc.bInterfaceNumber, alt);
if (err) {
dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
"for %d interface: err=%d.\n", alt,
- intf->altsetting[alt].desc.bInterfaceNumber, err);
+ intf->cur_altsetting->desc.bInterfaceNumber, err);
goto err_out_clear;
}
- iface_desc = &intf->altsetting[alt];
+ iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
pr_info("Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index f4bc8c100a01..a3ac582420ec 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -437,8 +437,7 @@ int w1_reset_resume_command(struct w1_master *dev)
if (w1_reset_bus(dev))
return -1;
- /* This will make only the last matched slave perform a skip ROM. */
- w1_write_8(dev, W1_RESUME_CMD);
+ w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
return 0;
}
EXPORT_SYMBOL_GPL(w1_reset_resume_command);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 523e6a73664e..af9ecf6b4b1e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1874,6 +1874,7 @@ comment "Watchdog Pretimeout Governors"
config WATCHDOG_PRETIMEOUT_GOV
bool "Enable watchdog pretimeout governors"
+ depends on WATCHDOG_CORE
help
This option allows selection of watchdog pretimeout governors.
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 4dddd8298a22..3e2e2e6a8328 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -240,6 +240,7 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+MODULE_ALIAS("platform:bcm2835-wdt");
MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 7386111220d5..daeb645fcea8 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -126,13 +126,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
int ret;
- ret = da9062_reset_watchdog_timer(wdt);
- if (ret) {
- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
- ret);
- return ret;
- }
-
ret = regmap_update_bits(wdt->hw->regmap,
DA9062AA_CONTROL_D,
DA9062AA_TWDSCALE_MASK,
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index fd28b660f475..714e2b2a6a35 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -59,7 +59,7 @@
#define IMX2_WDT_WMCR 0x08 /* Misc Register */
-#define IMX2_WDT_MAX_TIME 128
+#define IMX2_WDT_MAX_TIME 128U
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
#define WDOG_SEC_TO_COUNT(s) ((s * 2 - 1) << 8)
@@ -190,8 +190,10 @@ static void __imx2_wdt_set_timeout(struct watchdog_device *wdog,
static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int new_timeout)
{
- __imx2_wdt_set_timeout(wdog, new_timeout);
+ unsigned int actual;
+ actual = min(new_timeout, IMX2_WDT_MAX_TIME);
+ __imx2_wdt_set_timeout(wdog, actual);
wdog->timeout = new_timeout;
return 0;
}
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 44d180a2c5e5..58e06f059e67 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -137,8 +137,8 @@ static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
- return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
- (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
+ return ((reg & GXBB_WDT_TCNT_SETUP_MASK) -
+ (reg >> GXBB_WDT_TCNT_CNT_SHIFT)) / 1000;
}
static const struct watchdog_ops meson_gxbb_wdt_ops = {
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
index 0805ee2acd7a..7aa8bf2d0f91 100644
--- a/drivers/watchdog/rn5t618_wdt.c
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -193,6 +193,7 @@ static struct platform_driver rn5t618_wdt_driver = {
module_platform_driver(rn5t618_wdt_driver);
+MODULE_ALIAS("platform:rn5t618-wdt");
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
MODULE_DESCRIPTION("RN5T618 watchdog driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 0da9943d405f..c310e841561c 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
memset(&r, 0, sizeof(r));
r.start = gas->address;
- r.end = r.start + gas->access_width - 1;
+ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
r.flags = IORESOURCE_MEM;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e88ab1..05f9f5983ee1 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -358,7 +358,10 @@ static enum bp_state reserve_additional_memory(void)
* callers drop the mutex before trying again.
*/
mutex_unlock(&balloon_mutex);
+ /* add_memory_resource() requires the device_hotplug lock */
+ lock_device_hotplug();
rc = add_memory_resource(nid, resource, memhp_auto_online);
+ unlock_device_hotplug();
mutex_lock(&balloon_mutex);
if (rc) {
@@ -400,7 +403,8 @@ static struct notifier_block xen_memory_nb = {
#else
static enum bp_state reserve_additional_memory(void)
{
- balloon_stats.target_pages = balloon_stats.current_pages;
+ balloon_stats.target_pages = balloon_stats.current_pages +
+ balloon_stats.target_unpopulated;
return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
@@ -591,8 +595,15 @@ static void balloon_process(struct work_struct *work)
state = reserve_additional_memory();
}
- if (credit < 0)
- state = decrease_reservation(-credit, GFP_BALLOON);
+ if (credit < 0) {
+ long n_pages;
+
+ n_pages = min(-credit, si_mem_available());
+ state = decrease_reservation(n_pages, GFP_BALLOON);
+ if (state == BP_DONE && n_pages != -credit &&
+ n_pages < totalreserve_pages)
+ state = BP_EAGAIN;
+ }
state = update_schedule(state);
@@ -631,6 +642,9 @@ static int add_ballooned_pages(int nr_pages)
}
}
+ if (si_mem_available() < nr_pages)
+ return -ENOMEM;
+
st = decrease_reservation(nr_pages, GFP_USER);
if (st != BP_DONE)
return -ENOMEM;
@@ -754,7 +768,7 @@ static int __init balloon_init(void)
balloon_stats.schedule_delay = 1;
balloon_stats.max_schedule_delay = 32;
balloon_stats.retry_count = 1;
- balloon_stats.max_retry_count = RETRY_UNLIMITED;
+ balloon_stats.max_retry_count = 4;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
set_online_page_callback(&xen_online_page);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index f4e59c445964..17054d695411 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -53,7 +53,7 @@ static int vcpu_online(unsigned int cpu)
}
static void vcpu_hotplug(unsigned int cpu)
{
- if (!cpu_possible(cpu))
+ if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
return;
switch (vcpu_online(cpu)) {
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 7494dbeb4409..db58aaa4dc59 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -29,6 +29,8 @@
#include "../pci/pci.h"
#ifdef CONFIG_PCI_MMCONFIG
#include <asm/pci_x86.h>
+
+static int xen_mcfg_late(void);
#endif
static bool __read_mostly pci_seg_supported = true;
@@ -40,7 +42,18 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_PCI_IOV
struct pci_dev *physfn = pci_dev->physfn;
#endif
-
+#ifdef CONFIG_PCI_MMCONFIG
+ static bool pci_mcfg_reserved = false;
+ /*
+ * Reserve MCFG areas in Xen on first invocation due to this being
+ * potentially called from inside of acpi_init immediately after
+ * MCFG table has been finally parsed.
+ */
+ if (!pci_mcfg_reserved) {
+ xen_mcfg_late();
+ pci_mcfg_reserved = true;
+ }
+#endif
if (pci_seg_supported) {
struct {
struct physdev_pci_device_add add;
@@ -213,7 +226,7 @@ static int __init register_xen_pci_notifier(void)
arch_initcall(register_xen_pci_notifier);
#ifdef CONFIG_PCI_MMCONFIG
-static int __init xen_mcfg_late(void)
+static int xen_mcfg_late(void)
{
struct pci_mmcfg_region *cfg;
int rc;
@@ -252,8 +265,4 @@ static int __init xen_mcfg_late(void)
}
return 0;
}
-/*
- * Needs to be done after acpi_init which are subsys_initcall.
- */
-subsys_initcall_sync(xen_mcfg_late);
#endif
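With the subsys_initcall removed, xen_add_device() now triggers xen_mcfg_late() lazily, the first time a PCI device is added, guarded by a function-local static flag. A stripped-down sketch of that one-shot pattern, ignoring the locking questions the real notifier path would raise; all names here are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for xen_mcfg_late(): pretend to reserve MCFG regions once. */
    static int reserve_mcfg(void)
    {
            printf("reserving MCFG areas\n");
            return 0;
    }

    /* Called for every device; performs the reservation only on the first
     * call, mirroring the static pci_mcfg_reserved flag added above. */
    static int add_device(const char *name)
    {
            static bool mcfg_reserved;

            if (!mcfg_reserved) {
                    reserve_mcfg();
                    mcfg_reserved = true;
            }
            printf("adding %s\n", name);
            return 0;
    }

    int main(void)
    {
            add_device("0000:00:1f.0");     /* triggers the one-time reservation */
            add_device("0000:01:00.0");     /* does not */
            return 0;
    }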
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 08cb419eb4e6..5f6b77ea34fb 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -37,7 +37,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
* cpu.
*/
__this_cpu_write(xen_in_preemptible_hcall, false);
- _cond_resched();
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 133ee7d11b22..dfb9fc9b35e4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -365,8 +365,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 7f83e9083e9d..b1a1d7de0894 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -115,13 +115,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{
int err;
u16 old_value;
- pci_power_t new_state, old_state;
+ pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 6331a95691a4..ee5ce9286d61 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -106,7 +106,8 @@ static void pcistub_device_release(struct kref *kref)
* is called from "unbind" which takes a device_lock mutex.
*/
__pci_reset_function_locked(dev);
- if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+ if (dev_data &&
+ pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
else
pci_restore_state(dev);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index f8c77751f330..e7fbed56c044 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -126,8 +126,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
if (pci_is_enabled(dev))
pci_disable_device(dev);
- pci_write_config_word(dev, PCI_COMMAND, 0);
-
dev->is_busmaster = 0;
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0a3c6762df1b..07f6ba6ccaa7 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -536,7 +536,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
if (xen_store_evtchn == 0)
return -ENOENT;
- nonseekable_open(inode, filp);
+ stream_open(inode, filp);
u = kzalloc(sizeof(*u), GFP_KERNEL);
if (u == NULL)
diff --git a/firmware/Makefile b/firmware/Makefile
index ee4c05c918c7..e7a17752eeac 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -157,7 +157,7 @@ quiet_cmd_fwbin = MK_FW $@
PROGBITS=$(if $(CONFIG_ARM),%,@)progbits; \
echo "/* Generated by firmware/Makefile */" > $@;\
echo " .section .rodata" >>$@;\
- echo " .p2align $${ASM_ALIGN}" >>$@;\
+ echo " .p2align 4" >>$@;\
echo "_fw_$${FWSTR}_bin:" >>$@;\
echo " .incbin \"$(2)\"" >>$@;\
echo "_fw_end:" >>$@;\
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 082d227fa56b..6261719f6f2a 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -276,7 +276,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
switch (handler->flags) {
case ACL_TYPE_ACCESS:
if (acl) {
- struct iattr iattr;
+ struct iattr iattr = { 0 };
struct posix_acl *old_acl = acl;
retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 072e7599583a..a8ff43068619 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -59,6 +59,8 @@ enum {
Opt_cache_loose, Opt_fscache, Opt_mmap,
/* Access options */
Opt_access, Opt_posixacl,
+ /* Lock timeout option */
+ Opt_locktimeout,
/* Error token */
Opt_err
};
@@ -78,6 +80,7 @@ static const match_table_t tokens = {
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
+ {Opt_locktimeout, "locktimeout=%u"},
{Opt_err, NULL}
};
@@ -126,6 +129,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#ifdef CONFIG_9P_FSCACHE
v9ses->cachetag = NULL;
#endif
+ v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
if (!opts)
return 0;
@@ -298,6 +302,23 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#endif
break;
+ case Opt_locktimeout:
+ r = match_int(&args[0], &option);
+ if (r < 0) {
+ p9_debug(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ ret = r;
+ continue;
+ }
+ if (option < 1) {
+ p9_debug(P9_DEBUG_ERROR,
+ "locktimeout must be a greater than zero integer.\n");
+ ret = -EINVAL;
+ continue;
+ }
+ v9ses->session_lock_timeout = (long)option * HZ;
+ break;
+
default:
continue;
}
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 443d12e02043..ce6ca9f4f683 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,6 +116,7 @@ struct v9fs_session_info {
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
struct rw_semaphore rename_sem;
+ long session_lock_timeout; /* retry interval for blocking locks */
};
/* cache_validity flags */
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 6181ad79e1a5..e45b1a0dd513 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -49,8 +49,9 @@
* @page: structure to page
*
*/
-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
+static int v9fs_fid_readpage(void *data, struct page *page)
{
+ struct p9_fid *fid = data;
struct inode *inode = page->mapping->host;
struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
struct iov_iter to;
@@ -121,7 +122,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
return ret;
- ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
+ ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
+ filp->private_data);
p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
return ret;
}
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 48db9a9f13f9..cb6c4031af55 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -105,7 +105,6 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
int err = 0;
struct p9_fid *fid;
int buflen;
- int reclen = 0;
struct p9_rdir *rdir;
struct kvec kvec;
@@ -138,11 +137,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
while (rdir->head < rdir->tail) {
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
rdir->tail - rdir->head, &st);
- if (err) {
+ if (err <= 0) {
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
return -EIO;
}
- reclen = st.size+2;
over = !dir_emit(ctx, st.name, strlen(st.name),
v9fs_qid2ino(&st.qid), dt_type(&st));
@@ -150,8 +148,8 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
if (over)
return 0;
- rdir->head += reclen;
- ctx->pos += reclen;
+ rdir->head += err;
+ ctx->pos += err;
}
}
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 2f035b15180e..e963b83afc71 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -154,6 +154,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
uint8_t status = P9_LOCK_ERROR;
int res = 0;
unsigned char fl_type;
+ struct v9fs_session_info *v9ses;
fid = filp->private_data;
BUG_ON(fid == NULL);
@@ -189,6 +190,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
if (IS_SETLKW(cmd))
flock.flags = P9_LOCK_FLAGS_BLOCK;
+ v9ses = v9fs_inode2v9ses(file_inode(filp));
+
/*
* if its a blocked request and we get P9_LOCK_BLOCKED as the status
* for lock request, keep on trying
@@ -202,7 +205,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
break;
if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
break;
- if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+ if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
+ != 0)
break;
/*
* p9_client_lock_dotl overwrites flock.client_id with the
@@ -524,6 +528,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (!v9inode->writeback_fid &&
+ (vma->vm_flags & VM_SHARED) &&
(vma->vm_flags & VM_WRITE)) {
/*
* clone a fid and add it to writeback_fid
@@ -625,6 +630,8 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start - 1),
};
+ if (!(vma->vm_flags & VM_SHARED))
+ return;
p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index c9fdfb112933..e42c30001509 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -368,6 +368,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
struct buffer_head *bh;
struct object_info root_obj;
unsigned char *b_data;
+ unsigned int blocksize;
struct adfs_sb_info *asb;
struct inode *root;
int ret = -EINVAL;
@@ -419,8 +420,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
goto error_free_bh;
}
+ blocksize = 1 << dr->log2secsize;
brelse(bh);
- if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+ if (sb_set_blocksize(sb, blocksize)) {
bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
if (!bh) {
adfs_error(sb, "couldn't read superblock on "
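The adfs hunk copies dr->log2secsize into a local before brelse() drops the buffer head, because dr points into the buffer's data and must not be dereferenced after the buffer is released. A small userspace analog of the save-before-release pattern, with malloc/free standing in for the buffer cache and all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct disc_record {
            unsigned char log2secsize;
            /* ... other on-disk fields ... */
    };

    int main(void)
    {
            /* Stand-in for the buffer-head data the disc record points into. */
            struct disc_record *dr = malloc(sizeof(*dr));
            unsigned int blocksize;

            if (!dr)
                    return 1;
            dr->log2secsize = 10;

            /* Copy out what is still needed *before* releasing the buffer ... */
            blocksize = 1u << dr->log2secsize;
            free(dr);                       /* ... then release it (brelse() in adfs) */

            /* dr must not be touched from here on; use the saved value instead. */
            printf("block size: %u\n", blocksize);
            return 0;
    }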
diff --git a/fs/afs/super.c b/fs/afs/super.c
index fbdb022b75a2..65389394e202 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -317,6 +317,7 @@ static int afs_fill_super(struct super_block *sb,
/* fill in the superblock */
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
sb->s_bdi = &as->volume->bdi;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 2e1f50e467f1..02f0d373adbf 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -469,9 +469,10 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
*/
flags &= ~AUTOFS_EXP_LEAVES;
found = should_expire(expired, mnt, timeout, how);
- if (!found || found != expired)
- /* Something has changed, continue */
+ if (found != expired) { // something has changed, continue
+ dput(found);
goto next;
+ }
if (expired != dentry)
dput(dentry);
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 9b2917a30294..b18543b36ae1 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -859,9 +859,14 @@ err:
static int load_flat_shared_library(int id, struct lib_info *libs)
{
+ /*
+ * This is a fake bprm struct; only the members "buf", "file" and
+ * "filename" are actually used.
+ */
struct linux_binprm bprm;
int res;
char buf[16];
+ loff_t pos = 0;
memset(&bprm, 0, sizeof(bprm));
@@ -875,25 +880,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
if (IS_ERR(bprm.file))
return res;
- bprm.cred = prepare_exec_creds();
- res = -ENOMEM;
- if (!bprm.cred)
- goto out;
-
- /* We don't really care about recalculating credentials at this point
- * as we're past the point of no return and are dealing with shared
- * libraries.
- */
- bprm.cred_prepared = 1;
+ res = kernel_read(bprm.file, pos, bprm.buf, BINPRM_BUF_SIZE);
- res = prepare_binprm(&bprm);
-
- if (!res)
+ if (res >= 0)
res = load_flat_file(&bprm, libs, id, NULL);
- abort_creds(bprm.cred);
-
-out:
allow_write_access(bprm.file);
fput(bprm.file);
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index afdf4e3cafc2..37c2093a24d3 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -14,14 +14,31 @@
#include <linux/err.h>
#include <linux/fs.h>
+static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
+static inline char *next_non_spacetab(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (!spacetab(*first))
+ return first;
+ return NULL;
+}
+static inline char *next_terminator(char *first, const char *last)
+{
+ for (; first <= last; first++)
+ if (spacetab(*first) || !*first)
+ return first;
+ return NULL;
+}
+
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
- char *cp;
+ char *cp, *buf_end;
struct file *file;
char interp[BINPRM_BUF_SIZE];
int retval;
+ /* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@@ -34,18 +51,40 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
- /*
- * This section does the #! interpretation.
- * Sorta complicated, but hopefully it will work. -TYT
- */
-
+ /* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
- bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
- if ((cp = strchr(bprm->buf, '\n')) == NULL)
- cp = bprm->buf+BINPRM_BUF_SIZE-1;
+ /*
+ * This section handles parsing the #! line into separate
+ * interpreter path and argument strings. We must be careful
+ * because bprm->buf is not yet guaranteed to be NUL-terminated
+ * (though the buffer will have trailing NUL padding when the
+ * file size was smaller than the buffer size).
+ *
+ * We do not want to exec a truncated interpreter path, so either
+ * we find a newline (which indicates nothing is truncated), or
+ * we find a space/tab/NUL after the interpreter path (which
+ * itself may be preceded by spaces/tabs). Truncating the
+ * arguments is fine: the interpreter can re-read the script to
+ * parse them on its own.
+ */
+ buf_end = bprm->buf + sizeof(bprm->buf) - 1;
+ cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+ if (!cp) {
+ cp = next_non_spacetab(bprm->buf + 2, buf_end);
+ if (!cp)
+ return -ENOEXEC; /* Entire buf is spaces/tabs */
+ /*
+ * If there is no later space/tab/NUL we must assume the
+ * interpreter path is truncated.
+ */
+ if (!next_terminator(cp, buf_end))
+ return -ENOEXEC;
+ cp = buf_end;
+ }
+ /* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
while (cp > bprm->buf) {
cp--;
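The comment block above states the rule: accept the #! line only if a newline proves nothing was truncated, or if a space/tab/NUL terminator follows the interpreter path; otherwise refuse to exec a possibly-truncated path. A small userspace harness that reimplements the two helpers and the decision on a few sample buffers; BUF_SIZE is shrunk here purely to make truncation easy to demonstrate:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 16     /* stands in for BINPRM_BUF_SIZE, shrunk for the demo */

    static bool spacetab(char c) { return c == ' ' || c == '\t'; }

    static char *next_non_spacetab(char *first, const char *last)
    {
            for (; first <= last; first++)
                    if (!spacetab(*first))
                            return first;
            return NULL;
    }

    static char *next_terminator(char *first, const char *last)
    {
            for (; first <= last; first++)
                    if (spacetab(*first) || !*first)
                            return first;
            return NULL;
    }

    /* Would load_script()'s truncation rule accept this #! line? */
    static bool script_line_ok(const char *src)
    {
            char buf[BUF_SIZE];
            char *buf_end = buf + sizeof(buf) - 1;
            char *cp;
            size_t n = strlen(src);

            /* Emulate the exec buffer: truncated copy, NUL padding when shorter. */
            if (n > sizeof(buf))
                    n = sizeof(buf);
            memset(buf, 0, sizeof(buf));
            memcpy(buf, src, n);

            if (buf[0] != '#' || buf[1] != '!')
                    return false;
            if (memchr(buf, '\n', sizeof(buf)))
                    return true;            /* a newline proves nothing was cut off */

            cp = next_non_spacetab(buf + 2, buf_end);
            if (!cp)
                    return false;           /* entire buffer is spaces/tabs */
            return next_terminator(cp, buf_end) != NULL;
    }

    int main(void)
    {
            printf("%d\n", script_line_ok("#!/bin/sh\n"));                 /* 1: newline fits in the buffer */
            printf("%d\n", script_line_ok("#!/bin/sh -e"));                /* 1: a terminator follows the path */
            printf("%d\n", script_line_ok("#!/a/very/long/interp/path"));  /* 0: path may be truncated */
            return 0;
    }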
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index ff0b0be92d61..5456937836b8 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -265,16 +265,17 @@ out:
}
}
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+ struct btrfs_work *self)
{
struct list_head *list = &wq->ordered_list;
struct btrfs_work *work;
spinlock_t *lock = &wq->list_lock;
unsigned long flags;
+ void *wtag;
+ bool free_self = false;
while (1) {
- void *wtag;
-
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -300,16 +301,47 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
list_del(&work->ordered_list);
spin_unlock_irqrestore(lock, flags);
- /*
- * We don't want to call the ordered free functions with the
- * lock held though. Save the work as tag for the trace event,
- * because the callback could free the structure.
- */
- wtag = work;
- work->ordered_free(work);
- trace_btrfs_all_work_done(wq->fs_info, wtag);
+ if (work == self) {
+ /*
+ * This is the work item that the worker is currently
+ * executing.
+ *
+ * The kernel workqueue code guarantees non-reentrancy
+ * of work items. I.e., if a work item with the same
+ * address and work function is queued twice, the second
+ * execution is blocked until the first one finishes. A
+ * work item may be freed and recycled with the same
+ * work function; the workqueue code assumes that the
+ * original work item cannot depend on the recycled work
+ * item in that case (see find_worker_executing_work()).
+ *
+ * Note that the work of one Btrfs filesystem may depend
+ * on the work of another Btrfs filesystem via, e.g., a
+ * loop device. Therefore, we must not allow the current
+ * work item to be recycled until we are really done,
+ * otherwise we break the above assumption and can
+ * deadlock.
+ */
+ free_self = true;
+ } else {
+ /*
+ * We don't want to call the ordered free functions with
+ * the lock held though. Save the work as tag for the
+ * trace event, because the callback could free the
+ * structure.
+ */
+ wtag = work;
+ work->ordered_free(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
+ }
}
spin_unlock_irqrestore(lock, flags);
+
+ if (free_self) {
+ wtag = self;
+ self->ordered_free(self);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
+ }
}
static void normal_work_helper(struct btrfs_work *work)
@@ -337,7 +369,7 @@ static void normal_work_helper(struct btrfs_work *work)
work->func(work);
if (need_order) {
set_bit(WORK_DONE_BIT, &work->flags);
- run_ordered_work(wq);
+ run_ordered_work(wq, work);
}
if (!need_order)
trace_btrfs_all_work_done(wq->fs_info, wtag);
@@ -415,3 +447,11 @@ void btrfs_set_work_high_priority(struct btrfs_work *work)
{
set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
+
+void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
+{
+ if (wq->high)
+ flush_workqueue(wq->high->normal_wq);
+
+ flush_workqueue(wq->normal->normal_wq);
+}
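The long comment above explains why run_ordered_work() must not hand the currently executing work item back to ordered_free() while still inside it: workqueue non-reentrancy keys on the item's address, so freeing and recycling it mid-run can deadlock, and the fix therefore defers freeing "self" until after the loop. A minimal userspace sketch of that defer-the-current-item shape, with a plain singly linked list standing in for the ordered list and free() standing in for ordered_free():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work {
            struct work *next;
            int id;
    };

    /* Drain the list, freeing finished items as we go -- except the item we
     * were entered with ("self"), which is only freed after the loop,
     * echoing the free_self handling in run_ordered_work() above. */
    static void drain_ordered_work(struct work **list, struct work *self)
    {
            bool free_self = false;
            struct work *w;

            while ((w = *list) != NULL) {
                    *list = w->next;        /* detach, like list_del() */
                    printf("done: work %d\n", w->id);

                    if (w == self)
                            free_self = true;   /* still executing it: defer the free */
                    else
                            free(w);
            }

            if (free_self)
                    free(self);
    }

    int main(void)
    {
            struct work *a = malloc(sizeof(*a));
            struct work *b = malloc(sizeof(*b));
            struct work *list;

            if (!a || !b)
                    return 1;
            a->id = 1; a->next = b;
            b->id = 2; b->next = NULL;
            list = a;

            /* Pretend "a" is the work item the worker was entered with. */
            drain_ordered_work(&list, a);
            return 0;
    }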
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 1f9597355c9d..a0f6986806a4 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -85,4 +85,6 @@ void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
+void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
+
#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 85dc7ab8f89e..2973d256bb44 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2018,13 +2018,19 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
extent_item_objectid);
if (!search_commit_root) {
- trans = btrfs_join_transaction(fs_info->extent_root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ trans = btrfs_attach_transaction(fs_info->extent_root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) != -ENOENT &&
+ PTR_ERR(trans) != -EROFS)
+ return PTR_ERR(trans);
+ trans = NULL;
+ }
+ }
+
+ if (trans)
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
- } else {
+ else
down_read(&fs_info->commit_root_sem);
- }
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
tree_mod_seq_elem.seq, &refs,
@@ -2056,7 +2062,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
free_leaf_list(refs);
out:
- if (!search_commit_root) {
+ if (trans) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
btrfs_end_transaction(trans, fs_info->extent_root);
} else {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c94d3390cbfc..b5ebb43b1824 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -331,26 +331,6 @@ struct tree_mod_elem {
struct tree_mod_root old_root;
};
-static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
-{
- read_lock(&fs_info->tree_mod_log_lock);
-}
-
-static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
-{
- read_unlock(&fs_info->tree_mod_log_lock);
-}
-
-static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
-{
- write_lock(&fs_info->tree_mod_log_lock);
-}
-
-static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
-{
- write_unlock(&fs_info->tree_mod_log_lock);
-}
-
/*
* Pull a new tree mod seq number for our operation.
*/
@@ -370,14 +350,12 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
struct seq_list *elem)
{
- tree_mod_log_write_lock(fs_info);
- spin_lock(&fs_info->tree_mod_seq_lock);
+ write_lock(&fs_info->tree_mod_log_lock);
if (!elem->seq) {
elem->seq = btrfs_inc_tree_mod_seq(fs_info);
list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}
- spin_unlock(&fs_info->tree_mod_seq_lock);
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
return elem->seq;
}
@@ -396,7 +374,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
if (!seq_putting)
return;
- spin_lock(&fs_info->tree_mod_seq_lock);
+ write_lock(&fs_info->tree_mod_log_lock);
list_del(&elem->list);
elem->seq = 0;
@@ -407,29 +385,27 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
* blocker with lower sequence number exists, we
* cannot remove anything from the log
*/
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ write_unlock(&fs_info->tree_mod_log_lock);
return;
}
min_seq = cur_elem->seq;
}
}
- spin_unlock(&fs_info->tree_mod_seq_lock);
/*
* anything that's lower than the lowest existing (read: blocked)
* sequence number can be removed from the tree.
*/
- tree_mod_log_write_lock(fs_info);
tm_root = &fs_info->tree_mod_log;
for (node = rb_first(tm_root); node; node = next) {
next = rb_next(node);
tm = container_of(node, struct tree_mod_elem, node);
- if (tm->seq > min_seq)
+ if (tm->seq >= min_seq)
continue;
rb_erase(node, tm_root);
kfree(tm);
}
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
}
/*
@@ -440,7 +416,7 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
* for root replace operations, or the logical address of the affected
* block for all other operations.
*
- * Note: must be called with write lock (tree_mod_log_write_lock).
+ * Note: must be called with write lock for fs_info::tree_mod_log_lock.
*/
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
@@ -480,7 +456,7 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
* Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
* returns zero with the tree_mod_log_lock acquired. The caller must hold
* this until all tree mod log insertions are recorded in the rb tree and then
- * call tree_mod_log_write_unlock() to release.
+ * write unlock fs_info::tree_mod_log_lock.
*/
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb) {
@@ -490,9 +466,9 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
if (eb && btrfs_header_level(eb) == 0)
return 1;
- tree_mod_log_write_lock(fs_info);
+ write_lock(&fs_info->tree_mod_log_lock);
if (list_empty(&(fs_info)->tree_mod_seq_list)) {
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
return 1;
}
@@ -556,7 +532,7 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
}
ret = __tree_mod_log_insert(fs_info, tm);
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&eb->fs_info->tree_mod_log_lock);
if (ret)
kfree(tm);
@@ -620,7 +596,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
ret = __tree_mod_log_insert(fs_info, tm);
if (ret)
goto free_tms;
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&eb->fs_info->tree_mod_log_lock);
kfree(tm_list);
return 0;
@@ -631,7 +607,7 @@ free_tms:
kfree(tm_list[i]);
}
if (locked)
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&eb->fs_info->tree_mod_log_lock);
kfree(tm_list);
kfree(tm);
@@ -712,7 +688,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
if (!ret)
ret = __tree_mod_log_insert(fs_info, tm);
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
if (ret)
goto free_tms;
kfree(tm_list);
@@ -739,7 +715,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
struct tree_mod_elem *cur = NULL;
struct tree_mod_elem *found = NULL;
- tree_mod_log_read_lock(fs_info);
+ read_lock(&fs_info->tree_mod_log_lock);
tm_root = &fs_info->tree_mod_log;
node = tm_root->rb_node;
while (node) {
@@ -767,7 +743,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
break;
}
}
- tree_mod_log_read_unlock(fs_info);
+ read_unlock(&fs_info->tree_mod_log_lock);
return found;
}
@@ -848,7 +824,7 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
goto free_tms;
}
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
kfree(tm_list);
return 0;
@@ -860,7 +836,7 @@ free_tms:
kfree(tm_list[i]);
}
if (locked)
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&fs_info->tree_mod_log_lock);
kfree(tm_list);
return ret;
@@ -920,7 +896,7 @@ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
goto free_tms;
ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
- tree_mod_log_write_unlock(fs_info);
+ write_unlock(&eb->fs_info->tree_mod_log_lock);
if (ret)
goto free_tms;
kfree(tm_list);
@@ -1271,7 +1247,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
unsigned long p_size = sizeof(struct btrfs_key_ptr);
n = btrfs_header_nritems(eb);
- tree_mod_log_read_lock(fs_info);
+ read_lock(&fs_info->tree_mod_log_lock);
while (tm && tm->seq >= time_seq) {
/*
* all the operations are recorded with the operator used for
@@ -1326,7 +1302,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
if (tm->logical != first_tm->logical)
break;
}
- tree_mod_log_read_unlock(fs_info);
+ read_unlock(&fs_info->tree_mod_log_lock);
btrfs_set_header_nritems(eb, n);
}
@@ -1406,6 +1382,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
struct tree_mod_elem *tm;
struct extent_buffer *eb = NULL;
struct extent_buffer *eb_root;
+ u64 eb_root_owner = 0;
struct extent_buffer *old;
struct tree_mod_root *old_root = NULL;
u64 old_generation = 0;
@@ -1439,6 +1416,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
free_extent_buffer(old);
}
} else if (old_root) {
+ eb_root_owner = btrfs_header_owner(eb_root);
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
eb = alloc_dummy_extent_buffer(root->fs_info, logical,
@@ -1457,7 +1435,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
- btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
+ btrfs_set_header_owner(eb, eb_root_owner);
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
@@ -2971,6 +2949,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
again:
b = get_old_root(root, time_seq);
+ if (!b) {
+ ret = -EIO;
+ goto done;
+ }
level = btrfs_header_level(b);
p->locks[level] = BTRFS_READ_LOCK;
@@ -5465,6 +5447,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
advance_left = advance_right = 0;
while (1) {
+ cond_resched();
if (advance_left && !left_end_reached) {
ret = tree_advance(left_root, left_path, &left_level,
left_root_level,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a423c36bcd72..2bc37d03d407 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -851,14 +851,12 @@ struct btrfs_fs_info {
struct list_head delayed_iputs;
struct mutex cleaner_delayed_iput_mutex;
- /* this protects tree_mod_seq_list */
- spinlock_t tree_mod_seq_lock;
atomic64_t tree_mod_seq;
- struct list_head tree_mod_seq_list;
- /* this protects tree_mod_log */
+ /* this protects tree_mod_log and tree_mod_seq_list */
rwlock_t tree_mod_log_lock;
struct rb_root tree_mod_log;
+ struct list_head tree_mod_seq_list;
atomic_t nr_async_submits;
atomic_t async_submit_draining;
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 8d93854a4b4f..c1ca4ce11e69 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -193,8 +193,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
}
static bool merge_ref(struct btrfs_trans_handle *trans,
@@ -281,7 +279,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
if (head->is_data)
return;
- spin_lock(&fs_info->tree_mod_seq_lock);
+ read_lock(&fs_info->tree_mod_log_lock);
if (!list_empty(&fs_info->tree_mod_seq_list)) {
struct seq_list *elem;
@@ -289,7 +287,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct seq_list, list);
seq = elem->seq;
}
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ read_unlock(&fs_info->tree_mod_log_lock);
ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
list);
@@ -317,7 +315,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
struct seq_list *elem;
int ret = 0;
- spin_lock(&fs_info->tree_mod_seq_lock);
+ read_lock(&fs_info->tree_mod_log_lock);
if (!list_empty(&fs_info->tree_mod_seq_list)) {
elem = list_first_entry(&fs_info->tree_mod_seq_list,
struct seq_list, list);
@@ -331,7 +329,7 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
}
}
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ read_unlock(&fs_info->tree_mod_log_lock);
return ret;
}
@@ -445,7 +443,6 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
add_tail:
list_add_tail(&ref->list, &href->ref_list);
atomic_inc(&root->num_entries);
- trans->delayed_ref_updates++;
spin_unlock(&href->lock);
return ret;
}
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index fb973cc0af66..395b07764269 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -511,18 +511,27 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
}
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
- return PTR_ERR(trans);
+ while (1) {
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans, root);
+ WARN_ON(ret);
+ mutex_lock(&uuid_mutex);
+ /* keep away write_all_supers() during the finishing procedure */
+ mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_lock(&root->fs_info->chunk_mutex);
+ if (src_device->has_pending_chunks) {
+ mutex_unlock(&root->fs_info->chunk_mutex);
+ mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+ mutex_unlock(&uuid_mutex);
+ } else {
+ break;
+ }
}
- ret = btrfs_commit_transaction(trans, root);
- WARN_ON(ret);
- mutex_lock(&uuid_mutex);
- /* keep away write_all_supers() during the finishing procedure */
- mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- mutex_lock(&root->fs_info->chunk_mutex);
btrfs_dev_replace_lock(dev_replace, 1);
dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9d3352fe8dc9..1de017051928 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1712,8 +1712,8 @@ static void end_workqueue_fn(struct btrfs_work *work)
bio->bi_error = end_io_wq->error;
bio->bi_private = end_io_wq->private;
bio->bi_end_io = end_io_wq->end_io;
- kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
bio_endio(bio);
+ kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
}
static int cleaner_kthread(void *arg)
@@ -2104,7 +2104,7 @@ static void free_root_extent_buffers(struct btrfs_root *root)
}
/* helper to cleanup tree roots */
-static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
+static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
{
free_root_extent_buffers(info->tree_root);
@@ -2113,7 +2113,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
free_root_extent_buffers(info->csum_root);
free_root_extent_buffers(info->quota_root);
free_root_extent_buffers(info->uuid_root);
- if (chunk_root)
+ if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
free_root_extent_buffers(info->free_space_root);
}
@@ -2519,7 +2519,6 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->delayed_iput_lock);
spin_lock_init(&fs_info->defrag_inodes_lock);
spin_lock_init(&fs_info->free_chunk_lock);
- spin_lock_init(&fs_info->tree_mod_seq_lock);
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->qgroup_op_lock);
spin_lock_init(&fs_info->buffer_lock);
@@ -2980,6 +2979,7 @@ retry_root_backup:
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
!btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
+ btrfs_info(fs_info, "start tree-log replay");
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3136,7 +3136,7 @@ fail_block_groups:
btrfs_free_block_groups(fs_info);
fail_tree_roots:
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
@@ -3165,7 +3165,7 @@ recovery_tree_root:
if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
goto fail_tree_roots;
- free_root_pointers(fs_info, 0);
+ free_root_pointers(fs_info, false);
/* don't use the log in recovery mode, it won't be valid */
btrfs_set_super_log_root(disk_super, 0);
@@ -3825,6 +3825,19 @@ void close_ctree(struct btrfs_root *root)
*/
btrfs_delete_unused_bgs(root->fs_info);
+ /*
+ * There might be existing delayed inode workers still running
+ * and holding an empty delayed inode item. We must wait for
+ * them to complete first because they can create a transaction.
+ * This happens when someone calls btrfs_balance_delayed_items()
+ * and then a transaction commit runs the same delayed nodes
+ * before any delayed worker has done something with the nodes.
+ * We must wait for any worker here and not at transaction
+ * commit time since that could cause a deadlock.
+ * This is a very rare case.
+ */
+ btrfs_flush_workqueue(fs_info->delayed_workers);
+
ret = btrfs_commit_super(root);
if (ret)
btrfs_err(fs_info, "commit super ret %d", ret);
@@ -3862,7 +3875,7 @@ void close_ctree(struct btrfs_root *root)
btrfs_stop_all_workers(fs_info);
clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
- free_root_pointers(fs_info, 1);
+ free_root_pointers(fs_info, true);
iput(fs_info->btree_inode);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7938c48c72ff..538f378eea52 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7571,6 +7571,14 @@ search:
*/
if ((flags & extra) && !(block_group->flags & extra))
goto loop;
+
+ /*
+ * This block group has different flags than we want.
+ * It's possible that we have the MIXED_GROUP flag but no
+ * block group is mixed. Just skip such a block group.
+ */
+ btrfs_release_block_group(block_group, delalloc);
+ continue;
}
have_block_group:
@@ -10317,6 +10325,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
cache->key.objectid);
+ btrfs_put_block_group(cache);
ret = -EINVAL;
goto error;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4d901200be13..1372d3e5d90b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4060,6 +4060,14 @@ retry:
*/
scanned = 1;
index = 0;
+
+ /*
+ * If we're looping we could run into a page that is locked by a
+ * writer and that writer could be waiting on writeback for a
+ * page in our current bio, and thus deadlock, so flush the
+ * write bio here.
+ */
+ flush_write_bio(data);
goto retry;
}
@@ -4994,12 +5002,14 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
return eb;
eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
if (!eb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
eb->fs_info = fs_info;
again:
ret = radix_tree_preload(GFP_NOFS);
- if (ret)
+ if (ret) {
+ exists = ERR_PTR(ret);
goto free_eb;
+ }
spin_lock(&fs_info->buffer_lock);
ret = radix_tree_insert(&fs_info->buffer_radix,
start >> PAGE_SHIFT, eb);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 26f9ac719d20..4f59b4089eb0 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -227,6 +227,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
struct extent_map *merge = NULL;
struct rb_node *rb;
+ /*
+ * We can't modify an extent map that is in the tree and that is being
+ * used by another task, as it can cause that other task to see it in
+ * inconsistent state during the merging. We always have 1 reference for
+ * the tree and 1 for this task (which is unpinning the extent map or
+ * clearing the logging flag), so anything > 2 means it's being used by
+ * other tasks too.
+ */
+ if (atomic_read(&em->refs) > 2)
+ return;
+
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
if (rb)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 437544846e4e..03661b744eaf 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1555,6 +1555,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
break;
}
+ only_release_metadata = false;
sector_offset = pos & (root->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
@@ -1704,7 +1705,6 @@ again:
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
NULL, GFP_NOFS);
- only_release_metadata = false;
}
btrfs_drop_pages(pages, num_pages);
@@ -1952,6 +1952,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
u64 len;
/*
+ * If the inode needs a full sync, make sure we use a full range to
+ * avoid log tree corruption, due to hole detection racing with ordered
+ * extent completion for adjacent ranges, and assertion failures during
+ * hole detection.
+ */
+ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
+ /*
* The range length can be represented by u64, we have to do the typecasts
* to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
*/
@@ -2634,6 +2646,11 @@ out_only_mutex:
* for detecting, at fsync time, if the inode isn't yet in the
* log tree or it's there but not up to date.
*/
+ struct timespec now = current_time(inode);
+
+ inode_inc_iversion(inode);
+ inode->i_mtime = now;
+ inode->i_ctime = now;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 69a3c11af9d4..a84a1ceb260a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -391,6 +391,12 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode
if (uptodate && !PageUptodate(page)) {
btrfs_readpage(NULL, page);
lock_page(page);
+ if (page->mapping != inode->i_mapping) {
+ btrfs_err(BTRFS_I(inode)->root->fs_info,
+ "free space cache page truncated");
+ io_ctl_drop_pages(io_ctl);
+ return -EIO;
+ }
if (!PageUptodate(page)) {
btrfs_err(BTRFS_I(inode)->root->fs_info,
"error reading free space cache");
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index d27014b8bf72..075b59516c8c 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -159,6 +159,7 @@ static void start_caching(struct btrfs_root *root)
spin_lock(&root->ino_cache_lock);
root->ino_cache_state = BTRFS_CACHE_FINISHED;
spin_unlock(&root->ino_cache_lock);
+ wake_up(&root->ino_cache_wait);
return;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a609d2017a63..f56610ad64bd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5576,7 +5576,6 @@ static void inode_tree_del(struct inode *inode)
spin_unlock(&root->inode_lock);
if (empty && btrfs_root_refs(&root->root_item) == 0) {
- synchronize_srcu(&root->fs_info->subvol_srcu);
spin_lock(&root->inode_lock);
empty = RB_EMPTY_ROOT(&root->inode_tree);
spin_unlock(&root->inode_lock);
@@ -9596,9 +9595,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
return -EXDEV;
/* close the race window with snapshot create/destroy ioctl */
- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
- down_read(&root->fs_info->subvol_sem);
- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
+ new_ino == BTRFS_FIRST_FREE_OBJECTID)
down_read(&dest->fs_info->subvol_sem);
/*
@@ -9615,6 +9613,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
goto out_notrans;
}
+ if (dest != root)
+ btrfs_record_root_in_trans(trans, dest);
+
/*
* We need to find a free sequence number both in the source and
* in the destination directory for the exchange.
@@ -9781,9 +9782,8 @@ out_fail:
ret2 = btrfs_end_transaction(trans, root);
ret = ret ? ret : ret2;
out_notrans:
- if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
- up_read(&dest->fs_info->subvol_sem);
- if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
+ if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
+ old_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&root->fs_info->subvol_sem);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 242584a0d3b5..eefe103c65da 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -385,6 +385,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ /*
+ * If the fs is mounted with nologreplay, which requires it to be
+ * mounted in RO mode as well, we can not allow discard on free space
+ * inside block groups, because log trees refer to extents that are not
+ * pinned in a block group's free space cache (pinning the extents is
+ * precisely the first phase of replaying a log tree).
+ */
+ if (btrfs_test_opt(fs_info, NOLOGREPLAY))
+ return -EROFS;
+
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
dev_list) {
@@ -600,12 +610,18 @@ static noinline int create_subvol(struct inode *dir,
btrfs_i_size_write(dir, dir->i_size + namelen * 2);
ret = btrfs_update_inode(trans, root, dir);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
objectid, root->root_key.objectid,
btrfs_ino(dir), index, name, namelen);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b2d1e95de7be..7dc2284017fa 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -837,10 +837,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
+ /*
+ * If the ordered extent had an error save the error but don't
+ * exit without waiting first for all other ordered extents in
+ * the range to complete.
+ */
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
ret = -EIO;
btrfs_put_ordered_extent(ordered);
- if (ret || end == 0 || end == start)
+ if (end == 0 || end == start)
break;
end--;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index f25233093d68..0355e6d9e21c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -759,10 +759,10 @@ out:
return ret;
}
-static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_root *root)
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root = fs_info->quota_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *l;
@@ -778,7 +778,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -1863,7 +1863,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
spin_unlock(&fs_info->qgroup_lock);
- ret = update_qgroup_status_item(trans, fs_info, quota_root);
+ ret = update_qgroup_status_item(trans);
if (ret)
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2380,9 +2380,6 @@ out:
btrfs_free_path(path);
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (!btrfs_fs_closing(fs_info))
- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-
if (err > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -2398,16 +2395,30 @@ out:
trans = btrfs_start_transaction(fs_info->quota_root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
+ trans = NULL;
btrfs_err(fs_info,
"fail to start transaction for status update: %d\n",
err);
- goto done;
}
- ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
- if (ret < 0) {
- err = ret;
- btrfs_err(fs_info, "fail to update qgroup status: %d", err);
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+ if (!btrfs_fs_closing(fs_info))
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ if (trans) {
+ ret = update_qgroup_status_item(trans);
+ if (ret < 0) {
+ err = ret;
+ btrfs_err(fs_info, "fail to update qgroup status: %d",
+ err);
+ }
}
+ fs_info->qgroup_rescan_running = false;
+ complete_all(&fs_info->qgroup_rescan_completion);
+ mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+ if (!trans)
+ return;
+
btrfs_end_transaction(trans, fs_info->quota_root);
if (btrfs_fs_closing(fs_info)) {
@@ -2418,12 +2429,6 @@ out:
} else {
btrfs_err(fs_info, "qgroup scan failed with %d", err);
}
-
-done:
- mutex_lock(&fs_info->qgroup_rescan_lock);
- fs_info->qgroup_rescan_running = false;
- mutex_unlock(&fs_info->qgroup_rescan_lock);
- complete_all(&fs_info->qgroup_rescan_completion);
}
/*
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index af6a776fa18c..5aa07de5750e 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2395,8 +2395,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
bitmap_clear(rbio->dbitmap, pagenr, 1);
kunmap(p);
- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+ for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ kunmap(p_page);
}
__free_page(p_page);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 75bab76739be..0d1565d71231 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -734,21 +734,19 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
static void reada_start_machine_worker(struct btrfs_work *work)
{
struct reada_machine_work *rmw;
- struct btrfs_fs_info *fs_info;
int old_ioprio;
rmw = container_of(work, struct reada_machine_work, work);
- fs_info = rmw->fs_info;
-
- kfree(rmw);
old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
task_nice_ioprio(current));
set_task_ioprio(current, BTRFS_IOPRIO_READA);
- __reada_start_machine(fs_info);
+ __reada_start_machine(rmw->fs_info);
set_task_ioprio(current, old_ioprio);
- atomic_dec(&fs_info->reada_works_cnt);
+ atomic_dec(&rmw->fs_info->reada_works_cnt);
+
+ kfree(rmw);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
@@ -759,6 +757,7 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
u64 total = 0;
int i;
+again:
do {
enqueued = 0;
mutex_lock(&fs_devices->device_list_mutex);
@@ -771,6 +770,10 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
mutex_unlock(&fs_devices->device_list_mutex);
total += enqueued;
} while (enqueued && total < 10000);
+ if (fs_devices->seed) {
+ fs_devices = fs_devices->seed;
+ goto again;
+ }
if (enqueued == 0)
return;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index b0c3a6afe664..1003b983a8d7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -537,8 +537,8 @@ static int should_ignore_root(struct btrfs_root *root)
if (!reloc_root)
return 0;
- if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
- root->fs_info->running_transaction->transid - 1)
+ if (btrfs_header_generation(reloc_root->commit_root) ==
+ root->fs_info->running_transaction->transid)
return 0;
/*
* if there is reloc tree and it was created in previous
@@ -1185,7 +1185,7 @@ out:
free_backref_node(cache, lower);
}
- free_backref_node(cache, node);
+ remove_backref_node(cache, node);
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
@@ -1296,7 +1296,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
if (!node)
return -ENOMEM;
- node->bytenr = root->node->start;
+ node->bytenr = root->commit_root->start;
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
@@ -1328,10 +1328,11 @@ static void __del_reloc_root(struct btrfs_root *root)
if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ RB_CLEAR_NODE(&node->rb_node);
}
spin_unlock(&rc->reloc_root_tree.lock);
if (!node)
@@ -1349,7 +1350,7 @@ static void __del_reloc_root(struct btrfs_root *root)
* helper to update the 'address of tree root -> reloc tree'
* mapping
*/
-static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
+static int __update_reloc_root(struct btrfs_root *root)
{
struct rb_node *rb_node;
struct mapping_node *node = NULL;
@@ -1357,7 +1358,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1369,7 +1370,7 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&rc->reloc_root_tree.lock);
- node->bytenr = new_bytenr;
+ node->bytenr = root->node->start;
rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
@@ -1519,6 +1520,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
}
if (reloc_root->commit_root != reloc_root->node) {
+ __update_reloc_root(reloc_root);
btrfs_set_root_node(root_item, reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
reloc_root->commit_root = btrfs_root_node(reloc_root);
@@ -2457,7 +2459,21 @@ out:
free_reloc_roots(&reloc_roots);
}
- BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
+ /*
+ * We used to have
+ *
+ * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
+ *
+ * here, but it's wrong. If we fail to start the transaction in
+ * prepare_to_merge() we will have only 0 ref reloc roots, none of which
+ * have actually been removed from the reloc_root_tree rb tree. This is
+ * fine because we're bailing here, and we hold a reference on the root
+ * for the list that holds it, so these roots will be cleaned up when we
+ * do the reloc_dirty_list afterwards. Meanwhile the root->reloc_root
+ * will be cleaned up on unmount.
+ *
+ * The remaining nodes will be cleaned up by free_reloc_control.
+ */
}
static void free_block_list(struct rb_root *blocks)
@@ -4587,6 +4603,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
reloc_root->root_key.offset);
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
+ list_add_tail(&reloc_root->root_list, &reloc_roots);
goto out_free;
}
@@ -4702,11 +4719,6 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
- if (buf == root->node)
- __update_reloc_root(root, cow->start);
- }
-
level = btrfs_header_level(buf);
if (btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index edae751e870c..307b8baaf0e9 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -144,10 +144,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
+ if (ret < 0)
goto out;
- }
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index a45f26ac5da7..edfc7ba38b33 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -37,6 +37,14 @@
#include "compression.h"
/*
+ * Maximum number of references an extent can have in order for us to attempt to
+ * issue clone operations instead of write operations. This currently exists to
+ * avoid hitting limitations of the backreference walking code (taking a lot of
+ * time and using too much memory for extents with a large number of references).
+ */
+#define SEND_MAX_EXTENT_REFS 64
+
+/*
* A fs_path is a helper to dynamically build path names with unknown size.
* It reallocates the internal buffer on demand.
* It allows fast adding of path elements on the right side (normal path) and
@@ -1327,6 +1335,7 @@ static int find_extent_clone(struct send_ctx *sctx,
struct clone_root *cur_clone_root;
struct btrfs_key found_key;
struct btrfs_path *tmp_path;
+ struct btrfs_extent_item *ei;
int compressed;
u32 i;
@@ -1376,7 +1385,6 @@ static int find_extent_clone(struct send_ctx *sctx,
ret = extent_from_logical(fs_info, disk_byte, tmp_path,
&found_key, &flags);
up_read(&fs_info->commit_root_sem);
- btrfs_release_path(tmp_path);
if (ret < 0)
goto out;
@@ -1385,6 +1393,21 @@ static int find_extent_clone(struct send_ctx *sctx,
goto out;
}
+ ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
+ struct btrfs_extent_item);
+ /*
+ * Backreference walking (iterate_extent_inodes() below) is currently
+ * too expensive when an extent has a large number of references, both
+ * in time spent and used memory. So for now just fall back to write
+ * operations instead of clone operations when an extent has more than
+ * a certain amount of references.
+ */
+ if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
+ ret = -ENOENT;
+ goto out;
+ }
+ btrfs_release_path(tmp_path);
+
/*
* Setup the clone roots.
*/
@@ -5835,68 +5858,21 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
- if (result == BTRFS_COMPARE_TREE_CHANGED) {
- struct extent_buffer *leaf_l;
- struct extent_buffer *leaf_r;
- struct btrfs_file_extent_item *ei_l;
- struct btrfs_file_extent_item *ei_r;
-
- leaf_l = sctx->left_path->nodes[0];
- leaf_r = sctx->right_path->nodes[0];
- ei_l = btrfs_item_ptr(leaf_l,
- sctx->left_path->slots[0],
- struct btrfs_file_extent_item);
- ei_r = btrfs_item_ptr(leaf_r,
- sctx->right_path->slots[0],
- struct btrfs_file_extent_item);
-
- /*
- * We may have found an extent item that has changed
- * only its disk_bytenr field and the corresponding
- * inode item was not updated. This case happens due to
- * very specific timings during relocation when a leaf
- * that contains file extent items is COWed while
- * relocation is ongoing and its in the stage where it
- * updates data pointers. So when this happens we can
- * safely ignore it since we know it's the same extent,
- * but just at different logical and physical locations
- * (when an extent is fully replaced with a new one, we
- * know the generation number must have changed too,
- * since snapshot creation implies committing the current
- * transaction, and the inode item must have been updated
- * as well).
- * This replacement of the disk_bytenr happens at
- * relocation.c:replace_file_extents() through
- * relocation.c:btrfs_reloc_cow_block().
- */
- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
- btrfs_file_extent_generation(leaf_r, ei_r) &&
- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_compression(leaf_l, ei_l) ==
- btrfs_file_extent_compression(leaf_r, ei_r) &&
- btrfs_file_extent_encryption(leaf_l, ei_l) ==
- btrfs_file_extent_encryption(leaf_r, ei_r) &&
- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
- btrfs_file_extent_type(leaf_l, ei_l) ==
- btrfs_file_extent_type(leaf_r, ei_r) &&
- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_offset(leaf_l, ei_l) ==
- btrfs_file_extent_offset(leaf_r, ei_r) &&
- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_num_bytes(leaf_r, ei_r))
- return 0;
- }
-
- inconsistent_snapshot_error(sctx, result, "extent");
- return -EIO;
- }
+ /*
+ * We have found an extent item that changed without the inode item
+ * having changed. This can happen either after relocation (where the
+ * disk_bytenr of an extent item is replaced at
+ * relocation.c:replace_file_extents()) or after deduplication into a
+ * file in both the parent and send snapshots (where an extent item can
+ * get modified or replaced with a new one). Note that deduplication
+ * updates the inode item, but it only changes the iversion (sequence
+ * field in the inode item) of the inode, so if a file is deduplicated
+ * the same number of times in both the parent and send snapshots, its
+ * iversion becomes the same in both snapshots, hence the inode item is
+ * the same on both snapshots.
+ */
+ if (sctx->cur_ino != sctx->cmp_key->objectid)
+ return 0;
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a7b69deb6d70..9286603a6a98 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1809,6 +1809,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
if (btrfs_super_log_root(fs_info->super_copy) != 0) {
+ btrfs_warn(fs_info,
+ "mount required to replay tree-log, cannot remount read-write");
ret = -EINVAL;
goto restore;
}
@@ -2164,7 +2166,15 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
*/
thresh = 4 * 1024 * 1024;
- if (!mixed && total_free_meta - thresh < block_rsv->size)
+ /*
+ * We only want to claim there's no available space if we can no longer
+ * allocate chunks for our metadata profile and our global reserve will
+ * not fit in the free metadata space. If we aren't ->full then we
+ * still can allocate chunks and thus are fine using the currently
+ * calculated f_bavail.
+ */
+ if (!mixed && block_rsv->space_info->full &&
+ total_free_meta - thresh < block_rsv->size)
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 1f157fba8940..510cad48e519 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -751,7 +751,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
fs_devs->fsid_kobj.kset = btrfs_kset;
error = kobject_init_and_add(&fs_devs->fsid_kobj,
&btrfs_ktype, parent, "%pU", fs_devs->fsid);
- return error;
+ if (error) {
+ kobject_put(&fs_devs->fsid_kobj);
+ return error;
+ }
+
+ return 0;
}
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index bf62ad919a95..9edc2674b8a7 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -112,7 +112,6 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
spin_lock_init(&fs_info->qgroup_op_lock);
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
- spin_lock_init(&fs_info->tree_mod_seq_lock);
mutex_init(&fs_info->qgroup_ioctl_lock);
mutex_init(&fs_info->qgroup_rescan_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index a724d9a79bd2..5e3b875d87e2 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -476,9 +476,9 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
root->node = alloc_test_extent_buffer(root->fs_info,
nodesize, nodesize);
- if (!root->node) {
- test_msg("Couldn't allocate dummy buffer\n");
- ret = -ENOMEM;
+ if (IS_ERR(root->node)) {
+ test_msg("couldn't allocate dummy buffer\n");
+ ret = PTR_ERR(root->node);
goto out;
}
btrfs_set_header_level(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 9c6666692341..e0aa6b9786fa 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -488,9 +488,9 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
*/
root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
nodesize);
- if (!root->node) {
+ if (IS_ERR(root->node)) {
test_msg("Couldn't allocate dummy buffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(root->node);
goto out;
}
btrfs_set_header_level(root->node, 0);
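The extent_io.c hunk above changes alloc_test_extent_buffer() to return an ERR_PTR instead of NULL on failure, and the two test callers above now check IS_ERR() and propagate PTR_ERR(). As a minimal sketch of that kernel convention only (the demo_* names are hypothetical, not from the patch), an allocator that encodes the failure reason in the returned pointer looks like this:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_item {
	int id;
};

static struct demo_item *demo_alloc_item(int id)
{
	struct demo_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return ERR_PTR(-ENOMEM);	/* report why, not just NULL */
	item->id = id;
	return item;
}

static int demo_use_item(void)
{
	struct demo_item *item = demo_alloc_item(1);

	if (IS_ERR(item))
		return PTR_ERR(item);	/* propagate the encoded errno */
	/* ... use item ... */
	kfree(item);
	return 0;
}

The benefit over a bare NULL is that callers, such as the run_test() hunk above, can report the real errno (here -ENOMEM) rather than guessing it.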
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index fd6c74662e9a..31df020634cd 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1917,6 +1917,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *prev_trans = NULL;
int ret;
+ /*
+ * Some places just start a transaction to commit it. We need to make
+ * sure that if this commit fails that the abort code actually marks the
+ * transaction as failed, so set trans->dirty to make the abort code do
+ * the right thing.
+ */
+ trans->dirty = true;
+
/* Stop the commit early if ->aborted is set */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 47d11a30bee7..f79682937faf 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2827,6 +2827,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
+ * Update or create log root item under the root's log_mutex to prevent
+ * races with concurrent log syncs that can lead to failure to update
+ * log root item because it was not created yet.
+ */
+ ret = update_log_root(trans, log);
+ /*
* IO has been started, blocks of the log tree have WRITTEN flag set
* in their headers. new modifications of the log will be written to
* new positions. so it's safe to allow log writers to go in.
@@ -2845,8 +2851,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
- ret = update_log_root(trans, log);
-
mutex_lock(&log_root_tree->log_mutex);
if (atomic_dec_and_test(&log_root_tree->log_writers)) {
/*
@@ -3343,9 +3347,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- /* find the first key from this transaction again */
+ /*
+ * Find the first key from this transaction again. See the note for
+ * log_new_dir_dentries: if we're logging a directory recursively we
+ * won't be holding its i_mutex, which means we can modify the directory
+ * while we're logging it. If we remove an entry between our first
+ * search and this search we'll not find the key again and can just
+ * bail.
+ */
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
- if (WARN_ON(ret != 0))
+ if (ret != 0)
goto done;
/*
@@ -4432,13 +4443,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, extent) ==
- BTRFS_FILE_EXTENT_INLINE) {
- len = btrfs_file_extent_inline_len(leaf,
- path->slots[0],
- extent);
- ASSERT(len == i_size);
+ BTRFS_FILE_EXTENT_INLINE)
return 0;
- }
len = btrfs_file_extent_num_bytes(leaf, extent);
/* Last extent goes beyond i_size, no need to log a hole. */
@@ -4835,7 +4841,7 @@ again:
err = btrfs_log_inode(trans, root, other_inode,
LOG_OTHER_INODE,
0, LLONG_MAX, ctx);
- iput(other_inode);
+ btrfs_add_delayed_iput(other_inode);
if (err)
goto out_unlock;
else
@@ -5253,7 +5259,7 @@ process_leaf:
}
if (btrfs_inode_in_log(di_inode, trans->transid)) {
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
break;
}
@@ -5265,7 +5271,7 @@ process_leaf:
if (!ret &&
btrfs_must_commit_transaction(trans, di_inode))
ret = 1;
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
if (ctx->log_new_dentries) {
@@ -5411,7 +5417,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
dir_inode, ctx);
- iput(dir_inode);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
@@ -5691,9 +5697,28 @@ again:
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
if (IS_ERR(wc.replay_dest)) {
ret = PTR_ERR(wc.replay_dest);
+
+ /*
+ * We didn't find the subvol, likely because it was
+ * deleted. This is ok, simply skip this log and go to
+ * the next one.
+ *
+ * We need to exclude the root because we can't have
+ * other log replays overwriting this log as we'll read
+ * it back in a few more times. This will keep our
+ * block from being modified, and we'll just bail for
+ * each subsequent pass.
+ */
+ if (ret == -ENOENT)
+ ret = btrfs_pin_extent_for_log_replay(fs_info->extent_root,
+ log->node->start,
+ log->node->len);
free_extent_buffer(log->node);
free_extent_buffer(log->commit_root);
kfree(log);
+
+ if (!ret)
+ goto next;
btrfs_handle_fs_error(fs_info, ret,
"Couldn't read target root for tree log recovery.");
goto error;
@@ -5725,7 +5750,6 @@ again:
&root->highest_objectid);
}
- key.offset = found_key.offset - 1;
wc.replay_dest->log_root = NULL;
free_extent_buffer(log->node);
free_extent_buffer(log->commit_root);
@@ -5733,9 +5757,10 @@ again:
if (ret)
goto error;
-
+next:
if (found_key.offset == 0)
break;
+ key.offset = found_key.offset - 1;
}
btrfs_release_path(path);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 83bb2f2aa83c..ee1c76cf8886 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -335,6 +335,8 @@ again_search_slot:
}
if (ret < 0 && ret != -ENOENT)
goto out;
+ key.offset++;
+ goto again_search_slot;
}
item_size -= sizeof(subid_le);
offset += sizeof(subid_le);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c063ac57c30e..70aa22a8a9cc 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4876,6 +4876,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
for (i = 0; i < map->num_stripes; i++) {
num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
+ map->stripes[i].dev->has_pending_chunks = true;
}
spin_lock(&extent_root->fs_info->free_chunk_lock);
@@ -5071,8 +5072,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_RAID5 |
- BTRFS_BLOCK_GROUP_DUP)) {
+ BTRFS_BLOCK_GROUP_RAID5)) {
max_errors = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
max_errors = 2;
@@ -7250,6 +7250,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
for (i = 0; i < map->num_stripes; i++) {
dev = map->stripes[i].dev;
dev->commit_bytes_used = dev->bytes_used;
+ dev->has_pending_chunks = false;
}
}
unlock_chunks(root);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 9c09aa29d6bd..96c1b847def6 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -62,6 +62,11 @@ struct btrfs_device {
spinlock_t io_lock ____cacheline_aligned;
int running_pending;
+ /* When true, this device has a pending chunk allocation in the
+ * current transaction. Protected by chunk_mutex.
+ */
+ bool has_pending_chunks;
+
/* regular prio bios */
struct btrfs_pending_bios pending_bios;
/* WRITE_SYNC bios */
@@ -307,7 +312,6 @@ struct btrfs_bio {
u64 map_type; /* get from map_lookup->type */
bio_end_io_t *end_io;
struct bio *orig_bio;
- unsigned long flags;
void *private;
atomic_t error;
int max_errors;
diff --git a/fs/buffer.c b/fs/buffer.c
index e2d4a45ae135..90d1fb5ed73d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3041,6 +3041,13 @@ void guard_bio_eod(int op, struct bio *bio)
/* Uhhuh. We've got a bio that straddles the device size! */
truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
+ /*
+ * If the bio contains more than one segment that spans EOD, just return
+ * and let the IO layer turn it into an EIO
+ */
+ if (truncated_bytes > bvec->bv_len)
+ return;
+
/* Truncate the bio.. */
bio->bi_iter.bi_size -= truncated_bytes;
bvec->bv_len -= truncated_bytes;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f916cd7b1918..f5d9835264aa 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -933,6 +933,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+ /* remove from inode's cap rbtree, and clear auth cap */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+ if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
/* remove from session list */
spin_lock(&session->s_cap_lock);
if (session->s_cap_iterator == cap) {
@@ -968,11 +973,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
spin_unlock(&session->s_cap_lock);
- /* remove from inode list */
- rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
-
if (removed)
ceph_put_cap(mdsc, cap);
@@ -1081,20 +1081,23 @@ static int send_cap_msg(struct ceph_mds_session *session,
}
/*
- * Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_ceph_lock.
+ * Queue cap releases when an inode is dropped from our cache.
*/
void ceph_queue_caps_release(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct rb_node *p;
+ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+ * may call __ceph_caps_issued_mask() on a freeing inode. */
+ spin_lock(&ci->i_ceph_lock);
p = rb_first(&ci->i_caps);
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p = rb_next(p);
__ceph_remove_cap(cap, true);
}
+ spin_unlock(&ci->i_ceph_lock);
}
/*
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index cec25691cbae..2ffc7fe8da52 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1471,6 +1471,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
struct ceph_inode_info *dci = ceph_inode(dir);
+ unsigned hash;
switch (dci->i_dir_layout.dl_dir_hash) {
case 0: /* for backward compat */
@@ -1478,8 +1479,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
return dn->d_name.hash;
default:
- return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+ spin_lock(&dn->d_lock);
+ hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
dn->d_name.name, dn->d_name.len);
+ spin_unlock(&dn->d_lock);
+ return hash;
}
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 30d9d9e7057d..049cff197d2a 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -523,6 +523,7 @@ static void ceph_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct ceph_inode_info *ci = ceph_inode(inode);
+ kfree(ci->i_symlink);
kmem_cache_free(ceph_inode_cachep, ci);
}
@@ -554,7 +555,6 @@ void ceph_destroy_inode(struct inode *inode)
ceph_put_snap_realm(mdsc, realm);
}
- kfree(ci->i_symlink);
while ((n = rb_first(&ci->i_fragtree)) != NULL) {
frag = rb_entry(n, struct ceph_inode_frag, node);
rb_erase(n, &ci->i_fragtree);
@@ -741,6 +741,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
int issued = 0, implemented, new_issued;
struct timespec mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_buffer *old_blob = NULL;
struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
@@ -799,7 +800,12 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
ci->i_version = le64_to_cpu(info->version);
inode->i_version++;
inode->i_rdev = le32_to_cpu(info->rdev);
- inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ /* directories have fl_stripe_unit set to zero */
+ if (le32_to_cpu(info->layout.fl_stripe_unit))
+ inode->i_blkbits =
+ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ else
+ inode->i_blkbits = CEPH_BLOCK_SHIFT;
if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
(issued & CEPH_CAP_AUTH_EXCL) == 0) {
@@ -858,7 +864,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = xattr_blob;
if (xattr_blob)
memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1004,8 +1010,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
out:
if (new_cap)
ceph_put_cap(mdsc, new_cap);
- if (xattr_blob)
- ceph_buffer_put(xattr_blob);
+ ceph_buffer_put(old_blob);
+ ceph_buffer_put(xattr_blob);
ceph_put_string(pool_ns);
return err;
}
@@ -1624,7 +1630,6 @@ retry_lookup:
if (IS_ERR(realdn)) {
err = PTR_ERR(realdn);
d_drop(dn);
- dn = NULL;
goto next_item;
}
dn = realdn;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 6cbd0d805c9d..3139fbd4c34e 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1187,6 +1187,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
+
+ if (drop &&
+ ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@@ -3401,7 +3410,9 @@ static void delayed_work(struct work_struct *work)
pr_info("mds%d hung\n", s->s_mds);
}
}
- if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+ if (s->s_state == CEPH_MDS_SESSION_NEW ||
+ s->s_state == CEPH_MDS_SESSION_RESTARTING ||
+ s->s_state == CEPH_MDS_SESSION_REJECTED) {
/* this mds is failed or recovering, just wait */
ceph_put_mds_session(s);
continue;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 411e9df0d40e..3a76ae001360 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -563,7 +563,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
old_snapc = NULL;
update_snapc:
- if (ci->i_head_snapc) {
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ci->i_head_snapc = NULL;
+ } else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
dout(" new snapc is %p\n", new_snapc);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2a8903025853..ec1640f3167b 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -85,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-
static int ceph_sync_fs(struct super_block *sb, int wait)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
@@ -178,6 +177,26 @@ static match_table_t fsopt_tokens = {
{-1, NULL}
};
+/*
+ * Remove adjacent slashes and then the trailing slash, unless it is
+ * the only remaining character.
+ *
+ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
+ */
+static void canonicalize_path(char *path)
+{
+ int i, j = 0;
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] != '/' || j < 1 || path[j - 1] != '/')
+ path[j++] = path[i];
+ }
+
+ if (j > 1 && path[j - 1] == '/')
+ j--;
+ path[j] = '\0';
+}
+
static int parse_fsopt_token(char *c, void *private)
{
struct ceph_mount_options *fsopt = private;
@@ -337,6 +356,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
if (ret)
return ret;
@@ -396,13 +416,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
*/
dev_name_end = strchr(dev_name, '/');
if (dev_name_end) {
- if (strlen(dev_name_end) > 1) {
- fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
- if (!fsopt->server_path) {
- err = -ENOMEM;
- goto out;
- }
+ /*
+ * The server_path will include all characters from userland,
+ * including the leading '/'.
+ */
+ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+ if (!fsopt->server_path) {
+ err = -ENOMEM;
+ goto out;
}
+
+ canonicalize_path(fsopt->server_path);
} else {
dev_name_end = dev_name + strlen(dev_name);
}
@@ -725,7 +749,6 @@ static void destroy_caches(void)
ceph_fscache_unregister();
}
-
/*
* ceph_umount_begin - initiate forced umount. Tear down down the
* mount, skipping steps that may hang while waiting for server(s).
@@ -742,6 +765,12 @@ static void ceph_umount_begin(struct super_block *sb)
return;
}
+static int ceph_remount(struct super_block *sb, int *flags, char *data)
+{
+ sync_filesystem(sb);
+ return 0;
+}
+
static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.destroy_inode = ceph_destroy_inode,
@@ -750,6 +779,7 @@ static const struct super_operations ceph_super_ops = {
.evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
+ .remount_fs = ceph_remount,
.show_options = ceph_show_options,
.statfs = ceph_statfs,
.umount_begin = ceph_umount_begin,
@@ -805,9 +835,6 @@ out:
return root;
}
-
-
-
/*
* mount: join the ceph cluster, and open root directory.
*/
@@ -821,18 +848,14 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
- const char *path;
+ const char *path = fsc->mount_options->server_path ?
+ fsc->mount_options->server_path + 1 : "";
+
err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;
- if (!fsc->mount_options->server_path) {
- path = "";
- dout("mount opening path \\t\n");
- } else {
- path = fsc->mount_options->server_path + 1;
- dout("mount opening path %s\n", path);
- }
+ dout("mount opening path '%s'\n", path);
err = ceph_fs_debugfs_init(fsc);
if (err < 0)
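The canonicalize_path() helper added in the fs/ceph/super.c hunk above collapses duplicate slashes and drops a trailing slash unless it is the only character. A quick way to convince yourself of that behaviour is the small userspace harness below, which copies the same loop and feeds it the example strings from the helper's comment; it is illustrative only and not part of the patch:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Same logic as the kernel helper shown in the hunk above. */
static void canonicalize_path(char *path)
{
	int i, j = 0;

	for (i = 0; path[i] != '\0'; i++) {
		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
			path[j++] = path[i];
	}

	if (j > 1 && path[j - 1] == '/')
		j--;
	path[j] = '\0';
}

int main(void)
{
	char a[] = "//dir1////dir2///";
	char b[] = "///";

	canonicalize_path(a);
	canonicalize_path(b);
	assert(strcmp(a, "/dir1/dir2") == 0);	/* duplicates and trailing '/' removed */
	assert(strcmp(b, "/") == 0);		/* a lone '/' is preserved */
	printf("%s %s\n", a, b);
	return 0;
}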
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 622d5dd9f616..9f18635f78c7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -70,7 +70,7 @@ struct ceph_mount_options {
char *snapdir_name; /* default ".snap" */
char *mds_namespace; /* default NULL */
- char *server_path; /* default "/" */
+ char *server_path; /* default NULL (means "/") */
};
struct ceph_fs_client {
@@ -476,7 +476,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
long long ordered_count)
{
- smp_mb__before_atomic();
+ /*
+ * Makes sure operations that set up the readdir cache (update page
+ * cache and i_size) are strongly ordered w.r.t. the following
+ * atomic64_set() operations.
+ */
+ smp_mb();
atomic64_set(&ci->i_complete_seq[0], release_count);
atomic64_set(&ci->i_complete_seq[1], ordered_count);
}
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 75267cdd5dfd..18b999deed03 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -74,7 +74,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
const char *ns_field = " pool_namespace=";
char buf[128];
size_t len, total_len = 0;
- int ret;
+ ssize_t ret;
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
@@ -98,11 +98,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
if (pool_ns)
total_len += strlen(ns_field) + pool_ns->len;
- if (!size) {
- ret = total_len;
- } else if (total_len > size) {
- ret = -ERANGE;
- } else {
+ ret = total_len;
+ if (size >= total_len) {
memcpy(val, buf, len);
ret = len;
if (pool_name) {
@@ -757,8 +754,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
vxattr = ceph_match_vxattr(inode, name);
if (vxattr) {
err = -ENODATA;
- if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
+ if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
err = vxattr->getxattr_cb(ci, value, size);
+ if (size && size < err)
+ err = -ERANGE;
+ }
return err;
}
@@ -951,6 +951,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
int issued;
int err;
int dirty = 0;
@@ -1019,13 +1020,15 @@ retry:
struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock);
- dout(" preaallocating new blob size=%d\n", required_blob_size);
+ ceph_buffer_put(old_blob); /* Shouldn't be required */
+ dout(" pre-allocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
spin_lock(&ci->i_ceph_lock);
+ /* prealloc_blob can't be released while holding i_ceph_lock */
if (ci->i_xattrs.prealloc_blob)
- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ old_blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = blob;
goto retry;
}
@@ -1041,6 +1044,7 @@ retry:
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
if (lock_snap_rwsem)
up_read(&mdsc->snap_rwsem);
if (dirty)
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 44a240c4bb65..23e0477edf7d 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -134,6 +134,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
ret = -EBUSY;
goto out;
}
+
+ if (new_min < old_min && new_max > old_max) {
+ ret = -EBUSY;
+ goto out;
+ }
+
}
cd->next = *cp;
@@ -330,7 +336,7 @@ static struct kobject *cdev_get(struct cdev *p)
if (owner && !try_module_get(owner))
return NULL;
- kobj = kobject_get(&p->kobj);
+ kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
module_put(owner);
return kobj;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 9156be545b0f..4660208132a2 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -271,9 +271,9 @@ static void dump_referral(const struct dfs_info3_param *ref)
{
cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
- cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
+ cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
ref->flags, ref->server_type);
- cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
+ cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
ref->ref_flag, ref->path_consumed);
}
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 15bac390dff9..10aedc2a4c2d 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
+ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
return;
}
@@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
+ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
mode, *pace_flags);
return;
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ed4736b5bc6..7ae21ad420fb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1157,6 +1157,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
}
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_READ_FLG 1
@@ -1177,6 +1178,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
struct cifsInodeInfo {
bool can_cache_brlcks;
struct list_head llist; /* locks helb by this inode */
+ /*
+ * NOTE: Some code paths call down_read(lock_sem) twice, so
+ * we must always use cifs_down_write() instead of down_write()
+ * for this semaphore to avoid deadlocks.
+ */
struct rw_semaphore lock_sem; /* protect the fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
@@ -1651,6 +1657,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
#endif /* CONFIG_CIFS_ACL */
void cifs_oplock_break(struct work_struct *work);
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index cd8025a249bb..cdf244df91c2 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -138,6 +138,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
struct file_lock *flock, const unsigned int xid);
extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
+extern void cifs_down_write(struct rw_semaphore *sem);
extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
struct file *file,
struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 33e65b71c49a..f2707ff795d4 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -885,6 +885,7 @@ cifs_demultiplex_thread(void *p)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
set_freezable();
+ allow_kernel_signal(SIGKILL);
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
@@ -1201,6 +1202,11 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
const char *delims = "/\\";
size_t len;
+ if (unlikely(!devname || !*devname)) {
+ cifs_dbg(VFS, "Device name not specified.\n");
+ return -EINVAL;
+ }
+
/* make sure we have a valid UNC double delimiter prefix */
len = strspn(devname, delims);
if (len != 2)
@@ -2216,7 +2222,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
task = xchg(&server->tsk, NULL);
if (task)
- force_sig(SIGKILL, task);
+ send_sig(SIGKILL, task, 1);
}
static struct TCP_Server_Info *
@@ -2442,6 +2448,7 @@ static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
+ int is_domain = 0;
const char *delim, *payload;
char *desc;
ssize_t len;
@@ -2489,6 +2496,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
rc = PTR_ERR(key);
goto out_err;
}
+ is_domain = 1;
}
down_read(&key->sem);
@@ -2546,6 +2554,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
goto out_key_put;
}
+ /*
+ * If we have a domain key then we must set the domainName in the
+ * vol structure for the request.
+ */
+ if (is_domain && ses->domainName) {
+ vol->domainname = kstrndup(ses->domainName,
+ strlen(ses->domainName),
+ GFP_KERNEL);
+ if (!vol->domainname) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+ "domain\n", len);
+ rc = -ENOMEM;
+ kfree(vol->username);
+ vol->username = NULL;
+ kzfree(vol->password);
+ vol->password = NULL;
+ goto out_key_put;
+ }
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -2899,8 +2927,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
- bool old_set = old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
- bool new_set = new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH;
+ bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ old->prepath;
+ bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ new->prepath;
if (old_set && new_set && !strcmp(new->prepath, old->prepath))
return 1;
@@ -3371,7 +3401,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
+ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e98e24eaa6a8..0262c8f7e7c7 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -551,7 +551,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (server->ops->close)
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
- fput(file);
rc = -ENOMEM;
}
@@ -830,10 +829,16 @@ lookup_out:
static int
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
+ struct inode *inode;
+
if (flags & LOOKUP_RCU)
return -ECHILD;
if (d_really_is_positive(direntry)) {
+ inode = d_inode(direntry);
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
if (cifs_revalidate_dentry(direntry))
return 0;
else {
@@ -844,7 +849,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
* attributes will have been updated by
* cifs_revalidate_dentry().
*/
- if (IS_AUTOMOUNT(d_inode(direntry)) &&
+ if (IS_AUTOMOUNT(inode) &&
!(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
spin_lock(&direntry->d_lock);
direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1c5099fffaec..09d83275c20b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -252,6 +252,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, fid);
+ if (rc) {
+ server->ops->close(xid, tcon, fid);
+ if (rc == -ESTALE)
+ rc = -EOPENSTALE;
+ }
+
out:
kfree(buf);
return rc;
@@ -274,6 +280,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
return has_locks;
}
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+ while (!down_write_trylock(sem))
+ msleep(10);
+}
+
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
@@ -299,9 +312,6 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
INIT_LIST_HEAD(&fdlocks->locks);
fdlocks->cfile = cfile;
cfile->llist = fdlocks;
- down_write(&cinode->lock_sem);
- list_add(&fdlocks->llist, &cinode->llist);
- up_write(&cinode->lock_sem);
cfile->count = 1;
cfile->pid = current->tgid;
@@ -325,6 +335,10 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
oplock = 0;
}
+ cifs_down_write(&cinode->lock_sem);
+ list_add(&fdlocks->llist, &cinode->llist);
+ up_write(&cinode->lock_sem);
+
spin_lock(&tcon->open_file_lock);
if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
oplock = fid->pending_open->oplock;
@@ -358,13 +372,31 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
return cifs_file;
}
-/*
- * Release a reference on the file private data. This may involve closing
- * the filehandle out on the server. Must be called without holding
- * tcon->open_file_lock and cifs_file->file_info_lock.
+/**
+ * cifsFileInfo_put - release a reference of file priv data
+ *
+ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
+ _cifsFileInfo_put(cifs_file, true);
+}
+
+/**
+ * _cifsFileInfo_put - release a reference of file priv data
+ *
+ * This may involve closing the filehandle @cifs_file out on the
+ * server. Must be called without holding tcon->open_file_lock and
+ * cifs_file->file_info_lock.
+ *
+ * If @wait_oplock_handler is true and we are releasing the last
+ * reference, wait for any running oplock break handler of the file
+ * and cancel any pending one. If calling this function from the
+ * oplock break handler, you need to pass false.
+ *
+ */
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+{
struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -411,7 +443,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
spin_unlock(&tcon->open_file_lock);
- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
+ oplock_break_cancelled = wait_oplock_handler ?
+ cancel_work_sync(&cifs_file->oplock_break) : false;
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
@@ -432,7 +465,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway.
*/
- down_write(&cifsi->lock_sem);
+ cifs_down_write(&cifsi->lock_sem);
list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
@@ -689,6 +722,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (cfile->f_flags & O_SYNC)
+ create_options |= CREATE_WRITE_THROUGH;
+
+ if (cfile->f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -986,7 +1026,7 @@ static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
}
@@ -1008,7 +1048,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
try_again:
exist = false;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
lock->type, &conf_lock, CIFS_LOCK_OP);
@@ -1030,7 +1070,7 @@ try_again:
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_del_init(&lock->blist);
}
@@ -1083,7 +1123,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
return rc;
try_again:
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@@ -1287,7 +1327,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
int rc = 0;
/* we are going to update can_cache_brlcks here - need a write access */
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem);
return rc;
@@ -1476,7 +1516,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
if (!buf)
return -ENOMEM;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
@@ -1627,8 +1667,20 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
rc = server->ops->mand_unlock_range(cfile, flock, xid);
out:
- if (flock->fl_flags & FL_POSIX && !rc)
+ if (flock->fl_flags & FL_POSIX) {
+ /*
+ * If this is a request to remove all locks because we
+ * are closing the file, it doesn't matter if the
+ * unlocking failed as both cifs.ko and the SMB server
+ * remove the lock on file close
+ */
+ if (rc) {
+ cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
+ if (!(flock->fl_flags & FL_CLOSE))
+ return rc;
+ }
rc = locks_lock_file_wait(file, flock);
+ }
return rc;
}
@@ -2861,7 +2913,9 @@ cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
}
if (rc) {
- for (i = 0; i < nr_pages; i++) {
+ unsigned int nr_page_failed = i;
+
+ for (i = 0; i < nr_page_failed; i++) {
put_page(rdata->pages[i]);
rdata->pages[i] = NULL;
}
@@ -3901,6 +3955,7 @@ void cifs_oplock_break(struct work_struct *work)
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
cifs_done_oplock_break(cinode);
}
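
The new cifs_down_write() helper above deliberately never waits in the rwsem's writer queue: it polls down_write_trylock() and sleeps between attempts, so a code path that calls down_read(lock_sem) twice cannot deadlock behind a queued writer. A minimal userspace sketch of the same pattern, using a POSIX rwlock and hypothetical names (not part of the patch):

#include <pthread.h>
#include <unistd.h>

/* Keep retrying instead of joining the writer queue, so a reader that
 * already holds the lock and takes it a second time can still get in
 * rather than deadlocking behind a blocked writer. */
static void write_lock_polled(pthread_rwlock_t *lock)
{
    while (pthread_rwlock_trywrlock(lock) != 0)
        usleep(10 * 1000);    /* back off ~10 ms, as the patch does */
}

The cost is that a contended writer spins in 10 ms steps instead of sleeping on the lock, which is presumably acceptable here because the write-side critical sections are short.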
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 57c938ffeb6e..dfa85ad5b481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -405,6 +405,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -412,6 +413,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* if filetype is different, return error */
if (unlikely(((*pinode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*pinode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgiiu_exit;
}
@@ -771,43 +773,50 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
} else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
(strcmp(server->vals->version_string, SMB1_VERSION_STRING)
== 0)) {
- /*
- * For SMB2 and later the backup intent flag is already
- * sent if needed on open and there is no path based
- * FindFirst operation to use to retry with
- */
+ /*
+ * For SMB2 and later the backup intent flag is already
+ * sent if needed on open and there is no path based
+ * FindFirst operation to use to retry with
+ */
- srchinf = kzalloc(sizeof(struct cifs_search_info),
- GFP_KERNEL);
- if (srchinf == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
- }
+ srchinf = kzalloc(sizeof(struct cifs_search_info),
+ GFP_KERNEL);
+ if (srchinf == NULL) {
+ rc = -ENOMEM;
+ goto cgii_exit;
+ }
- srchinf->endOfSearch = false;
+ srchinf->endOfSearch = false;
+ if (tcon->unix_ext)
+ srchinf->info_level = SMB_FIND_FILE_UNIX;
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0)
+ srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ else /* no srvino useful for fallback to some netapp */
+ srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
- CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_BACKUP_SEARCH;
+ srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
+ CIFS_SEARCH_CLOSE_AT_END |
+ CIFS_SEARCH_BACKUP_SEARCH;
- rc = CIFSFindFirst(xid, tcon, full_path,
- cifs_sb, NULL, srchflgs, srchinf, false);
- if (!rc) {
- data =
- (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ rc = CIFSFindFirst(xid, tcon, full_path,
+ cifs_sb, NULL, srchflgs, srchinf, false);
+ if (!rc) {
+ data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
- cifs_dir_info_to_fattr(&fattr,
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
- fattr.cf_uniqueid = le64_to_cpu(
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
- validinum = true;
+ cifs_dir_info_to_fattr(&fattr,
+ (FILE_DIRECTORY_INFO *)data, cifs_sb);
+ fattr.cf_uniqueid = le64_to_cpu(
+ ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+ validinum = true;
- cifs_buf_release(srchinf->ntwrk_buf_start);
- }
- kfree(srchinf);
- if (rc)
- goto cgii_exit;
+ cifs_buf_release(srchinf->ntwrk_buf_start);
+ }
+ kfree(srchinf);
+ if (rc)
+ goto cgii_exit;
} else
goto cgii_exit;
@@ -910,6 +919,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* if uniqueid is different, return error */
if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgii_exit;
}
@@ -917,6 +927,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* if filetype is different, return error */
if (unlikely(((*inode)->i_mode & S_IFMT) !=
(fattr.cf_mode & S_IFMT))) {
+ CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
goto cgii_exit;
}
@@ -1562,7 +1573,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
struct TCP_Server_Info *server;
char *full_path;
- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
+ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
@@ -1715,6 +1726,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (rc == 0 || rc != -EBUSY)
goto do_rename_exit;
+ /* Don't fall back to using SMB on SMB 2+ mount */
+ if (server->vals->protocol_id != 0)
+ goto do_rename_exit;
+
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;
@@ -1975,6 +1990,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
struct inode *inode = d_inode(dentry);
struct super_block *sb = dentry->d_sb;
char *full_path = NULL;
+ int count = 0;
if (inode == NULL)
return -ENOENT;
@@ -1996,15 +2012,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
full_path, inode, inode->i_count.counter,
dentry, cifs_get_time(dentry), jiffies);
+again:
if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
-
+ if (rc == -EAGAIN && count++ < 10)
+ goto again;
out:
kfree(full_path);
free_xid(xid);
+
return rc;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 50559a80acf8..5e75df69062d 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -494,8 +494,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&pCifsInode->flags);
- queue_work(cifsoplockd_wq,
- &netfile->oplock_break);
+ cifs_queue_oplock_break(netfile);
netfile->oplock_break_cancelled = false;
spin_unlock(&tcon->open_file_lock);
@@ -592,6 +591,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
spin_unlock(&cinode->writers_lock);
}
+/**
+ * cifs_queue_oplock_break - queue the oplock break handler for cfile
+ *
+ * This function is called from the demultiplex thread when it
+ * receives an oplock break for @cfile.
+ *
+ * Assumes the tcon->open_file_lock is held.
+ * Assumes cfile->file_info_lock is NOT held.
+ */
+void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
+{
+ /*
+ * Bump the handle refcount now while we hold the
+ * open_file_lock to enforce the validity of it for the oplock
+ * break handler. The matching put is done at the end of the
+ * handler.
+ */
+ cifsFileInfo_get(cfile);
+
+ queue_work(cifsoplockd_wq, &cfile->oplock_break);
+}
+
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
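
cifs_queue_oplock_break() above pins the cifsFileInfo with cifsFileInfo_get() before queueing the work, and cifs_oplock_break() drops that reference at the end of the handler via _cifsFileInfo_put(cfile, false), so the handle stays valid for the whole break and the handler never waits on itself. A small userspace sketch of that reference-per-queued-work rule, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdlib.h>

struct handle {
    atomic_int refcount;
    /* ... payload ... */
};

static struct handle *handle_get(struct handle *h)
{
    atomic_fetch_add(&h->refcount, 1);
    return h;
}

static void handle_put(struct handle *h)
{
    /* atomic_fetch_sub() returns the old value; 1 means this was the
     * last reference. */
    if (atomic_fetch_sub(&h->refcount, 1) == 1)
        free(h);
}

/* Queue side: take the extra reference while the object is known to be
 * valid (the patch does this under tcon->open_file_lock). */
static void queue_break_work(struct handle *h, void (*queue)(void *arg))
{
    queue(handle_get(h));
}

/* Worker side: the matching put, done by the handler itself so it never
 * has to wait for, or cancel, its own work item. */
static void break_worker(void *arg)
{
    struct handle *h = arg;

    /* ... process the oplock break ... */
    handle_put(h);
}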
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index cc88f4f0325e..bed973330227 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -130,10 +130,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{0, 0}
};
-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
- {0, 0}
-};
-
/*
* Convert a string containing text IPv4 or IPv6 address to binary form.
*
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index efd72e1fae74..6f5d78b172ba 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -180,6 +180,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
/* we do not want to loop forever */
last_mid = cur_mid;
cur_mid++;
+ /* avoid 0xFFFF MID */
+ if (cur_mid == 0xffff)
+ cur_mid++;
/*
* This nested loop looks more expensive than it is.
@@ -305,7 +308,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
remaining = tgt_total_cnt - total_in_tgt;
if (remaining < 0) {
- cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
+ cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
tgt_total_cnt, total_in_tgt);
return -EPROTO;
}
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index dee5250701de..4dcce3f034f4 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -69,7 +69,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
goto out;
- if (oparms->tcon->use_resilient) {
+ if (oparms->tcon->use_resilient) {
nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
@@ -138,7 +138,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
cur = buf;
- down_write(&cinode->lock_sem);
+ cifs_down_write(&cinode->lock_sem);
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 98c25b969ab8..7e93d5706bf6 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -1034,7 +1034,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
"STATUS_UNFINISHED_CONTEXT_DELETED"},
{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
- {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+ /* Note that ENOATTR and ENODATA are the same errno */
+ {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
"STATUS_WRONG_CREDENTIAL_HANDLE"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 244d27bb8fba..7b7b47e26dbd 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -512,7 +512,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
- queue_work(cifsoplockd_wq, &cfile->oplock_break);
+ cifs_queue_oplock_break(cfile);
kfree(lw);
return true;
}
@@ -617,10 +617,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+
list_for_each(tmp1, &ses->tcon_list) {
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
cfile = list_entry(tmp2, struct cifsFileInfo,
@@ -632,6 +632,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
continue;
cifs_dbg(FYI, "file id match, oplock break\n");
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
cinode = CIFS_I(d_inode(cfile->dentry));
spin_lock(&cfile->file_info_lock);
if (!CIFS_CACHE_WRITE(cinode) &&
@@ -656,17 +658,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
spin_unlock(&cfile->file_info_lock);
- queue_work(cifsoplockd_wq,
- &cfile->oplock_break);
+
+ cifs_queue_oplock_break(cfile);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
spin_unlock(&tcon->open_file_lock);
- spin_unlock(&cifs_tcp_ses_lock);
- cifs_dbg(FYI, "No matching file for oplock break\n");
- return true;
}
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 97d8e2a3df9b..67d9b7a277a3 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1413,26 +1413,33 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
unsigned int epoch, bool *purge_cache)
{
char message[5] = {0};
+ unsigned int new_oplock = 0;
oplock &= 0xFF;
if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
return;
- cinode->oplock = 0;
+ /* Check if the server granted an oplock rather than a lease */
+ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
+ return smb2_set_oplock_level(cinode, oplock, epoch,
+ purge_cache);
+
if (oplock & SMB2_LEASE_READ_CACHING_HE) {
- cinode->oplock |= CIFS_CACHE_READ_FLG;
+ new_oplock |= CIFS_CACHE_READ_FLG;
strcat(message, "R");
}
if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
- cinode->oplock |= CIFS_CACHE_HANDLE_FLG;
+ new_oplock |= CIFS_CACHE_HANDLE_FLG;
strcat(message, "H");
}
if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
- cinode->oplock |= CIFS_CACHE_WRITE_FLG;
+ new_oplock |= CIFS_CACHE_WRITE_FLG;
strcat(message, "W");
}
- if (!cinode->oplock)
- strcat(message, "None");
+ if (!new_oplock)
+ strncpy(message, "None", sizeof(message));
+
+ cinode->oplock = new_oplock;
cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
&cinode->vfs_inode);
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 52b6e4a40748..e8dc28dbe563 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT)
+ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -247,9 +247,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
*/
mutex_lock(&tcon->ses->session_mutex);
rc = cifs_negotiate_protocol(0, tcon->ses);
- if (!rc && tcon->ses->need_reconnect)
+ if (!rc && tcon->ses->need_reconnect) {
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
-
+ if ((rc == -EACCES) && !tcon->retry) {
+ rc = -EHOSTDOWN;
+ mutex_unlock(&tcon->ses->session_mutex);
+ goto failed;
+ }
+ }
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
@@ -291,6 +296,7 @@ out:
case SMB2_SET_INFO:
rc = -EAGAIN;
}
+failed:
unload_nls(nls_codepage);
return rc;
}
@@ -660,7 +666,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
else
req->SecurityMode = 0;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 20af5187ba63..6634ad3567e0 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -31,7 +31,7 @@
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
-#define MAX_EA_VALUE_SIZE 65535
+#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 6e0154eb6fcc..649d17edc071 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -60,6 +60,41 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
return ret;
}
+struct coda_vm_ops {
+ atomic_t refcnt;
+ struct file *coda_file;
+ const struct vm_operations_struct *host_vm_ops;
+ struct vm_operations_struct vm_ops;
+};
+
+static void
+coda_vm_open(struct vm_area_struct *vma)
+{
+ struct coda_vm_ops *cvm_ops =
+ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+ atomic_inc(&cvm_ops->refcnt);
+
+ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
+ cvm_ops->host_vm_ops->open(vma);
+}
+
+static void
+coda_vm_close(struct vm_area_struct *vma)
+{
+ struct coda_vm_ops *cvm_ops =
+ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
+
+ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
+ cvm_ops->host_vm_ops->close(vma);
+
+ if (atomic_dec_and_test(&cvm_ops->refcnt)) {
+ vma->vm_ops = cvm_ops->host_vm_ops;
+ fput(cvm_ops->coda_file);
+ kfree(cvm_ops);
+ }
+}
+
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
@@ -67,6 +102,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
struct coda_inode_info *cii;
struct file *host_file;
struct inode *coda_inode, *host_inode;
+ struct coda_vm_ops *cvm_ops;
+ int ret;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -75,6 +112,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
if (!host_file->f_op->mmap)
return -ENODEV;
+ if (WARN_ON(coda_file != vma->vm_file))
+ return -EIO;
+
+ cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
+ if (!cvm_ops)
+ return -ENOMEM;
+
coda_inode = file_inode(coda_file);
host_inode = file_inode(host_file);
@@ -88,6 +132,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
* the container file on us! */
else if (coda_inode->i_mapping != host_inode->i_mapping) {
spin_unlock(&cii->c_lock);
+ kfree(cvm_ops);
return -EBUSY;
}
@@ -96,7 +141,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
cfi->cfi_mapcount++;
spin_unlock(&cii->c_lock);
- return host_file->f_op->mmap(host_file, vma);
+ vma->vm_file = get_file(host_file);
+ ret = host_file->f_op->mmap(host_file, vma);
+
+ if (ret) {
+ /* if call_mmap fails, our caller will put coda_file so we
+ * should drop the reference to the host_file that we got.
+ */
+ fput(host_file);
+ kfree(cvm_ops);
+ } else {
+ /* here we add redirects for the open/close vm_operations */
+ cvm_ops->host_vm_ops = vma->vm_ops;
+ if (vma->vm_ops)
+ cvm_ops->vm_ops = *vma->vm_ops;
+
+ cvm_ops->vm_ops.open = coda_vm_open;
+ cvm_ops->vm_ops.close = coda_vm_close;
+ cvm_ops->coda_file = coda_file;
+ atomic_set(&cvm_ops->refcnt, 1);
+
+ vma->vm_ops = &cvm_ops->vm_ops;
+ }
+ return ret;
}
int coda_open(struct inode *coda_inode, struct file *coda_file)
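
The coda change above wraps the host file's vm_operations in a refcounted coda_vm_ops so mapping open/close can be intercepted; only when the last mapping goes away does it restore the host table, drop the pinned file and free the wrapper. A rough userspace sketch of that ops-wrapping shape, with hypothetical names (the real patch additionally pins coda_file with get_file()/fput()):

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct area;                            /* stand-in for vm_area_struct */

struct vm_ops {
    void (*open)(struct area *a);
    void (*close)(struct area *a);
};

struct area {
    const struct vm_ops *ops;           /* stand-in for vma->vm_ops */
};

struct vm_wrapper {
    atomic_int refcnt;
    const struct vm_ops *host_ops;      /* the table being shadowed */
    struct vm_ops ops;                  /* copy with our hooks installed */
};

static void wrapper_open(struct area *a)
{
    struct vm_wrapper *w = container_of(a->ops, struct vm_wrapper, ops);

    atomic_fetch_add(&w->refcnt, 1);
    if (w->host_ops && w->host_ops->open)
        w->host_ops->open(a);
}

static void wrapper_close(struct area *a)
{
    struct vm_wrapper *w = container_of(a->ops, struct vm_wrapper, ops);

    if (w->host_ops && w->host_ops->close)
        w->host_ops->close(a);

    if (atomic_fetch_sub(&w->refcnt, 1) == 1) {
        a->ops = w->host_ops;           /* put the original table back */
        free(w);
    }
}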
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 822629126e89..ff9b5cf8ff01 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
if (req->uc_opcode == CODA_OPEN_BY_FD) {
struct coda_open_by_fd_out *outp =
(struct coda_open_by_fd_out *)req->uc_data;
- if (!outp->oh.result)
+ if (!outp->oh.result) {
outp->fh = fget(outp->fd);
+ if (!outp->fh)
+ return -EBADF;
+ }
}
wake_up(&req->uc_sleep);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 93c8e4a4bbd3..02ac9067a354 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -160,6 +160,7 @@ struct compat_video_event {
unsigned int frame_rate;
} u;
};
+#define VIDEO_GET_EVENT32 _IOR('o', 28, struct compat_video_event)
static int do_video_get_event(struct file *file,
unsigned int cmd, struct compat_video_event __user *up)
@@ -171,7 +172,7 @@ static int do_video_get_event(struct file *file,
if (kevent == NULL)
return -EFAULT;
- err = do_ioctl(file, cmd, (unsigned long)kevent);
+ err = do_ioctl(file, VIDEO_GET_EVENT, (unsigned long)kevent);
if (!err) {
err = convert_in_user(&kevent->type, &up->type);
err |= convert_in_user(&kevent->timestamp, &up->timestamp);
@@ -190,6 +191,7 @@ struct compat_video_still_picture {
compat_uptr_t iFrame;
int32_t size;
};
+#define VIDEO_STILLPICTURE32 _IOW('o', 30, struct compat_video_still_picture)
static int do_video_stillpicture(struct file *file,
unsigned int cmd, struct compat_video_still_picture __user *up)
@@ -212,7 +214,7 @@ static int do_video_stillpicture(struct file *file,
if (err)
return -EFAULT;
- err = do_ioctl(file, cmd, (unsigned long) up_native);
+ err = do_ioctl(file, VIDEO_STILLPICTURE, (unsigned long) up_native);
return err;
}
@@ -1038,9 +1040,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
@@ -1487,9 +1486,9 @@ static long do_ioctl_trans(unsigned int cmd,
return rtc_ioctl(file, cmd, argp);
/* dvb */
- case VIDEO_GET_EVENT:
+ case VIDEO_GET_EVENT32:
return do_video_get_event(file, cmd, argp);
- case VIDEO_STILLPICTURE:
+ case VIDEO_STILLPICTURE32:
return do_video_stillpicture(file, cmd, argp);
case VIDEO_SET_SPU_PALETTE:
return do_video_set_spu_palette(file, cmd, argp);
@@ -1586,9 +1585,10 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
#endif
case FICLONE:
+ goto do_ioctl;
case FICLONERANGE:
case FIDEDUPERANGE:
- goto do_ioctl;
+ goto found_handler;
case FIBMAP:
case FIGETBSZ:
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index ccc31fa6f1a7..16eb59adf5aa 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -34,6 +34,15 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+struct configfs_fragment {
+ atomic_t frag_count;
+ struct rw_semaphore frag_sem;
+ bool frag_dead;
+};
+
+void put_fragment(struct configfs_fragment *);
+struct configfs_fragment *get_fragment(struct configfs_fragment *);
+
struct configfs_dirent {
atomic_t s_count;
int s_dependent_count;
@@ -48,6 +57,7 @@ struct configfs_dirent {
#ifdef CONFIG_LOCKDEP
int s_depth;
#endif
+ struct configfs_fragment *s_frag;
};
#define CONFIGFS_ROOT 0x0001
@@ -75,8 +85,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
const struct configfs_bin_attribute *);
-extern int configfs_make_dirent(struct configfs_dirent *,
- struct dentry *, void *, umode_t, int);
+extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
+ void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);
extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -151,6 +161,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
{
if (!(sd->s_type & CONFIGFS_ROOT)) {
kfree(sd->s_iattr);
+ put_fragment(sd->s_frag);
kmem_cache_free(configfs_dir_cachep, sd);
}
}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index d2a1a79fa324..c2ef617d2f97 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
if (sd) {
/* Coordinate with configfs_readdir */
spin_lock(&configfs_dirent_lock);
- /* Coordinate with configfs_attach_attr where will increase
- * sd->s_count and update sd->s_dentry to new allocated one.
- * Only set sd->dentry to null when this dentry is the only
- * sd owner.
- * If not do so, configfs_d_iput may run just after
- * configfs_attach_attr and set sd->s_dentry to null
- * even it's still in use.
+ /*
+ * Set sd->s_dentry to null only when this dentry is the one
+ * that is going to be killed. Otherwise configfs_d_iput may
+ * run just after configfs_attach_attr and set sd->s_dentry to
+ * NULL even if it's still in use.
*/
- if (atomic_read(&sd->s_count) <= 2)
+ if (sd->s_dentry == dentry)
sd->s_dentry = NULL;
spin_unlock(&configfs_dirent_lock);
@@ -166,11 +164,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
#endif /* CONFIG_LOCKDEP */
+static struct configfs_fragment *new_fragment(void)
+{
+ struct configfs_fragment *p;
+
+ p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
+ if (p) {
+ atomic_set(&p->frag_count, 1);
+ init_rwsem(&p->frag_sem);
+ p->frag_dead = false;
+ }
+ return p;
+}
+
+void put_fragment(struct configfs_fragment *frag)
+{
+ if (frag && atomic_dec_and_test(&frag->frag_count))
+ kfree(frag);
+}
+
+struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
+{
+ if (likely(frag))
+ atomic_inc(&frag->frag_count);
+ return frag;
+}
+
/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
- void *element, int type)
+ void *element, int type,
+ struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
@@ -190,6 +215,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
+ sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
@@ -224,11 +250,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
- umode_t mode, int type)
+ umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
- sd = configfs_new_dirent(parent_sd, element, type);
+ sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);
@@ -275,7 +301,8 @@ static void init_symlink(struct inode * inode)
* until it is validated by configfs_dir_set_ready()
*/
-static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -288,7 +315,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
return error;
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
- CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+ CONFIGFS_DIR | CONFIGFS_USET_CREATING,
+ frag);
if (unlikely(error))
return error;
@@ -353,9 +381,10 @@ int configfs_create_link(struct configfs_symlink *sl,
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
+ struct configfs_dirent *p = parent->d_fsdata;
- err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
- CONFIGFS_ITEM_LINK);
+ err = configfs_make_dirent(p, dentry, sl, mode,
+ CONFIGFS_ITEM_LINK, p->s_frag);
if (!err) {
err = configfs_create(dentry, mode, init_symlink);
if (err) {
@@ -614,7 +643,8 @@ static int populate_attrs(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry);
+ struct dentry *dentry,
+ struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);
static void detach_groups(struct config_group *group)
@@ -662,7 +692,8 @@ static void detach_groups(struct config_group *group)
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
- struct config_group *group)
+ struct config_group *group,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
@@ -678,7 +709,7 @@ static int create_default_group(struct config_group *parent_group,
d_add(child, NULL);
ret = configfs_attach_group(&parent_group->cg_item,
- &group->cg_item, child);
+ &group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -692,13 +723,14 @@ static int create_default_group(struct config_group *parent_group,
return ret;
}
-static int populate_groups(struct config_group *group)
+static int populate_groups(struct config_group *group,
+ struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;
list_for_each_entry(new_group, &group->default_groups, group_entry) {
- ret = create_default_group(group, new_group);
+ ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
@@ -812,11 +844,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
- ret = configfs_create_dir(item, dentry);
+ ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
@@ -846,12 +879,13 @@ static void configfs_detach_item(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
@@ -867,7 +901,7 @@ static int configfs_attach_group(struct config_item *parent_item,
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
- ret = populate_groups(to_config_group(item));
+ ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
@@ -1262,6 +1296,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct configfs_dirent *sd;
struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
+ struct configfs_fragment *frag;
char *name;
sd = dentry->d_parent->d_fsdata;
@@ -1280,6 +1315,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out;
}
+ frag = new_fragment();
+ if (!frag) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -1382,9 +1423,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
spin_unlock(&configfs_dirent_lock);
if (group)
- ret = configfs_attach_group(parent_item, item, dentry);
+ ret = configfs_attach_group(parent_item, item, dentry, frag);
else
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1421,6 +1462,7 @@ out_put:
* reference.
*/
config_item_put(parent_item);
+ put_fragment(frag);
out:
return ret;
@@ -1432,6 +1474,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
@@ -1489,6 +1532,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
}
} while (ret == -EAGAIN);
+ frag = sd->s_frag;
+ if (down_write_killable(&frag->frag_sem)) {
+ spin_lock(&configfs_dirent_lock);
+ configfs_detach_rollback(dentry);
+ spin_unlock(&configfs_dirent_lock);
+ return -EINTR;
+ }
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
@@ -1589,7 +1642,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
- file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+ file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
@@ -1745,8 +1798,13 @@ int configfs_register_group(struct config_group *parent_group,
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
+ struct configfs_fragment *frag;
int ret;
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
+
mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
@@ -1754,13 +1812,22 @@ int configfs_register_group(struct config_group *parent_group,
parent = parent_group->cg_item.ci_dentry;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
- ret = create_default_group(parent_group, group);
- if (!ret) {
- spin_lock(&configfs_dirent_lock);
- configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
- spin_unlock(&configfs_dirent_lock);
- }
+ ret = create_default_group(parent_group, group, frag);
+ if (ret)
+ goto err_out;
+
+ spin_lock(&configfs_dirent_lock);
+ configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
+ spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
+ put_fragment(frag);
+ return 0;
+err_out:
+ inode_unlock(d_inode(parent));
+ mutex_lock(&subsys->su_mutex);
+ unlink_group(group);
+ mutex_unlock(&subsys->su_mutex);
+ put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
@@ -1776,16 +1843,12 @@ void configfs_unregister_group(struct config_group *group)
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
- mutex_lock(&subsys->su_mutex);
- if (!group->cg_item.ci_parent->ci_group) {
- /*
- * The parent has already been unlinked and detached
- * due to a rmdir.
- */
- goto unlink_group;
- }
- mutex_unlock(&subsys->su_mutex);
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
@@ -1801,7 +1864,6 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);
mutex_lock(&subsys->su_mutex);
-unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
@@ -1858,10 +1920,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
+
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
root = configfs_pin_fs();
- if (IS_ERR(root))
+ if (IS_ERR(root)) {
+ put_fragment(frag);
return PTR_ERR(root);
+ }
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1877,7 +1946,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
d_add(dentry, NULL);
err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry);
+ dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
@@ -1895,6 +1964,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
unlink_group(group);
configfs_release_fs();
}
+ put_fragment(frag);
return err;
}
@@ -1904,12 +1974,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 2c6312db8516..7285440bc62e 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -53,40 +53,44 @@ struct configfs_buffer {
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
+ int cb_max_size;
+ struct config_item *item;
+ struct module *owner;
+ union {
+ struct configfs_attribute *attr;
+ struct configfs_bin_attribute *bin_attr;
+ };
};
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+ struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
-/**
- * fill_read_buffer - allocate and fill buffer from item.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * config_item's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+ return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
- int ret = 0;
- ssize_t count;
+ struct configfs_fragment *frag = to_frag(file);
+ ssize_t count = -ENOENT;
if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;
- count = attr->show(item, buffer->page);
-
- BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
- if (count >= 0) {
- buffer->needs_read_fill = 0;
- buffer->count = count;
- } else
- ret = count;
- return ret;
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ count = buffer->attr->show(buffer->item, buffer->page);
+ up_read(&frag->frag_sem);
+
+ if (count < 0)
+ return count;
+ if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+ return -EIO;
+ buffer->needs_read_fill = 0;
+ buffer->count = count;
+ return 0;
}
/**
@@ -111,12 +115,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
- if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+ retval = fill_read_buffer(file, buffer);
+ if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -152,10 +157,8 @@ static ssize_t
configfs_read_bin_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t retval = 0;
ssize_t len = min_t(size_t, count, PAGE_SIZE);
@@ -166,18 +169,23 @@ configfs_read_bin_file(struct file *file, char __user *buf,
retval = -ETXTBSY;
goto out;
}
- buffer->read_in_progress = 1;
+ buffer->read_in_progress = true;
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
- len = bin_attr->read(item, NULL, 0);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item, NULL, 0);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}
/* do not exceed the maximum value */
- if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
@@ -190,7 +198,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
buffer->bin_buffer_size = len;
/* perform second read to fill buffer */
- len = bin_attr->read(item, buffer->bin_buffer, len);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item,
+ buffer->bin_buffer, len);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
@@ -240,25 +254,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
return error ? -EFAULT : count;
}
-
-/**
- * flush_write_buffer - push buffer to config_item.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
- *
- * Get the correct pointers for the config_item and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
- */
-
static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
-
- return attr->store(item, buffer->page, count);
+ struct configfs_fragment *frag = to_frag(file);
+ int res = -ENOENT;
+
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ res = buffer->attr->store(buffer->item, buffer->page, count);
+ up_read(&frag->frag_sem);
+ return res;
}
@@ -282,13 +288,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t len;
mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
- len = flush_write_buffer(file->f_path.dentry, buffer, len);
+ len = flush_write_buffer(file, buffer, len);
if (len > 0)
*ppos += len;
mutex_unlock(&buffer->mutex);
@@ -313,8 +319,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
void *tbuf = NULL;
ssize_t len;
@@ -325,13 +329,13 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
len = -ETXTBSY;
goto out;
}
- buffer->write_in_progress = 1;
+ buffer->write_in_progress = true;
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {
- if (bin_attr->cb_max_size &&
- *ppos + count > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size &&
+ *ppos + count > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
@@ -363,31 +367,51 @@ out:
return len;
}
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
- struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(file->f_path.dentry);
- struct configfs_bin_attribute *bin_attr = NULL;
- struct configfs_buffer * buffer;
- struct configfs_item_operations * ops = NULL;
- int error = 0;
+ struct dentry *dentry = file->f_path.dentry;
+ struct configfs_fragment *frag = to_frag(file);
+ struct configfs_attribute *attr;
+ struct configfs_buffer *buffer;
+ int error;
- if (!item || !attr)
- goto Einval;
+ error = -ENOMEM;
+ buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+ if (!buffer)
+ goto out;
- if (type & CONFIGFS_ITEM_BIN_ATTR)
- bin_attr = to_bin_attr(file->f_path.dentry);
+ error = -ENOENT;
+ down_read(&frag->frag_sem);
+ if (unlikely(frag->frag_dead))
+ goto out_free_buffer;
- /* Grab the module reference for this attribute if we have one */
- if (!try_module_get(attr->ca_owner)) {
- error = -ENODEV;
- goto Done;
+ error = -EINVAL;
+ buffer->item = to_item(dentry->d_parent);
+ if (!buffer->item)
+ goto out_free_buffer;
+
+ attr = to_attr(dentry);
+ if (!attr)
+ goto out_put_item;
+
+ if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ buffer->bin_attr = to_bin_attr(dentry);
+ buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+ } else {
+ buffer->attr = attr;
}
- if (item->ci_type)
- ops = item->ci_type->ct_item_ops;
- else
- goto Eaccess;
+ buffer->owner = attr->ca_owner;
+ /* Grab the module reference for this attribute if we have one */
+ error = -ENODEV;
+ if (!try_module_get(buffer->owner))
+ goto out_put_item;
+
+ error = -EACCES;
+ if (!buffer->item->ci_type)
+ goto out_put_module;
+
+ buffer->ops = buffer->item->ci_type->ct_item_ops;
/* File needs write support.
* The inode's perms must say it's ok,
@@ -395,13 +419,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+ goto out_put_module;
}
/* File needs read support.
@@ -410,92 +432,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+ goto out_put_module;
}
- /* No error? Great, allocate a buffer for the file, and store it
- * it in file->private_data for easy access.
- */
- buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
- if (!buffer) {
- error = -ENOMEM;
- goto Enomem;
- }
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
- buffer->read_in_progress = 0;
- buffer->write_in_progress = 0;
- buffer->ops = ops;
+ buffer->read_in_progress = false;
+ buffer->write_in_progress = false;
file->private_data = buffer;
- goto Done;
+ up_read(&frag->frag_sem);
+ return 0;
- Einval:
- error = -EINVAL;
- goto Done;
- Eaccess:
- error = -EACCES;
- Enomem:
- module_put(attr->ca_owner);
- Done:
- if (error && item)
- config_item_put(item);
+out_put_module:
+ module_put(buffer->owner);
+out_put_item:
+ config_item_put(buffer->item);
+out_free_buffer:
+ up_read(&frag->frag_sem);
+ kfree(buffer);
+out:
return error;
}
static int configfs_release(struct inode *inode, struct file *filp)
{
- struct config_item * item = to_item(filp->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
- struct module * owner = attr->ca_owner;
- struct configfs_buffer * buffer = filp->private_data;
-
- if (item)
- config_item_put(item);
- /* After this point, attr should not be accessed. */
- module_put(owner);
-
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- mutex_destroy(&buffer->mutex);
- kfree(buffer);
- }
+ struct configfs_buffer *buffer = filp->private_data;
+
+ module_put(buffer->owner);
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ mutex_destroy(&buffer->mutex);
+ kfree(buffer);
return 0;
}
static int configfs_open_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}
static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
- struct configfs_buffer *buffer = filp->private_data;
- struct dentry *dentry = filp->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
- ssize_t len = 0;
- int ret;
+ struct configfs_buffer *buffer = file->private_data;
- buffer->read_in_progress = 0;
+ buffer->read_in_progress = false;
if (buffer->write_in_progress) {
- buffer->write_in_progress = 0;
-
- len = bin_attr->write(item, buffer->bin_buffer,
- buffer->bin_buffer_size);
-
+ struct configfs_fragment *frag = to_frag(file);
+ buffer->write_in_progress = false;
+
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead) {
+ /* result of ->release() is ignored */
+ buffer->bin_attr->write(buffer->item,
+ buffer->bin_buffer,
+ buffer->bin_buffer_size);
+ }
+ up_read(&frag->frag_sem);
/* vfree on NULL is safe */
vfree(buffer->bin_buffer);
buffer->bin_buffer = NULL;
@@ -503,10 +505,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
buffer->needs_read_fill = 1;
}
- ret = configfs_release(inode, filp);
- if (len < 0)
- return len;
- return ret;
+ configfs_release(inode, file);
+ return 0;
}
@@ -541,7 +541,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
- CONFIGFS_ITEM_ATTR);
+ CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));
return error;
@@ -563,7 +563,7 @@ int configfs_create_bin_file(struct config_item *item,
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
- CONFIGFS_ITEM_BIN_ATTR);
+ CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);
return error;
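
The file.c rework above hinges on the configfs "fragment" introduced earlier in this series: every path that may call back into an attribute first takes the fragment's rwsem shared and checks that the fragment is still alive, while teardown marks it dead under the exclusive lock so no new callers get through. A minimal kernel-style sketch of that gate (types simplified, show() is a stand-in callback):

struct configfs_fragment_sketch {
	struct rw_semaphore frag_sem;
	bool frag_dead;
};

static ssize_t attr_call_sketch(struct configfs_fragment_sketch *frag,
				ssize_t (*show)(void *), void *item)
{
	ssize_t ret = -ENODEV;		/* backing item already torn down */

	down_read(&frag->frag_sem);
	if (!frag->frag_dead)		/* still attached: safe to call in */
		ret = show(item);
	up_read(&frag->frag_sem);
	return ret;
}

static void fragment_kill_sketch(struct configfs_fragment_sketch *frag)
{
	down_write(&frag->frag_sem);
	frag->frag_dead = true;		/* new callers bail out with -ENODEV */
	up_write(&frag->frag_sem);	/* in-flight callers have drained */
}
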
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index fea6db1ee065..afd79a1a34b3 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -157,11 +157,42 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
!type->ct_item_ops->allow_link)
goto out_put;
+ /*
+ * This is really sick. What they wanted was a hybrid of
+ * link(2) and symlink(2) - they wanted the target resolved
+ * at syscall time (as link(2) would've done), be a directory
+ * (which link(2) would've refused to do) *AND* be a deep
+ * fucking magic, making the target busy from rmdir POV.
+ * symlink(2) is nothing of that sort, and the locking it
+ * gets matches the normal symlink(2) semantics. Without
+ * attempts to resolve the target (which might very well
+ * not even exist yet) done prior to locking the parent
+ * directory. This perversion, OTOH, needs to resolve
+ * the target, which would lead to obvious deadlocks if
+ * attempted with any directories locked.
+ *
+ * Unfortunately, that garbage is userland ABI and we should've
+ * said "no" back in 2005. Too late now, so we get to
+ * play very ugly games with locking.
+ *
+ * Try *ANYTHING* of that sort in new code, and you will
+ * really regret it. Just ask yourself - what could a BOFH
+ * do to me and do I want to find it out first-hand?
+ *
+ * AV, a thoroughly annoyed bastard.
+ */
+ inode_unlock(dir);
ret = get_target(symname, &path, &target_item, dentry->d_sb);
+ inode_lock(dir);
if (ret)
goto out_put;
- ret = type->ct_item_ops->allow_link(parent_item, target_item);
+ if (dentry->d_inode || d_unhashed(dentry))
+ ret = -EEXIST;
+ else
+ ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ if (!ret)
+ ret = type->ct_item_ops->allow_link(parent_item, target_item);
if (!ret) {
mutex_lock(&configfs_symlink_mutex);
ret = create_link(parent_item, target_item, dentry);
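
Stripped of the commentary, the new ordering is: drop the parent's inode lock for the target resolution, retake it, then revalidate everything that may have changed in the window (the dentry can have been instantiated or unhashed, and write permission on the directory must be rechecked) before calling allow_link(). A distilled sketch, with resolve_target() and do_link() as placeholders:

/* Sketch only; resolve_target() and do_link() stand in for the expensive
 * lookup and the actual link creation. */
static int symlink_locking_sketch(struct inode *dir, struct dentry *dentry,
				  const char *symname)
{
	void *target;
	int ret;

	inode_unlock(dir);			/* can't resolve under the lock */
	ret = resolve_target(symname, &target);
	inode_lock(dir);
	if (ret)
		return ret;

	/* the directory was unlocked: revalidate before proceeding */
	if (dentry->d_inode || d_unhashed(dentry))
		return -EEXIST;
	ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (ret)
		return ret;

	return do_link(dir, dentry, target);
}
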
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index c160d2d0e18d..57a97b38a2fa 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -114,6 +114,8 @@ int fscrypt_process_policy(struct file *filp,
if (!inode_has_encryption_context(inode)) {
if (!S_ISDIR(inode->i_mode))
ret = -ENOTDIR;
+ else if (IS_DEADDIR(inode))
+ ret = -ENOENT;
else if (!inode->i_sb->s_cop->empty_dir)
ret = -EOPNOTSUPP;
else if (!inode->i_sb->s_cop->empty_dir(inode))
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 77e9cd7a0137..20ee612017bf 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -170,19 +170,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
-static void debugfs_evict_inode(struct inode *inode)
+static void debugfs_i_callback(struct rcu_head *head)
{
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
+ struct inode *inode = container_of(head, struct inode, i_rcu);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
+ free_inode_nonrcu(inode);
+}
+
+static void debugfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, debugfs_i_callback);
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
- .evict_inode = debugfs_evict_inode,
+ .destroy_inode = debugfs_destroy_inode,
};
static struct vfsmount *debugfs_automount(struct path *path)
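
The debugfs hunk replaces a synchronous ->evict_inode() with a ->destroy_inode() that defers the actual free through call_rcu(), so an RCU path walker that already fetched the inode (and possibly its i_link) cannot see the memory reused before a grace period has elapsed. The generic call_rcu()/container_of() shape, shown with a made-up object type:

/* Kernel-style sketch (illustrative type names): free an object only after
 * an RCU grace period, so readers inside rcu_read_lock() sections can keep
 * using the pointer they already loaded. */
struct foo {
	int value;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);			/* runs after the grace period */
}

static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);	/* schedule the deferred free */
}
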
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 30e4e01db35a..b14bb2c46042 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -800,6 +800,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
+ idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf);
/*
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 9c47f1c14a8b..a47ae99f7bcb 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -683,7 +683,7 @@ int dlm_ls_start(struct dlm_ls *ls)
error = dlm_config_nodes(ls->ls_name, &nodes, &count);
if (error < 0)
- goto fail;
+ goto fail_rv;
spin_lock(&ls->ls_recover_lock);
@@ -715,8 +715,9 @@ int dlm_ls_start(struct dlm_ls *ls)
return 0;
fail:
- kfree(rv);
kfree(nodes);
+ fail_rv:
+ kfree(rv);
return error;
}
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 7cd24bccd4fe..37be29f21d04 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -38,10 +38,8 @@ int __init dlm_memory_init(void)
void dlm_memory_exit(void)
{
- if (lkb_cache)
- kmem_cache_destroy(lkb_cache);
- if (rsb_cache)
- kmem_cache_destroy(rsb_cache);
+ kmem_cache_destroy(lkb_cache);
+ kmem_cache_destroy(rsb_cache);
}
char *dlm_allocate_lvb(struct dlm_ls *ls)
@@ -86,8 +84,7 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
struct dlm_user_args *ua;
ua = lkb->lkb_ua;
if (ua) {
- if (ua->lksb.sb_lvbptr)
- kfree(ua->lksb.sb_lvbptr);
+ kfree(ua->lksb.sb_lvbptr);
kfree(ua);
}
}
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 9ac65914ab5b..bb0d307deadd 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -25,6 +25,7 @@
#include "lvb_table.h"
#include "user.h"
#include "ast.h"
+#include "config.h"
static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
@@ -402,7 +403,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- error = dlm_new_lockspace(params->name, NULL, params->flags,
+ error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
DLM_USER_LVB_LEN, NULL, NULL, NULL,
&lockspace);
if (error)
@@ -700,7 +701,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat,
result.version[0] = DLM_DEVICE_VERSION_MAJOR;
result.version[1] = DLM_DEVICE_VERSION_MINOR;
result.version[2] = DLM_DEVICE_VERSION_PATCH;
- memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
+ memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
result.user_lksb = ua->user_lksb;
/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
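
Bounding the memcpy() with offsetof(struct dlm_lksb, sb_lvbptr) stops the copy right before the pointer member, so a raw kernel address is never written into the structure returned to userspace. A self-contained userspace illustration of the idiom (the struct layout below is hypothetical, not the real dlm_lksb):

/* Userspace illustration (not kernel code): copy only the fields that
 * precede a pointer member, so the pointer value never reaches the
 * destination buffer. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lksb_like {
	int      status;
	uint32_t lkid;
	char     flags;
	char    *lvbptr;		/* must not be exposed */
};

int main(void)
{
	struct lksb_like src = { .status = 0, .lkid = 42, .flags = 1,
				 .lvbptr = (char *)&src };
	unsigned char out[sizeof(struct lksb_like)] = { 0 };

	/* copy stops right before lvbptr */
	memcpy(out, &src, offsetof(struct lksb_like, lvbptr));

	printf("copied %zu of %zu bytes\n",
	       offsetof(struct lksb_like, lvbptr), sizeof(src));
	return 0;
}
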
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index e5e29f8c920b..ff6cf23be8a2 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -339,8 +339,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
- BUG_ON(!crypt_stat || !crypt_stat->tfm
- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+ if (!crypt_stat || !crypt_stat->tfm
+ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
+ return -EINVAL;
+
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
@@ -1034,8 +1036,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)
rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
inode);
- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
- return rc >= 0 ? -EINVAL : rc;
+ if (rc < 0)
+ return rc;
+ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
@@ -1397,8 +1401,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
ecryptfs_inode_to_lower(inode),
ECRYPTFS_XATTR_NAME, file_size,
ECRYPTFS_SIZE_AND_MARKER_BYTES);
- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
- return rc >= 0 ? -EINVAL : rc;
+ if (rc < 0)
+ return rc;
+ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+ return -EINVAL;
rc = ecryptfs_validate_marker(marker);
if (!rc)
ecryptfs_i_size_init(file_size, inode);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 5c5ff9f6fe07..2a5e436ff8dd 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -326,9 +326,9 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry)
{
- struct inode *inode, *lower_inode = d_inode(lower_dentry);
+ struct path *path = ecryptfs_dentry_to_lower_path(dentry->d_parent);
+ struct inode *inode, *lower_inode;
struct ecryptfs_dentry_info *dentry_info;
- struct vfsmount *lower_mnt;
int rc = 0;
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
@@ -340,16 +340,23 @@ static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry,
return ERR_PTR(-ENOMEM);
}
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
fsstack_copy_attr_atime(d_inode(dentry->d_parent),
- d_inode(lower_dentry->d_parent));
+ d_inode(path->dentry));
BUG_ON(!d_count(lower_dentry));
ecryptfs_set_dentry_private(dentry, dentry_info);
- dentry_info->lower_path.mnt = lower_mnt;
+ dentry_info->lower_path.mnt = mntget(path->mnt);
dentry_info->lower_path.dentry = lower_dentry;
- if (d_really_is_negative(lower_dentry)) {
+ /*
+ * negative dentry can go positive under us here - its parent is not
+ * locked. That's OK and that could happen just as we return from
+ * ecryptfs_lookup() anyway. Just need to be careful and fetch
+ * ->d_inode only once - it's not stable here.
+ */
+ lower_inode = READ_ONCE(lower_dentry->d_inode);
+
+ if (!lower_inode) {
/* We want to add because we couldn't find in lower */
d_add(dentry, NULL);
return NULL;
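
The heart of the ecryptfs change is the single READ_ONCE() load of lower_dentry->d_inode: the lower dentry may go from negative to positive at any time, so the code must take one snapshot and use it consistently instead of re-reading the field. A small userspace analogue (names illustrative, READ_ONCE emulated with a volatile cast):

/* Userspace analogue of the READ_ONCE fix: load a shared pointer exactly
 * once and reuse the snapshot, instead of re-reading a field another
 * thread may be changing concurrently. */
#include <stdio.h>

struct inode_like { int ino; };
struct dentry_like {
	struct inode_like *d_inode;	/* may flip NULL -> non-NULL at any time */
};

#define READ_ONCE_PTR(x) (*(volatile typeof(x) *)&(x))

static int lookup_sketch(struct dentry_like *d)
{
	struct inode_like *inode = READ_ONCE_PTR(d->d_inode);	/* single load */

	if (!inode)
		return -1;		/* treat as a negative lookup */
	return inode->ino;		/* safe: uses the same snapshot */
}

int main(void)
{
	struct inode_like i = { .ino = 7 };
	struct dentry_like d = { .d_inode = &i };

	printf("%d\n", lookup_sketch(&d));
	return 0;
}
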
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index fa218cd64f74..b134315fb69d 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1335,7 +1335,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
printk(KERN_WARNING "Tag 1 packet contains key larger "
"than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES");
rc = -EINVAL;
- goto out;
+ goto out_free;
}
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 4f457d5c4933..26464f9d9b76 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -397,6 +397,7 @@ int __init ecryptfs_init_messaging(void)
* ecryptfs_message_buf_len),
GFP_KERNEL);
if (!ecryptfs_msg_ctx_arr) {
+ kfree(ecryptfs_daemon_hash);
rc = -ENOMEM;
printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
goto out;
diff --git a/fs/exec.c b/fs/exec.c
index 81477116035d..bb03b98fd03b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1353,7 +1353,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread
group */
- current->self_exec_id++;
+ WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);
@@ -1790,7 +1790,7 @@ static int do_execveat_common(int fd, struct filename *filename,
current->fs->in_exec = 0;
current->in_execve = 0;
acct_update_integrals(current);
- task_numa_free(current);
+ task_numa_free(current, false);
free_bprm(bprm);
kfree(pathbuf);
putname(filename);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 7a7bba7c2328..1730122b10e0 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -146,6 +146,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
if (IS_ERR(tmp)) {
dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
+ err = PTR_ERR(tmp);
goto out_err;
}
if (tmp != dentry) {
@@ -506,26 +507,33 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
* inode is actually connected to the parent.
*/
err = exportfs_get_name(mnt, target_dir, nbuf, result);
- if (!err) {
- inode_lock(target_dir->d_inode);
- nresult = lookup_one_len(nbuf, target_dir,
- strlen(nbuf));
- inode_unlock(target_dir->d_inode);
- if (!IS_ERR(nresult)) {
- if (nresult->d_inode) {
- dput(result);
- result = nresult;
- } else
- dput(nresult);
- }
+ if (err) {
+ dput(target_dir);
+ goto err_result;
}
+ inode_lock(target_dir->d_inode);
+ nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
+ if (!IS_ERR(nresult)) {
+ if (unlikely(nresult->d_inode != result->d_inode)) {
+ dput(nresult);
+ nresult = ERR_PTR(-ESTALE);
+ }
+ }
+ inode_unlock(target_dir->d_inode);
/*
* At this point we are done with the parent, but it's pinned
* by the child dentry anyway.
*/
dput(target_dir);
+ if (IS_ERR(nresult)) {
+ err = PTR_ERR(nresult);
+ goto err_result;
+ }
+ dput(result);
+ result = nresult;
+
/*
* And finally make sure the dentry is actually acceptable
* to NFSD.
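
Restructured this way, exportfs_decode_fh() routes every failure through a single label that drops the child reference, and it accepts the by-name relookup only if it resolves to the same inode the handle decoded to; anything else is reported as -ESTALE rather than silently substituting a different dentry. The revalidation step in sketch form (surrounding context omitted):

/* Sketch of the revalidation step; the caller holds a reference on both
 * 'parent' and 'result' and drops 'result' itself on error. */
static struct dentry *revalidate_by_name(struct dentry *parent,
					 struct dentry *result,
					 const char *name)
{
	struct dentry *n;

	inode_lock(parent->d_inode);
	n = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(n) && unlikely(n->d_inode != result->d_inode)) {
		dput(n);
		n = ERR_PTR(-ESTALE);	/* handle and name now disagree */
	}
	inode_unlock(parent->d_inode);
	return n;			/* on success, caller swaps it in for 'result' */
}
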
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 85449a6ddc56..fe664949d442 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -697,10 +697,13 @@ static int ext2_get_blocks(struct inode *inode,
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
- if (err)
- goto cleanup;
goto got_it;
}
+
+ if (err) {
+ mutex_unlock(&ei->truncate_mutex);
+ goto cleanup;
+ }
}
/*
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6fcb29b393d3..186912c9bf56 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1047,9 +1047,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
goto cantfind_ext2;
- sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
- le32_to_cpu(es->s_first_data_block) - 1)
- / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
+ sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
+ le32_to_cpu(es->s_first_data_block) - 1)
+ / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index babef30d440b..c8679b583561 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -55,6 +55,7 @@
#include <linux/buffer_head.h>
#include <linux/init.h>
+#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
@@ -83,8 +84,8 @@
printk("\n"); \
} while (0)
#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
+# define ea_idebug(inode, f...) no_printk(f)
+# define ea_bdebug(bh, f...) no_printk(f)
#endif
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
@@ -835,8 +836,7 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
if (error) {
if (error == -EBUSY) {
- ea_bdebug(bh, "already in cache (%d cache entries)",
- atomic_read(&ext2_xattr_cache->c_entry_count));
+ ea_bdebug(bh, "already in cache");
error = 0;
}
} else
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 2455fe1446d6..de601f3c023d 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -279,6 +279,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
ext4_group_t ngroups = ext4_get_groups_count(sb);
struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bh_p;
if (block_group >= ngroups) {
ext4_error(sb, "block_group >= groups_count - block_group = %u,"
@@ -289,7 +290,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- if (!sbi->s_group_desc[group_desc]) {
+ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
+ /*
+ * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
+ * the pointer being dereferenced won't be dereferenced again. By
+ * looking at the usage in add_new_gdb() the value isn't modified,
+ * just the pointer, and so it remains valid.
+ */
+ if (!bh_p) {
ext4_error(sb, "Group descriptor not loaded - "
"block_group = %u, group_desc = %u, desc = %u",
block_group, group_desc, offset);
@@ -297,10 +305,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
}
desc = (struct ext4_group_desc *)(
- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
+ (__u8 *)bh_p->b_data +
offset * EXT4_DESC_SIZE(sb));
if (bh)
- *bh = sbi->s_group_desc[group_desc];
+ *bh = bh_p;
return desc;
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e16bc4cec62e..6b3a32f75dad 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -75,6 +75,11 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
error_msg = "directory entry overrun";
+ else if (unlikely(((char *) de - buf) + rlen >
+ size - EXT4_DIR_REC_LEN(1) &&
+ ((char *) de - buf) + rlen != size)) {
+ error_msg = "directory entry too close to block end";
+ }
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -106,7 +111,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
- int dir_has_error = 0;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
if (ext4_encrypted_inode(inode)) {
@@ -120,12 +124,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (err != ERR_BAD_DX_DIR) {
return err;
}
- /*
- * We don't set the inode dirty flag since it's not
- * critical that it get flushed back to the disk.
- */
- ext4_clear_inode_flag(file_inode(file),
- EXT4_INODE_INDEX);
+ /* Can we just clear INDEX flag to ignore htree information? */
+ if (!ext4_has_metadata_csum(sb)) {
+ /*
+ * We don't set the inode dirty flag since it's not
+ * critical that it gets flushed back to the disk.
+ */
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
if (ext4_has_inline_data(inode)) {
@@ -142,8 +148,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
}
- offset = ctx->pos & (sb->s_blocksize - 1);
-
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;
@@ -152,9 +156,18 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
goto errout;
}
cond_resched();
+ offset = ctx->pos & (sb->s_blocksize - 1);
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err == 0) {
+ /* m_len should never be zero but let's avoid
+ * an infinite loop if it somehow is */
+ if (map.m_len == 0)
+ map.m_len = 1;
+ ctx->pos += map.m_len * sb->s_blocksize;
+ continue;
+ }
if (err > 0) {
pgoff_t index = map.m_pblk >>
(PAGE_SHIFT - inode->i_blkbits);
@@ -173,13 +186,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
}
if (!bh) {
- if (!dir_has_error) {
- EXT4_ERROR_FILE(file, 0,
- "directory contains a "
- "hole at offset %llu",
- (unsigned long long) ctx->pos);
- dir_has_error = 1;
- }
/* corrupt size? Maybe no more blocks to read */
if (ctx->pos > inode->i_blocks << 9)
break;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 567a6c7af677..eb0ec5068423 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1367,7 +1367,7 @@ struct ext4_sb_info {
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
- struct buffer_head **s_group_desc;
+ struct buffer_head * __rcu *s_group_desc;
unsigned int s_mount_opt;
unsigned int s_mount_opt2;
unsigned int s_mount_flags;
@@ -1427,7 +1427,7 @@ struct ext4_sb_info {
#endif
/* for buddy allocator */
- struct ext4_group_info ***s_group_info;
+ struct ext4_group_info ** __rcu *s_group_info;
struct inode *s_buddy_cache;
spinlock_t s_md_lock;
unsigned short *s_mb_offsets;
@@ -1475,7 +1475,7 @@ struct ext4_sb_info {
unsigned int s_extent_max_zeroout_kb;
unsigned int s_log_groups_per_flex;
- struct flex_groups *s_flex_groups;
+ struct flex_groups * __rcu *s_flex_groups;
ext4_group_t s_flex_groups_allocated;
/* workqueue for reserved extent conversions (buffered io) */
@@ -1514,8 +1514,11 @@ struct ext4_sb_info {
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
- /* Barrier between changing inodes' journal flags and writepages ops. */
- struct percpu_rw_semaphore s_journal_flag_rwsem;
+ /*
+ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+ * or EXTENTS flag.
+ */
+ struct percpu_rw_semaphore s_writepages_rwsem;
/* Encryption support */
#ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -1547,6 +1550,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
}
/*
+ * Returns: sbi->field[index]
+ * Used to access an array element from the following sbi fields which require
+ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
+ * - s_group_desc
+ * - s_group_info
+ * - s_flex_group
+ */
+#define sbi_array_rcu_deref(sbi, field, index) \
+({ \
+ typeof(*((sbi)->field)) _v; \
+ rcu_read_lock(); \
+ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
+ rcu_read_unlock(); \
+ _v; \
+})
+
+/*
* Inode dynamic state flags
*/
enum {
@@ -2375,8 +2395,11 @@ int ext4_insert_dentry(struct inode *dir,
struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode)
{
- if (!ext4_has_feature_dir_index(inode->i_sb))
+ if (!ext4_has_feature_dir_index(inode->i_sb)) {
+ /* ext4_iget() should have caught this... */
+ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
static unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -2552,6 +2575,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
extern bool ext4_empty_dir(struct inode *inode);
/* resize.c */
+extern void ext4_kvfree_array_rcu(void *to_free);
extern int ext4_group_add(struct super_block *sb,
struct ext4_new_group_data *input);
extern int ext4_group_extend(struct super_block *sb,
@@ -2792,13 +2816,13 @@ static inline
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
ext4_group_t group)
{
- struct ext4_group_info ***grp_info;
+ struct ext4_group_info **grp_info;
long indexv, indexh;
BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
- grp_info = EXT4_SB(sb)->s_group_info;
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
- return grp_info[indexv][indexh];
+ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+ return grp_info[indexh];
}
/*
@@ -2848,7 +2872,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
!inode_is_locked(inode));
down_write(&EXT4_I(inode)->i_data_sem);
if (newsize > EXT4_I(inode)->i_disksize)
- EXT4_I(inode)->i_disksize = newsize;
+ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
up_write(&EXT4_I(inode)->i_data_sem);
}
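
The __rcu annotations plus the sbi_array_rcu_deref() macro implement a read-mostly pattern: readers snapshot the array pointer under rcu_read_lock() and index into it, while resize paths build a new array, publish it with rcu_assign_pointer(), and free the old one only after a grace period (via the ext4_kvfree_array_rcu() helper added in fs/ext4/resize.c further down). A condensed kernel-style sketch with an invented foo_sb_info:

/* Kernel-style sketch; 'struct foo_sb_info' and its lock are illustrative. */
struct item;

struct foo_sb_info {
	struct item * __rcu *array;	/* RCU-managed array of pointers */
	unsigned int nr;
	struct mutex grow_lock;		/* serializes writers */
};

static struct item *foo_get(struct foo_sb_info *sbi, unsigned int idx)
{
	struct item *it;

	rcu_read_lock();
	it = rcu_dereference(sbi->array)[idx];	/* snapshot + index */
	rcu_read_unlock();
	return it;	/* element lifetime is managed separately, as in ext4 */
}

static int foo_grow(struct foo_sb_info *sbi, unsigned int new_nr)
{
	struct item **old_arr, **new_arr;

	new_arr = kcalloc(new_nr, sizeof(*new_arr), GFP_KERNEL);
	if (!new_arr)
		return -ENOMEM;

	mutex_lock(&sbi->grow_lock);
	old_arr = rcu_dereference_protected(sbi->array,
					    lockdep_is_held(&sbi->grow_lock));
	if (old_arr)
		memcpy(new_arr, old_arr, sbi->nr * sizeof(*new_arr));
	rcu_assign_pointer(sbi->array, new_arr);	/* publish */
	sbi->nr = new_nr;
	mutex_unlock(&sbi->grow_lock);

	if (old_arr)
		ext4_kvfree_array_rcu(old_arr);		/* free after a grace period */
	return 0;
}
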
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 106a5bb3ae68..999d2a54297d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1047,6 +1047,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
+ size_t ext_size = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
@@ -1138,6 +1139,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
le16_add_cpu(&neh->eh_entries, m);
}
+ /* zero out unused area in the extent block */
+ ext_size = sizeof(struct ext4_extent_header) +
+ sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
+ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1217,6 +1222,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
+ /* zero out unused area in the extent block */
+ ext_size = sizeof(struct ext4_extent_header) +
+ (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
+ memset(bh->b_data + ext_size, 0,
+ inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1282,6 +1292,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
+ size_t ext_size = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
@@ -1307,9 +1318,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
goto out;
}
+ ext_size = sizeof(EXT4_I(inode)->i_data);
/* move top-level index/leaf into new block */
- memmove(bh->b_data, EXT4_I(inode)->i_data,
- sizeof(EXT4_I(inode)->i_data));
+ memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
+ /* zero out unused area in the extent block */
+ memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
/* set size of new block */
neh = ext_block_hdr(bh);
@@ -3432,8 +3445,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
(unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
+ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+ >> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map_len)
eof_block = map->m_lblk + map_len;
@@ -3688,8 +3701,8 @@ static int ext4_split_convert_extents(handle_t *handle,
__func__, inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
- eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
- inode->i_sb->s_blocksize_bits;
+ eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
+ >> inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
@@ -3742,8 +3755,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
* illegal.
*/
if (ee_block != map->m_lblk || ee_len > map->m_len) {
-#ifdef EXT4_DEBUG
- ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+#ifdef CONFIG_EXT4_DEBUG
+ ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
" len %u; IO logical block %llu, len %u",
inode->i_ino, (unsigned long long)ee_block, ee_len,
(unsigned long long)map->m_lblk, map->m_len);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index fe76d0957a1f..59d3ea7094a0 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -163,6 +163,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
ret = __generic_file_write_iter(iocb, from);
+ /*
+ * Unaligned direct AIO must be the only IO in flight. Otherwise
+ * overlapping aligned IO after unaligned might result in data
+ * corruption.
+ */
+ if (ret == -EIOCBQUEUED && unaligned_aio)
+ ext4_unwritten_wait(inode);
inode_unlock(inode);
if (ret > 0)
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 4f78e099de1d..c5af7bbf906f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -331,11 +331,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
- ext4_group_t f = ext4_flex_group(sbi, block_group);
+ struct flex_groups *fg;
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
+ ext4_flex_group(sbi, block_group));
+ atomic_inc(&fg->free_inodes);
if (is_directory)
- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+ atomic_dec(&fg->used_dirs);
}
BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -376,12 +378,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
int flex_size, struct orlov_stats *stats)
{
struct ext4_group_desc *desc;
- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
if (flex_size > 1) {
- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
+ s_flex_groups, g);
+ stats->free_inodes = atomic_read(&fg->free_inodes);
+ stats->free_clusters = atomic64_read(&fg->free_clusters);
+ stats->used_dirs = atomic_read(&fg->used_dirs);
return;
}
@@ -988,7 +991,8 @@ got:
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
+ f)->used_dirs);
}
}
if (ext4_has_group_desc_csum(sb)) {
@@ -1011,7 +1015,8 @@ got:
if (sbi->s_log_groups_per_flex) {
flex_group = ext4_flex_group(sbi, group);
- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
+ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_inodes);
}
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 14007e621d2a..d2844fe9040d 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1217,6 +1217,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
ext4_lblk_t offsets[4], offsets2[4];
Indirect chain[4], chain2[4];
Indirect *partial, *partial2;
+ Indirect *p = NULL, *p2 = NULL;
ext4_lblk_t max_block;
__le32 nr = 0, nr2 = 0;
int n = 0, n2 = 0;
@@ -1258,7 +1259,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
}
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
if (nr) {
if (partial == chain) {
/* Shared branch grows from the inode */
@@ -1283,13 +1284,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
partial->p + 1,
(__le32 *)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
partial--;
}
end_range:
- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
if (nr2) {
if (partial2 == chain2) {
/*
@@ -1319,16 +1318,14 @@ end_range:
(__le32 *)partial2->bh->b_data,
partial2->p,
(chain2+n2-1) - partial2);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
partial2--;
}
goto do_indirects;
}
/* Punch happened within the same level (n == n2) */
- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
/* Free top, but only if partial2 isn't its subtree. */
if (nr) {
@@ -1385,15 +1382,7 @@ end_range:
partial->p + 1,
partial2->p,
(chain+n-1) - partial);
- while (partial > chain) {
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- }
- while (partial2 > chain2) {
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
- }
- return 0;
+ goto cleanup;
}
/*
@@ -1408,8 +1397,6 @@ end_range:
partial->p + 1,
(__le32 *)partial->bh->b_data+addr_per_block,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
partial--;
}
if (partial2 > chain2 && depth2 <= depth) {
@@ -1417,11 +1404,21 @@ end_range:
(__le32 *)partial2->bh->b_data,
partial2->p,
(chain2+n2-1) - partial2);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
partial2--;
}
}
+
+cleanup:
+ while (p && p > chain) {
+ BUFFER_TRACE(p->bh, "call brelse");
+ brelse(p->bh);
+ p--;
+ }
+ while (p2 && p2 > chain2) {
+ BUFFER_TRACE(p2->bh, "call brelse");
+ brelse(p2->bh);
+ p2--;
+ }
return 0;
do_indirects:
@@ -1429,7 +1426,7 @@ do_indirects:
switch (offsets[0]) {
default:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_IND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
@@ -1437,7 +1434,7 @@ do_indirects:
}
case EXT4_IND_BLOCK:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_DIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
@@ -1445,7 +1442,7 @@ do_indirects:
}
case EXT4_DIND_BLOCK:
if (++n >= n2)
- return 0;
+ break;
nr = i_data[EXT4_TIND_BLOCK];
if (nr) {
ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
@@ -1454,5 +1451,5 @@ do_indirects:
case EXT4_TIND_BLOCK:
;
}
- return 0;
+ goto cleanup;
}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 9a13f86fed62..4df4d31057b3 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1417,7 +1417,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
err = ext4_htree_store_dirent(dir_file, hinfo->hash,
hinfo->minor_hash, de, &tmp_str);
if (err) {
- count = err;
+ ret = err;
goto out;
}
count++;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4815be26b15f..45bcde1969e3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2475,7 +2475,7 @@ update_disksize:
* truncate are avoided by checking i_size under i_data_sem.
*/
disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
- if (disksize > EXT4_I(inode)->i_disksize) {
+ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
int err2;
loff_t i_size;
@@ -2652,7 +2652,7 @@ static int ext4_writepages(struct address_space *mapping,
struct blk_plug plug;
bool give_up_on_write = false;
- percpu_down_read(&sbi->s_journal_flag_rwsem);
+ percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
if (dax_mapping(mapping)) {
@@ -2853,7 +2853,7 @@ retry:
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
- percpu_up_read(&sbi->s_journal_flag_rwsem);
+ percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
@@ -3957,6 +3957,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
trace_ext4_punch_hole(inode, offset, length, 0);
+ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ if (ext4_has_inline_data(inode)) {
+ down_write(&EXT4_I(inode)->i_mmap_sem);
+ ret = ext4_convert_inline_data(inode);
+ up_write(&EXT4_I(inode)->i_mmap_sem);
+ if (ret)
+ return ret;
+ }
+
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
@@ -4585,6 +4594,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = -EFSCORRUPTED;
goto bad_inode;
}
+ /*
+ * If dir_index is not enabled but there's a dir with the INDEX flag set,
+ * we'd normally treat htree data as empty space. But with metadata
+ * checksumming that corrupts checksums so forbid that.
+ */
+ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
+ EXT4_ERROR_INODE(inode,
+ "iget: Dir with htree data on filesystem without dir_index feature.");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -4733,7 +4754,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode_info *ei)
{
struct inode *inode = &(ei->vfs_inode);
- u64 i_blocks = inode->i_blocks;
+ u64 i_blocks = READ_ONCE(inode->i_blocks);
struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) {
@@ -5071,11 +5092,15 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
offset = inode->i_size & (PAGE_SIZE - 1);
/*
- * All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_SIZE ==
- * blocksize case
+ * If the page is fully truncated, we don't need to wait for any commit
+ * (and we even should not as __ext4_journalled_invalidatepage() may
+ * strip all buffers from the page but keep the page dirty which can then
+ * confuse e.g. concurrent ext4_writepage() seeing dirty page without
+ * buffers). Also we don't need to wait for any commit if all buffers in
+ * the page remain valid. This is most beneficial for the common case of
+ * blocksize == PAGESIZE.
*/
- if (offset > PAGE_SIZE - i_blocksize(inode))
+ if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
@@ -5223,7 +5248,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
up_write(&EXT4_I(inode)->i_data_sem);
ext4_journal_stop(handle);
if (error) {
- if (orphan)
+ if (orphan && inode->i_nlink)
ext4_orphan_del(NULL, inode);
goto err_out;
}
@@ -5459,10 +5484,25 @@ static int ext4_expand_extra_isize(struct inode *inode,
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
+ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
return 0;
+ /* this was checked at iget time, but double check for good measure */
+ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
+ (ei->i_extra_isize & 3)) {
+ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
+ ei->i_extra_isize,
+ EXT4_INODE_SIZE(inode->i_sb));
+ return -EFSCORRUPTED;
+ }
+ if ((new_extra_isize < ei->i_extra_isize) ||
+ (new_extra_isize < 4) ||
+ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
+ return -EINVAL; /* Should never happen */
+
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
@@ -5648,7 +5688,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
}
}
- percpu_down_write(&sbi->s_journal_flag_rwsem);
+ percpu_down_write(&sbi->s_writepages_rwsem);
jbd2_journal_lock_updates(journal);
/*
@@ -5665,7 +5705,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
err = jbd2_journal_flush(journal);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_journal_flag_rwsem);
+ percpu_up_write(&sbi->s_writepages_rwsem);
ext4_inode_resume_unlocked_dio(inode);
return err;
}
@@ -5674,7 +5714,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_journal_flag_rwsem);
+ percpu_up_write(&sbi->s_writepages_rwsem);
if (val)
up_write(&EXT4_I(inode)->i_mmap_sem);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 2880e017cd0a..baa2f6375226 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -727,7 +727,7 @@ group_add_out:
if (err == 0)
err = err2;
mnt_drop_write_file(filp);
- if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+ if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
ext4_has_group_desc_csum(sb) &&
test_opt(sb, INIT_INODE_TABLE))
err = ext4_register_li_request(sb, o_group);
@@ -749,6 +749,13 @@ resizefs_out:
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /*
+ * We haven't replayed the journal, so we cannot use our
+ * block-bitmap-guided storage zapping commands.
+ */
+ if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
+ return -EROFS;
+
if (copy_from_user(&range, (struct fstrim_range __user *)arg,
sizeof(range)))
return -EFAULT;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index a49d0e5d7baf..c18668e3135e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2377,7 +2377,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned size;
- struct ext4_group_info ***new_groupinfo;
+ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
@@ -2390,13 +2390,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
}
- if (sbi->s_group_info) {
- memcpy(new_groupinfo, sbi->s_group_info,
+ rcu_read_lock();
+ old_groupinfo = rcu_dereference(sbi->s_group_info);
+ if (old_groupinfo)
+ memcpy(new_groupinfo, old_groupinfo,
sbi->s_group_info_size * sizeof(*sbi->s_group_info));
- kvfree(sbi->s_group_info);
- }
- sbi->s_group_info = new_groupinfo;
+ rcu_read_unlock();
+ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
+ if (old_groupinfo)
+ ext4_kvfree_array_rcu(old_groupinfo);
ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
sbi->s_group_info_size);
return 0;
@@ -2408,6 +2411,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
{
int i;
int metalen = 0;
+ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info **meta_group_info;
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2426,12 +2430,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
"for a buddy group");
goto exit_meta_group_info;
}
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
- meta_group_info;
+ rcu_read_lock();
+ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
+ rcu_read_unlock();
}
- meta_group_info =
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
+ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
@@ -2479,8 +2483,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
exit_group_info:
/* If a meta_group_info table has been allocated, release it now */
if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+ struct ext4_group_info ***group_info;
+
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
+ kfree(group_info[idx]);
+ group_info[idx] = NULL;
+ rcu_read_unlock();
}
exit_meta_group_info:
return -ENOMEM;
@@ -2493,6 +2502,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
struct ext4_group_desc *desc;
+ struct ext4_group_info ***group_info;
struct kmem_cache *cachep;
err = ext4_mb_alloc_groupinfo(sb, ngroups);
@@ -2527,11 +2537,16 @@ err_freebuddy:
while (i-- > 0)
kmem_cache_free(cachep, ext4_get_group_info(sb, i));
i = sbi->s_group_info_size;
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
while (i-- > 0)
- kfree(sbi->s_group_info[i]);
+ kfree(group_info[i]);
+ rcu_read_unlock();
iput(sbi->s_buddy_cache);
err_freesgi:
- kvfree(sbi->s_group_info);
+ rcu_read_lock();
+ kvfree(rcu_dereference(sbi->s_group_info));
+ rcu_read_unlock();
return -ENOMEM;
}
@@ -2720,7 +2735,7 @@ int ext4_mb_release(struct super_block *sb)
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
int num_meta_group_infos;
- struct ext4_group_info *grinfo;
+ struct ext4_group_info *grinfo, ***group_info;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2738,9 +2753,12 @@ int ext4_mb_release(struct super_block *sb)
num_meta_group_infos = (ngroups +
EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
for (i = 0; i < num_meta_group_infos; i++)
- kfree(sbi->s_group_info[i]);
- kvfree(sbi->s_group_info);
+ kfree(group_info[i]);
+ kvfree(group_info);
+ rcu_read_unlock();
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
@@ -2998,7 +3016,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group);
atomic64_sub(ac->ac_b_ex.fe_len,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4888,7 +4907,8 @@ do_more:
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(count_clusters,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
@@ -5033,7 +5053,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 364ea4d4a943..bce2d696d6b9 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -434,6 +434,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
int ext4_ext_migrate(struct inode *inode)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
handle_t *handle;
int retval = 0, i;
__le32 *i_data;
@@ -458,6 +459,8 @@ int ext4_ext_migrate(struct inode *inode)
*/
return retval;
+ percpu_down_write(&sbi->s_writepages_rwsem);
+
/*
* Worst case we can touch the allocation bitmaps, a bgd
* block, and a block to link in the orphan list. We do need
@@ -468,7 +471,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- return retval;
+ goto out_unlock;
}
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
@@ -479,7 +482,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(tmp_inode)) {
retval = PTR_ERR(tmp_inode);
ext4_journal_stop(handle);
- return retval;
+ goto out_unlock;
}
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -521,7 +524,7 @@ int ext4_ext_migrate(struct inode *inode)
*/
ext4_orphan_del(NULL, tmp_inode);
retval = PTR_ERR(handle);
- goto out;
+ goto out_tmp_inode;
}
ei = EXT4_I(inode);
@@ -602,10 +605,11 @@ err_out:
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
ext4_journal_stop(handle);
-out:
+out_tmp_inode:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
-
+out_unlock:
+ percpu_up_write(&sbi->s_writepages_rwsem);
return retval;
}
@@ -615,7 +619,8 @@ out:
int ext4_ind_migrate(struct inode *inode)
{
struct ext4_extent_header *eh;
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_super_block *es = sbi->s_es;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_extent *ex;
unsigned int i, len;
@@ -639,9 +644,13 @@ int ext4_ind_migrate(struct inode *inode)
if (test_opt(inode->i_sb, DELALLOC))
ext4_alloc_da_blocks(inode);
+ percpu_down_write(&sbi->s_writepages_rwsem);
+
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out_unlock;
+ }
down_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_ext_check_inode(inode);
@@ -676,5 +685,7 @@ int ext4_ind_migrate(struct inode *inode)
errout:
ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
+out_unlock:
+ percpu_up_write(&sbi->s_writepages_rwsem);
return ret;
}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index c2e830a6206d..fb1ad9510c5f 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -119,10 +119,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
{
__ext4_warning(sb, function, line, "%s", msg);
__ext4_warning(sb, function, line,
- "MMP failure info: last update time: %llu, last update "
- "node: %s, last update device: %s",
- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
- mmp->mmp_nodename, mmp->mmp_bdevname);
+ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
+ (unsigned long long)le64_to_cpu(mmp->mmp_time),
+ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
+ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
}
/*
@@ -153,6 +153,7 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
@@ -377,7 +378,8 @@ skip:
/*
* Start a kernel thread to update the MMP block periodically.
*/
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
+ (int)sizeof(mmp->mmp_bdevname),
bdevname(bh->b_bdev,
mmp->mmp_bdevname));
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
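
mmp_nodename and mmp_bdevname are fixed-size character arrays with no guarantee of NUL termination, so the hunk switches to the "%.*s" form, whose precision caps how many bytes the formatter may read. A runnable userspace demonstration:

/* Userspace demo: %.*s prints at most 'precision' bytes, so a char array
 * that fills its buffer without a terminating NUL is still safe to print. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];

	memcpy(name, "12345678", sizeof(name));	/* no room for a NUL */

	/* printf("%s", name) would run past the array; this cannot: */
	printf("node: %.*s\n", (int)sizeof(name), name);
	return 0;
}
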
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 3c3757ee11f0..339ede11896a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -79,8 +79,18 @@ static struct buffer_head *ext4_append(handle_t *handle,
static int ext4_dx_csum_verify(struct inode *inode,
struct ext4_dir_entry *dirent);
+/*
+ * Hints to ext4_read_dirblock regarding whether we expect a directory
+ * block being read to be an index block, or a block containing
+ * directory entries (and if the latter, whether it was found via a
+ * logical block in an htree index block). This is used to control
+ * what sort of sanity checking ext4_read_dirblock() will do on the
+ * directory block read from the storage device. EITHER means
+ * the caller doesn't know what kind of directory block will be read,
+ * so no specific verification will be done.
+ */
typedef enum {
- EITHER, INDEX, DIRENT
+ EITHER, INDEX, DIRENT, DIRENT_HTREE
} dirblock_type_t;
#define ext4_read_dirblock(inode, block, type) \
@@ -106,11 +116,14 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
return bh;
}
- if (!bh) {
+ if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
ext4_error_inode(inode, func, line, block,
- "Directory hole found");
+ "Directory hole found for htree %s block",
+ (type == INDEX) ? "index" : "leaf");
return ERR_PTR(-EFSCORRUPTED);
}
+ if (!bh)
+ return NULL;
dirent = (struct ext4_dir_entry *) bh->b_data;
/* Determine whether or not we have an index block */
if (is_dx(inode)) {
@@ -960,7 +973,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
- bh = ext4_read_dirblock(dir, block, DIRENT);
+ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
if (IS_ERR(bh))
return PTR_ERR(bh);
@@ -1432,6 +1445,7 @@ restart:
/*
* We deal with the read-ahead logic here.
*/
+ cond_resched();
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
@@ -1537,7 +1551,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
return (struct buffer_head *) frame;
do {
block = dx_get_block(frame->at);
- bh = ext4_read_dirblock(dir, block, DIRENT);
+ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
if (IS_ERR(bh))
goto errout;
@@ -2135,6 +2149,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
retval = ext4_dx_add_entry(handle, &fname, dir, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
goto out;
+ /* Can we just ignore htree data? */
+ if (ext4_has_metadata_csum(sb)) {
+ EXT4_ERROR_INODE(dir,
+ "Directory has corrupted htree index.");
+ retval = -EFSCORRUPTED;
+ goto out;
+ }
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
@@ -2142,6 +2163,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
bh = ext4_read_dirblock(dir, block, DIRENT);
+ if (bh == NULL) {
+ bh = ext4_bread(handle, dir, block,
+ EXT4_GET_BLOCKS_CREATE);
+ goto add_to_new_block;
+ }
if (IS_ERR(bh)) {
retval = PTR_ERR(bh);
bh = NULL;
@@ -2162,6 +2188,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
+add_to_new_block:
if (IS_ERR(bh)) {
retval = PTR_ERR(bh);
bh = NULL;
@@ -2203,7 +2230,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
return PTR_ERR(frame);
entries = frame->entries;
at = frame->at;
- bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
+ bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
bh = NULL;
@@ -2702,7 +2729,7 @@ bool ext4_empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
- struct ext4_dir_entry_2 *de, *de1;
+ struct ext4_dir_entry_2 *de;
struct super_block *sb;
if (ext4_has_inline_data(inode)) {
@@ -2719,36 +2746,48 @@ bool ext4_empty_dir(struct inode *inode)
EXT4_ERROR_INODE(inode, "invalid size");
return true;
}
- bh = ext4_read_dirblock(inode, 0, EITHER);
+ /* The first directory block must not be a hole,
+ * so treat it as DIRENT_HTREE
+ */
+ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
if (IS_ERR(bh))
return true;
de = (struct ext4_dir_entry_2 *) bh->b_data;
- de1 = ext4_next_entry(de, sb->s_blocksize);
- if (le32_to_cpu(de->inode) != inode->i_ino ||
- le32_to_cpu(de1->inode) == 0 ||
- strcmp(".", de->name) || strcmp("..", de1->name)) {
- ext4_warning_inode(inode, "directory missing '.' and/or '..'");
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+ 0) ||
+ le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
+ ext4_warning_inode(inode, "directory missing '.'");
brelse(bh);
return true;
}
- offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
- ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
- de = ext4_next_entry(de1, sb->s_blocksize);
+ offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
+ de = ext4_next_entry(de, sb->s_blocksize);
+ if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+ offset) ||
+ le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
+ ext4_warning_inode(inode, "directory missing '..'");
+ brelse(bh);
+ return true;
+ }
+ offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
while (offset < inode->i_size) {
- if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+ if (!(offset & (sb->s_blocksize - 1))) {
unsigned int lblock;
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
bh = ext4_read_dirblock(inode, lblock, EITHER);
+ if (bh == NULL) {
+ offset += sb->s_blocksize;
+ continue;
+ }
if (IS_ERR(bh))
return true;
- de = (struct ext4_dir_entry_2 *) bh->b_data;
}
+ de = (struct ext4_dir_entry_2 *) (bh->b_data +
+ (offset & (sb->s_blocksize - 1)));
if (ext4_check_dir_entry(inode, NULL, de, bh,
bh->b_data, bh->b_size, offset)) {
- de = (struct ext4_dir_entry_2 *)(bh->b_data +
- sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
continue;
}
@@ -2757,7 +2796,6 @@ bool ext4_empty_dir(struct inode *inode)
return false;
}
offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
- de = ext4_next_entry(de, sb->s_blocksize);
}
brelse(bh);
return true;
@@ -3052,18 +3090,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- if (inode->i_nlink == 0) {
- ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
- dentry->d_name.len, dentry->d_name.name);
- set_nlink(inode, 1);
- }
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
- drop_nlink(inode);
+ if (inode->i_nlink == 0)
+ ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
+ dentry->d_name.len, dentry->d_name.name);
+ else
+ drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = ext4_current_time(inode);
@@ -3302,7 +3339,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
struct buffer_head *bh;
if (!ext4_has_inline_data(inode)) {
- bh = ext4_read_dirblock(inode, 0, EITHER);
+ /* The first directory block must not be a hole, so
+ * treat it as DIRENT_HTREE
+ */
+ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
if (IS_ERR(bh)) {
*retval = PTR_ERR(bh);
return NULL;
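
With the DIRENT_HTREE hint, ext4_read_dirblock() effectively returns one of three things: an ERR_PTR() for corruption (including a hole where an htree index or leaf block must exist), NULL for a legitimate hole in a non-htree directory, or a buffer_head. Callers now have to distinguish all three, as the ext4_add_entry() hunk does; the shape of such a caller, with placeholder handlers:

/* Sketch of consuming the tri-state return; handle_hole() and
 * process_entries() are placeholders, not ext4 functions. */
static int walk_block_sketch(struct inode *dir, unsigned int block)
{
	struct buffer_head *bh = ext4_read_dirblock(dir, block, DIRENT);

	if (IS_ERR(bh))
		return PTR_ERR(bh);		/* corruption already reported */
	if (!bh)
		return handle_hole(dir, block);	/* sparse block, not an error */

	/* normal case: parse the entries, then release the buffer */
	process_entries(dir, bh);
	brelse(bh);
	return 0;
}
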
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923e5ebf..94f60f9d57fd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -469,16 +469,25 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
nr_to_submit) {
gfp_t gfp_flags = GFP_NOFS;
+ /*
+ * Since bounce page allocation uses a mempool, we can only use
+ * a waiting mask (i.e. request guaranteed allocation) on the
+ * first page of the bio. Otherwise it can deadlock.
+ */
+ if (io->io_bio)
+ gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
retry_encrypt:
data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
- if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
- if (io->io_bio) {
+ if (ret == -ENOMEM &&
+ (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
+ gfp_flags = GFP_NOFS;
+ if (io->io_bio)
ext4_io_submit(io);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- }
- gfp_flags |= __GFP_NOFAIL;
+ else
+ gfp_flags |= __GFP_NOFAIL;
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry_encrypt;
}
data_page = NULL;
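
The bounce pages come from a mempool, so only one allocation per bio may block on the pool; a second blocking request could wait forever on pages held by its own not-yet-submitted bio. The hunk therefore uses GFP_NOWAIT for every page after the first and, on failure, flushes what has been collected before retrying in blocking mode. Roughly, with encrypt_page()/submit_batch() as stand-ins (submit_batch() is assumed to clear b->in_flight):

/* Sketch of the retry policy; 'struct batch' and its helpers are placeholders. */
struct batch {
	bool in_flight;		/* a bio has been started but not submitted */
};

static struct page *get_bounce_page(struct batch *b, struct page *page)
{
	gfp_t gfp = b->in_flight ? GFP_NOWAIT | __GFP_NOWARN : GFP_NOFS;
	struct page *bounce;

retry:
	bounce = encrypt_page(page, gfp);
	if (IS_ERR(bounce) && PTR_ERR(bounce) == -ENOMEM) {
		if (b->in_flight) {
			submit_batch(b);	/* returns pool pages, clears in_flight */
			gfp = GFP_NOFS;
		} else {
			gfp |= __GFP_NOFAIL;	/* the first page must not fail */
		}
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
		goto retry;
	}
	return bounce;
}
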
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 67b359629a66..845d9841c91c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -16,6 +16,33 @@
#include "ext4_jbd2.h"
+struct ext4_rcu_ptr {
+ struct rcu_head rcu;
+ void *ptr;
+};
+
+static void ext4_rcu_ptr_callback(struct rcu_head *head)
+{
+ struct ext4_rcu_ptr *ptr;
+
+ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
+ kvfree(ptr->ptr);
+ kfree(ptr);
+}
+
+void ext4_kvfree_array_rcu(void *to_free)
+{
+ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+
+ if (ptr) {
+ ptr->ptr = to_free;
+ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
+ return;
+ }
+ synchronize_rcu();
+ kvfree(to_free);
+}
+
int ext4_resize_begin(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -541,8 +568,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
brelse(gdb);
goto out;
}
- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
- gdb->b_size);
+ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
+ s_group_desc, j)->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
@@ -849,13 +876,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
}
brelse(dind);
- o_group_desc = EXT4_SB(sb)->s_group_desc;
+ rcu_read_lock();
+ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
- EXT4_SB(sb)->s_group_desc = n_group_desc;
+ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
EXT4_SB(sb)->s_gdb_count++;
- kvfree(o_group_desc);
+ ext4_kvfree_array_rcu(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_super(handle, sb);
@@ -903,15 +932,24 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
return err;
}
- o_group_desc = EXT4_SB(sb)->s_group_desc;
+ rcu_read_lock();
+ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
- EXT4_SB(sb)->s_group_desc = n_group_desc;
- EXT4_SB(sb)->s_gdb_count++;
- kvfree(o_group_desc);
+
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
+ if (err) {
+ kvfree(n_group_desc);
+ brelse(gdb_bh);
+ return err;
+ }
+
+ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ EXT4_SB(sb)->s_gdb_count++;
+ ext4_kvfree_array_rcu(o_group_desc);
return err;
}
@@ -1173,7 +1211,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
* use non-sparse filesystems anymore. This is already checked above.
*/
if (gdb_off) {
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+ gdb_num);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
@@ -1255,7 +1294,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
/*
* get_write_access() has been called on gdb_bh by ext4_add_new_desc().
*/
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
@@ -1383,11 +1422,14 @@ static void ext4_update_super(struct super_block *sb,
percpu_counter_read(&sbi->s_freeclusters_counter));
if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
+ struct flex_groups *fg;
+
flex_group = ext4_flex_group(sbi, group_data[0].group);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &fg->free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
- &sbi->s_flex_groups[flex_group].free_inodes);
+ &fg->free_inodes);
}
/*
@@ -1482,7 +1524,8 @@ exit_journal:
for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+ gdb_num);
if (old_gdb == gdb_bh->b_blocknr)
continue;
update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
@@ -2040,6 +2083,10 @@ out:
free_flex_gd(flex_gd);
if (resize_inode != NULL)
iput(resize_inode);
- ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
+ if (err)
+ ext4_warning(sb, "error (%d) occurred during "
+ "file system resize", err);
+ ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
+ ext4_blocks_count(es));
return err;
}
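
The resize and super changes above convert s_group_desc and s_flex_groups into RCU-protected pointers: readers use rcu_dereference() inside rcu_read_lock(), updaters copy the array, publish it with rcu_assign_pointer() and defer freeing the old one. A standalone sketch of the same pattern, assuming the userspace-rcu (liburcu) development package is installed (build with cc demo.c -lurcu); unlike ext4_kvfree_array_rcu() it simply waits with synchronize_rcu() before freeing, which is also the fallback the kernel helper takes when it cannot allocate a wrapper:

/*
 * RCU-protected pointer-array sketch using liburcu; names and sizes
 * are illustrative, this is not ext4 code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <urcu.h>           /* rcu_read_lock(), rcu_assign_pointer(), ... */

static int *table;          /* RCU-protected array, holds "count" entries */
static int count;

static void grow_table(int new_count)
{
    int *new_tbl = calloc(new_count, sizeof(*new_tbl));
    int *old_tbl = table;   /* single updater, a plain read is fine here */

    if (!new_tbl)
        return;
    if (old_tbl)
        memcpy(new_tbl, old_tbl, count * sizeof(*new_tbl));
    rcu_assign_pointer(table, new_tbl);  /* publish the grown array       */
    count = new_count;
    synchronize_rcu();                   /* wait out pre-existing readers */
    free(old_tbl);                       /* now nobody can still see it   */
}

static int read_entry(int i)
{
    int *tbl, val;

    rcu_read_lock();
    tbl = rcu_dereference(table);        /* like sbi_array_rcu_deref() */
    val = tbl ? tbl[i] : -1;
    rcu_read_unlock();
    return val;
}

int main(void)
{
    rcu_register_thread();               /* readers must register */
    grow_table(4);
    grow_table(16);
    printf("entry 0 = %d\n", read_entry(0));
    rcu_unregister_thread();
    return 0;
}

ext4_kvfree_array_rcu() prefers call_rcu() with a small wrapper so the updater never blocks; the synchronous wait shown here is its allocation-failure fallback.
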
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a6c7ace9cfd1..ed0520fe4dad 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -344,7 +344,8 @@ static void save_error_info(struct super_block *sb, const char *func,
unsigned int line)
{
__save_error_info(sb, func, line);
- ext4_commit_super(sb, 1);
+ if (!bdev_read_only(sb->s_bdev))
+ ext4_commit_super(sb, 1);
}
/*
@@ -826,6 +827,8 @@ static void ext4_put_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ struct buffer_head **group_desc;
+ struct flex_groups **flex_groups;
int aborted = 0;
int i, err;
@@ -857,15 +860,23 @@ static void ext4_put_super(struct super_block *sb)
if (!(sb->s_flags & MS_RDONLY))
ext4_commit_super(sb, 1);
+ rcu_read_lock();
+ group_desc = rcu_dereference(sbi->s_group_desc);
for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(sbi->s_group_desc[i]);
- kvfree(sbi->s_group_desc);
- kvfree(sbi->s_flex_groups);
+ brelse(group_desc[i]);
+ kvfree(group_desc);
+ flex_groups = rcu_dereference(sbi->s_flex_groups);
+ if (flex_groups) {
+ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+ kvfree(flex_groups[i]);
+ kvfree(flex_groups);
+ }
+ rcu_read_unlock();
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
@@ -2109,8 +2120,8 @@ done:
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct flex_groups *new_groups;
- int size;
+ struct flex_groups **old_groups, **new_groups;
+ int size, i, j;
if (!sbi->s_log_groups_per_flex)
return 0;
@@ -2119,22 +2130,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
if (size <= sbi->s_flex_groups_allocated)
return 0;
- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
- new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+ new_groups = ext4_kvzalloc(roundup_pow_of_two(size *
+ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
if (!new_groups) {
- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
- size / (int) sizeof(struct flex_groups));
+ ext4_msg(sb, KERN_ERR,
+ "not enough memory for %d flex group pointers", size);
return -ENOMEM;
}
-
- if (sbi->s_flex_groups) {
- memcpy(new_groups, sbi->s_flex_groups,
- (sbi->s_flex_groups_allocated *
- sizeof(struct flex_groups)));
- kvfree(sbi->s_flex_groups);
+ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+ new_groups[i] = ext4_kvzalloc(roundup_pow_of_two(
+ sizeof(struct flex_groups)),
+ GFP_KERNEL);
+ if (!new_groups[i]) {
+ for (j = sbi->s_flex_groups_allocated; j < i; j++)
+ kvfree(new_groups[j]);
+ kvfree(new_groups);
+ ext4_msg(sb, KERN_ERR,
+ "not enough memory for %d flex groups", size);
+ return -ENOMEM;
+ }
}
- sbi->s_flex_groups = new_groups;
- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+ rcu_read_lock();
+ old_groups = rcu_dereference(sbi->s_flex_groups);
+ if (old_groups)
+ memcpy(new_groups, old_groups,
+ (sbi->s_flex_groups_allocated *
+ sizeof(struct flex_groups *)));
+ rcu_read_unlock();
+ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+ sbi->s_flex_groups_allocated = size;
+ if (old_groups)
+ ext4_kvfree_array_rcu(old_groups);
return 0;
}
@@ -2142,6 +2168,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
+ struct flex_groups *fg;
ext4_group_t flex_group;
int i, err;
@@ -2159,12 +2186,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
gdp = ext4_get_group_desc(sb, i, NULL);
flex_group = ext4_flex_group(sbi, i);
- atomic_add(ext4_free_inodes_count(sb, gdp),
- &sbi->s_flex_groups[flex_group].free_inodes);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
atomic64_add(ext4_free_group_clusters(sb, gdp),
- &sbi->s_flex_groups[flex_group].free_clusters);
- atomic_add(ext4_used_dirs_count(sb, gdp),
- &sbi->s_flex_groups[flex_group].used_dirs);
+ &fg->free_clusters);
+ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
}
return 1;
@@ -2743,17 +2769,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_QUOTA
- if (ext4_has_feature_quota(sb) && !readonly) {
- ext4_msg(sb, KERN_ERR,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return 0;
- }
- if (ext4_has_feature_project(sb) && !readonly) {
+#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
+ if (!readonly && (ext4_has_feature_quota(sb) ||
+ ext4_has_feature_project(sb))) {
ext4_msg(sb, KERN_ERR,
- "Filesystem with project quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
+ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
return 0;
}
#endif /* CONFIG_QUOTA */
@@ -3342,6 +3362,40 @@ int ext4_calculate_overhead(struct super_block *sb)
return 0;
}
+static void ext4_clamp_want_extra_isize(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ unsigned def_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+
+ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
+ sbi->s_want_extra_isize = 0;
+ return;
+ }
+ if (sbi->s_want_extra_isize < 4) {
+ sbi->s_want_extra_isize = def_extra_isize;
+ if (ext4_has_feature_extra_isize(sb)) {
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_want_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_want_extra_isize);
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_min_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_min_extra_isize);
+ }
+ }
+ /* Check if enough inode space is available */
+ if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
+ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+ sbi->s_inode_size)) {
+ sbi->s_want_extra_isize = def_extra_isize;
+ ext4_msg(sb, KERN_INFO,
+ "required extra inode space not available");
+ }
+}
+
static void ext4_set_resv_clusters(struct super_block *sb)
{
ext4_fsblk_t resv_clusters;
@@ -3375,9 +3429,10 @@ static void ext4_set_resv_clusters(struct super_block *sb)
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
- struct buffer_head *bh;
+ struct buffer_head *bh, **group_desc;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ struct flex_groups **flex_groups;
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
@@ -3766,7 +3821,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
sbi->s_inodes_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
- sbi->s_blocks_per_group);
+ sbi->s_inodes_per_group);
goto failed_mount;
}
sbi->s_itb_per_group = sbi->s_inodes_per_group /
@@ -3897,9 +3952,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
EXT4_BLOCKS_PER_GROUP(sb) - 1);
do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
- ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
+ ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
"(block count %llu, first data block %u, "
- "blocks per group %lu)", sbi->s_groups_count,
+ "blocks per group %lu)", blocks_count,
ext4_blocks_count(es),
le32_to_cpu(es->s_first_data_block),
EXT4_BLOCKS_PER_GROUP(sb));
@@ -3927,9 +3982,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
}
- sbi->s_group_desc = ext4_kvmalloc(db_count *
+ rcu_assign_pointer(sbi->s_group_desc,
+ ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
- GFP_KERNEL);
+ GFP_KERNEL));
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
ret = -ENOMEM;
@@ -3939,14 +3995,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
bgl_lock_init(sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
+ struct buffer_head *bh;
+
block = descriptor_loc(sb, logical_sb_block, i);
- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
- if (!sbi->s_group_desc[i]) {
+ bh = sb_bread_unmovable(sb, block);
+ if (!bh) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
db_count = i;
goto failed_mount2;
}
+ rcu_read_lock();
+ rcu_dereference(sbi->s_group_desc)[i] = bh;
+ rcu_read_unlock();
}
sbi->s_gdb_count = db_count;
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
@@ -4034,7 +4095,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"data=, fs mounted w/o journal");
goto failed_mount_wq;
}
- sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM;
+ sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM);
clear_opt(sb, DATA_FLAGS);
sbi->s_journal = NULL;
@@ -4156,29 +4217,7 @@ no_journal:
if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
sb->s_flags |= MS_RDONLY;
- /* determine the minimum size of new large inodes, if present */
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
- if (ext4_has_feature_extra_isize(sb)) {
- if (sbi->s_want_extra_isize <
- le16_to_cpu(es->s_want_extra_isize))
- sbi->s_want_extra_isize =
- le16_to_cpu(es->s_want_extra_isize);
- if (sbi->s_want_extra_isize <
- le16_to_cpu(es->s_min_extra_isize))
- sbi->s_want_extra_isize =
- le16_to_cpu(es->s_min_extra_isize);
- }
- }
- /* Check if enough inode space is available */
- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
- sbi->s_inode_size) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
- ext4_msg(sb, KERN_INFO, "required extra inode space not"
- "available");
- }
+ ext4_clamp_want_extra_isize(sb);
ext4_set_resv_clusters(sb);
@@ -4217,7 +4256,7 @@ no_journal:
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
GFP_KERNEL);
if (!err)
- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
@@ -4310,13 +4349,19 @@ failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
ext4_mb_release(sb);
- if (sbi->s_flex_groups)
- kvfree(sbi->s_flex_groups);
+ rcu_read_lock();
+ flex_groups = rcu_dereference(sbi->s_flex_groups);
+ if (flex_groups) {
+ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+ kvfree(flex_groups[i]);
+ kvfree(flex_groups);
+ }
+ rcu_read_unlock();
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
@@ -4343,9 +4388,12 @@ failed_mount3:
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
+ rcu_read_lock();
+ group_desc = rcu_dereference(sbi->s_group_desc);
for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
- kvfree(sbi->s_group_desc);
+ brelse(group_desc[i]);
+ kvfree(group_desc);
+ rcu_read_unlock();
failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
@@ -4959,6 +5007,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ ext4_clamp_want_extra_isize(sb);
+
if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
test_opt(sb, JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "changing journal_checksum "
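
ext4_clamp_want_extra_isize() above centralizes the old mount-time logic: legacy 128-byte inodes get no extra space, an unset or implausibly small want falls back to the defaults advertised by the superblock, and a value that would not fit inside the on-disk inode is clamped back down with a warning. A standalone sketch of those clamping rules (the extra_isize feature check is dropped for brevity, and 32 is only an illustrative default):

#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128  /* EXT4_GOOD_OLD_INODE_SIZE */
#define DEF_EXTRA_ISIZE      32  /* stands in for sizeof(struct ext4_inode) - 128 */

static unsigned clamp_want_extra_isize(unsigned inode_size, unsigned want,
                                       unsigned sb_want, unsigned sb_min)
{
    if (inode_size == GOOD_OLD_INODE_SIZE)
        return 0;                        /* legacy inodes: no extra space */

    if (want < 4) {                      /* unset or garbage: take defaults */
        want = DEF_EXTRA_ISIZE;
        if (want < sb_want)
            want = sb_want;
        if (want < sb_min)
            want = sb_min;
    }

    /* Would the extra fields still fit inside the on-disk inode? */
    if (GOOD_OLD_INODE_SIZE + want > inode_size) {
        printf("  required extra inode space not available, clamping\n");
        want = DEF_EXTRA_ISIZE;
    }
    return want;
}

int main(void)
{
    printf("128-byte inodes -> %u\n", clamp_want_extra_isize(128, 0, 32, 32));
    printf("256-byte inodes -> %u\n", clamp_want_extra_isize(256, 0, 64, 32));
    printf("160-byte inodes -> %u\n", clamp_want_extra_isize(160, 0, 64, 32));
    return 0;
}
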
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9041805096e0..b2cccd4083b8 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1201,6 +1201,7 @@ int do_write_data_page(struct f2fs_io_info *fio)
/* This page is already truncated */
if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
+ clear_cold_data(page);
goto out_writepage;
}
@@ -1266,7 +1267,7 @@ static int f2fs_write_data_page(struct page *page,
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
- loff_t psize = (page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -1337,8 +1338,10 @@ done:
clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
- if (err)
+ if (err) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
if (wbc->for_reclaim) {
f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
@@ -1821,6 +1824,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
inode_dec_dirty_pages(inode);
}
+ clear_cold_data(page);
+
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return;
@@ -1839,6 +1844,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ clear_cold_data(page);
set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index af719d93507e..b414892be08b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -772,6 +772,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
clear_page_dirty_for_io(page);
ClearPagePrivate(page);
ClearPageUptodate(page);
+ clear_cold_data(page);
inode_dec_dirty_pages(dir);
}
f2fs_put_page(page, 1);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f46ac1651bd5..e3c438c8b8ce 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -980,7 +980,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
}
dn.ofs_in_node++;
i++;
- new_size = (dst + i) << PAGE_SHIFT;
+ new_size = (loff_t)(dst + i) << PAGE_SHIFT;
if (dst_inode->i_size < new_size)
f2fs_i_size_write(dst_inode, new_size);
} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
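
Both casts above, in f2fs_write_data_page() and in __clone_blkaddrs(), fix the same class of bug: page->index is an unsigned long, so on 32-bit kernels (index + 1) << PAGE_SHIFT wraps around for files past 4 GiB before the result is ever widened to loff_t. A standalone demonstration of the wraparound, using uint32_t to model the 32-bit case:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT 12                   /* 4 KiB pages */

int main(void)
{
    uint32_t index = 0x00100000;        /* page 1M, i.e. the 4 GiB mark */

    /* Old form: the shift is done in 32 bits and wraps, and only the
     * already-truncated result is widened to 64 bits. */
    uint64_t wrong = (index + 1) << PAGE_SHIFT;

    /* Fixed form, as in the hunks above: widen first, then shift. */
    uint64_t right = (uint64_t)(index + 1) << PAGE_SHIFT;

    printf("wrong = %" PRIu64 "\n", wrong);   /* 4096: wrapped          */
    printf("right = %" PRIu64 "\n", right);   /* 4294971392, past 4 GiB */
    return 0;
}
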
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 759056e776e5..b1d62003cda6 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -985,7 +985,7 @@ stop:
put_gc_inode(&gc_list);
- if (sync)
+ if (sync && !ret)
ret = sec_freed ? 0 : -EAGAIN;
return ret;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 1de02c31756b..c56d04ec45dc 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -288,6 +288,7 @@ make_now:
return inode;
bad_inode:
+ f2fs_inode_synced(inode);
iget_failed(inode);
trace_f2fs_iget_exit(inode, ret);
return ERR_PTR(ret);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e59eeaf02eaa..e87b7d7e80fc 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -179,6 +179,8 @@ static void recover_inode(struct inode *inode, struct page *page)
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
+ i_uid_write(inode, le32_to_cpu(raw->i_uid));
+ i_gid_write(inode, le32_to_cpu(raw->i_gid));
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
@@ -407,7 +409,15 @@ retry_dn:
get_node_info(sbi, dn.nid, &ni);
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
- f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
+
+ if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
+ inode->i_ino, ofs_of_node(dn.node_page),
+ ofs_of_node(page));
+ err = -EFAULT;
+ goto err;
+ }
for (; start < end; start++, dn.ofs_in_node++) {
block_t src, dest;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 2fb99a081de8..c4c84af1ec17 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -227,8 +227,10 @@ static int __revoke_inmem_pages(struct inode *inode,
}
next:
	/* we don't need to invalidate this in the successful status */
- if (drop || recover)
+ if (drop || recover) {
ClearPageUptodate(page);
+ clear_cold_data(page);
+ }
set_page_private(page, 0);
ClearPagePrivate(page);
f2fs_put_page(page, 1);
@@ -2490,6 +2492,41 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
return init_victim_secmap(sbi);
}
+static int sanity_check_curseg(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ /*
+ * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
+ * In LFS curseg, all blkaddr after .next_blkoff should be unused.
+ */
+ for (i = 0; i < NO_CHECK_TYPE; i++) {
+ struct curseg_info *curseg = CURSEG_I(sbi, i);
+ struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
+ unsigned int blkofs = curseg->next_blkoff;
+
+ if (f2fs_test_bit(blkofs, se->cur_valid_map))
+ goto out;
+
+ if (curseg->alloc_type == SSR)
+ continue;
+
+ for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
+ if (!f2fs_test_bit(blkofs, se->cur_valid_map))
+ continue;
+out:
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Current segment's next free block offset is "
+ "inconsistent with bitmap, logtype:%u, "
+ "segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
+ i, curseg->segno, curseg->alloc_type,
+ curseg->next_blkoff, blkofs);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
/*
* Update min, max modified time for cost-benefit GC algorithm
*/
@@ -2583,6 +2620,10 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
+ err = sanity_check_curseg(sbi);
+ if (err)
+ return err;
+
init_min_max_mtime(sbi);
return 0;
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 893723978f5e..faca7fdb54b0 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -613,7 +613,6 @@ static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
static inline int check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
{
-#ifdef CONFIG_F2FS_CHECK_FS
bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
int valid_blocks = 0;
int cur_pos = 0, next_pos;
@@ -640,7 +639,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
set_sbi_flag(sbi, SBI_NEED_FSCK);
return -EINVAL;
}
-#endif
+
/* check segment usage, and check boundary of a given segment number */
if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
|| segno > TOTAL_SEGS(sbi) - 1)) {
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 83a96334dc07..e0ac676e0a35 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1489,7 +1489,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
unsigned int segment_count_main;
unsigned int cp_pack_start_sum, cp_payload;
block_t user_block_count;
- int i;
+ int i, j;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1530,11 +1530,43 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_node_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Node segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
+ for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Data segment (%u, %u) has the same "
+ "segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_data_segno[i]));
+ return 1;
+ }
+ }
+ }
+ for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+ for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
+ if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
+ le32_to_cpu(ckpt->cur_data_segno[j])) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Node segment (%u) and Data segment (%u)"
+ " has the same segno: %u", i, j,
+ le32_to_cpu(ckpt->cur_node_segno[i]));
+ return 1;
+ }
+ }
}
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
@@ -1618,8 +1650,12 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
if (err)
return err;
- return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
GFP_KERNEL);
+ if (err)
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+
+ return err;
}
/*
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index 73b4e1d1912a..501c283761d2 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -61,6 +61,7 @@ void f2fs_trace_pid(struct page *page)
page->private = pid;
+retry:
if (radix_tree_preload(GFP_NOFS))
return;
@@ -71,7 +72,12 @@ void f2fs_trace_pid(struct page *page)
if (p)
radix_tree_delete(&pids, pid);
- f2fs_radix_tree_insert(&pids, pid, current);
+ if (radix_tree_insert(&pids, pid, current)) {
+ spin_unlock(&pids_lock);
+ radix_tree_preload_end();
+ cond_resched();
+ goto retry;
+ }
trace_printk("%3x:%3x %4x %-16s\n",
MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 81cecbe6d7cf..971e369517a7 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memset(bhs[n]->b_data, 0, sb->s_blocksize);
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
n++;
@@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de = (struct msdos_dir_entry *)bhs[0]->b_data;
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[0]);
/* filling the new directory slots ("." and ".." entries) */
memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
@@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec *ts)
de[0].size = de[1].size = 0;
memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
set_buffer_uptodate(bhs[0]);
+ unlock_buffer(bhs[0]);
mark_buffer_dirty_inode(bhs[0], dir);
err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
@@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
/* fill the directory entry */
copy = min(size, sb->s_blocksize);
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(bhs[n]);
memcpy(bhs[n]->b_data, slots, copy);
- slots += copy;
- size -= copy;
set_buffer_uptodate(bhs[n]);
+ unlock_buffer(bhs[n]);
mark_buffer_dirty_inode(bhs[n], dir);
+ slots += copy;
+ size -= copy;
if (!size)
break;
n++;
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a9cad9b60790..0129d4d07a54 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -389,8 +389,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
err = -ENOMEM;
goto error;
}
+ /* Avoid race with userspace read via bdev */
+ lock_buffer(c_bh);
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
+ unlock_buffer(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
if (sb->s_flags & MS_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 3d04b124bce0..392ec5641f38 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -160,12 +160,17 @@ static int fat_file_release(struct inode *inode, struct file *filp)
int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
- int res, err;
+ int err;
+
+ err = __generic_file_fsync(filp, start, end, datasync);
+ if (err)
+ return err;
- res = generic_file_fsync(filp, start, end, datasync);
err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
+ if (err)
+ return err;
- return res ? res : err;
+ return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 88720011a6eb..f0387d040331 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -736,6 +736,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
return NULL;
init_rwsem(&ei->truncate_lock);
+ /* Zeroing to allow iput() even if partial initialized inode. */
+ ei->mmu_private = 0;
+ ei->i_start = 0;
+ ei->i_logstart = 0;
+ ei->i_attrs = 0;
+ ei->i_pos = 0;
+
return &ei->vfs_inode;
}
@@ -1366,16 +1373,6 @@ out:
return 0;
}
-static void fat_dummy_inode_init(struct inode *inode)
-{
- /* Initialize this dummy inode to work as no-op. */
- MSDOS_I(inode)->mmu_private = 0;
- MSDOS_I(inode)->i_start = 0;
- MSDOS_I(inode)->i_logstart = 0;
- MSDOS_I(inode)->i_attrs = 0;
- MSDOS_I(inode)->i_pos = 0;
-}
-
static int fat_read_root(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1820,13 +1817,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
- fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
diff --git a/fs/file.c b/fs/file.c
index 69d6990e3021..09aac4d4729b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -475,6 +475,7 @@ struct files_struct init_files = {
.full_fds_bits = init_files.full_fds_bits_init,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
+ .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f3aea1b8702c..882e9d6830df 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
struct work_struct work;
};
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+ down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+ up_write(&bdi->wb_switch_rwsem);
+}
+
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
void **slot;
/*
+ * If @inode switches cgwb membership while sync_inodes_sb() is
+ * being issued, sync_inodes_sb() might miss it. Synchronize.
+ */
+ down_read(&bdi->wb_switch_rwsem);
+
+ /*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
* between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -435,6 +452,8 @@ skip_switch:
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
+ up_read(&bdi->wb_switch_rwsem);
+
if (switched) {
wb_wakeup(new_wb);
wb_put(old_wb);
@@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
+ /*
+ * Avoid starting new switches while sync_inodes_sb() is in
+ * progress. Otherwise, if the down_write protected issue path
+ * blocks heavily, we might end up starting a large number of
+ * switches which will block on the rwsem.
+ */
+ if (!down_read_trylock(&bdi->wb_switch_rwsem))
+ return;
+
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
- return;
+ goto out_unlock;
/* find and pin the new wb */
rcu_read_lock();
@@ -502,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
isw->inode = inode;
- atomic_inc(&isw_nr_in_flight);
-
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the mapping's
@@ -511,12 +537,17 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
- return;
+
+ atomic_inc(&isw_nr_in_flight);
+
+ goto out_unlock;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
+out_unlock:
+ up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -551,10 +582,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
spin_unlock(&inode->i_lock);
/*
- * A dying wb indicates that the memcg-blkcg mapping has changed
- * and a new wb is already serving the memcg. Switch immediately.
+ * A dying wb indicates that either the blkcg associated with the
+ * memcg changed or the associated memcg is dying. In the first
+ * case, a replacement wb should already be available and we should
+ * refresh the wb immediately. In the second case, trying to
+ * refresh will keep failing.
*/
- if (unlikely(wb_dying(wbc->wb)))
+ if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
@@ -690,6 +724,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
void wbc_account_io(struct writeback_control *wbc, struct page *page,
size_t bytes)
{
+ struct cgroup_subsys_state *css;
int id;
/*
@@ -701,7 +736,12 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
if (!wbc->wb)
return;
- id = mem_cgroup_css_from_page(page)->id;
+ css = mem_cgroup_css_from_page(page);
+ /* dead cgroups shouldn't contribute to inode ownership arbitration */
+ if (!(css->flags & CSS_ONLINE))
+ return;
+
+ id = css->id;
if (id == wbc->wb_id) {
wbc->wb_bytes += bytes;
@@ -878,7 +918,11 @@ restart:
void cgroup_writeback_umount(void)
{
if (atomic_read(&isw_nr_in_flight)) {
- synchronize_rcu();
+ /*
+ * Use rcu_barrier() to wait for all pending callbacks to
+ * ensure that all in-flight wb switches are in the workqueue.
+ */
+ rcu_barrier();
flush_workqueue(isw_wq);
}
}
@@ -894,6 +938,9 @@ fs_initcall(cgroup_writeback_init);
#else /* CONFIG_CGROUP_WRITEBACK */
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
@@ -2408,8 +2455,11 @@ void sync_inodes_sb(struct super_block *sb)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+ bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(bdi, &done);
+ bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
}
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index e25c40c10f4f..97ac2f5843fc 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -107,7 +107,7 @@ static ssize_t fuse_conn_max_background_read(struct file *file,
if (!fc)
return 0;
- val = fc->max_background;
+ val = READ_ONCE(fc->max_background);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
@@ -144,7 +144,7 @@ static ssize_t fuse_conn_congestion_threshold_read(struct file *file,
if (!fc)
return 0;
- val = fc->congestion_threshold;
+ val = READ_ONCE(fc->congestion_threshold);
fuse_conn_put(fc);
return fuse_conn_limit_read(file, buf, len, ppos, val);
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index c5b6b7165489..d9aba9700726 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -513,6 +513,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
rc = cuse_send_init(cc);
if (rc) {
fuse_dev_free(fud);
+ fuse_conn_put(&cc->fc);
return rc;
}
file->private_data = fud;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 411f16101d1a..8016cd059db1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1668,7 +1668,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
offset = outarg->offset & ~PAGE_MASK;
file_size = i_size_read(inode);
- num = outarg->size;
+ num = min(outarg->size, fc->max_write);
if (outarg->offset > file_size)
num = 0;
else if (outarg->offset + num > file_size)
@@ -1975,10 +1975,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
- if (rem < len) {
- pipe_unlock(pipe);
- goto out;
- }
+ if (rem < len)
+ goto out_free;
rem = len;
while (rem) {
@@ -1996,7 +1994,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
- pipe_buf_get(pipe, ibuf);
+ if (!pipe_buf_get(pipe, ibuf))
+ goto out_free;
+
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
@@ -2019,11 +2019,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
+out_free:
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
-out:
kfree(bufs);
return ret;
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 60dd2bc10776..9af23f436558 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -234,7 +234,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
kfree(forget);
if (ret == -ENOMEM)
goto out;
- if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
+ if (ret || fuse_invalid_attr(&outarg.attr) ||
+ (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
goto invalid;
forget_all_cached_acls(inode);
@@ -297,6 +298,12 @@ int fuse_valid_type(int m)
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
+bool fuse_invalid_attr(struct fuse_attr *attr)
+{
+ return !fuse_valid_type(attr->mode) ||
+ attr->size > LLONG_MAX;
+}
+
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
@@ -328,7 +335,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
err = -EIO;
if (!outarg->nodeid)
goto out_put_forget;
- if (!fuse_valid_type(outarg->attr.mode))
+ if (fuse_invalid_attr(&outarg->attr))
goto out_put_forget;
*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
@@ -451,7 +458,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
goto out_free_ff;
err = -EIO;
- if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
+ if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
+ fuse_invalid_attr(&outentry.attr))
goto out_free_ff;
ff->fh = outopen.fh;
@@ -557,7 +565,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
goto out_put_forget_req;
err = -EIO;
- if (invalid_nodeid(outarg.nodeid))
+ if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
goto out_put_forget_req;
if ((outarg.attr.mode ^ mode) & S_IFMT)
@@ -830,7 +838,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
spin_lock(&fc->lock);
fi->attr_version = ++fc->attr_version;
- inc_nlink(inode);
+ if (likely(inode->i_nlink < UINT_MAX))
+ inc_nlink(inode);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
fuse_update_ctime(inode);
@@ -910,7 +919,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ if (fuse_invalid_attr(&outarg.attr) ||
+ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
err = -EIO;
} else {
@@ -1218,7 +1228,7 @@ static int fuse_direntplus_link(struct file *file,
if (invalid_nodeid(o->nodeid))
return -EIO;
- if (!fuse_valid_type(o->attr.mode))
+ if (fuse_invalid_attr(&o->attr))
return -EIO;
fc = get_fuse_conn(dir);
@@ -1654,6 +1664,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
+ /* Flush dirty data/metadata before non-truncate SETATTR */
+ if (is_wb && S_ISREG(inode->i_mode) &&
+ attr->ia_valid &
+ (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+ ATTR_TIMES_SET)) {
+ err = write_inode_now(inode, true);
+ if (err)
+ return err;
+
+ fuse_set_nowrite(inode);
+ fuse_release_nowrite(inode);
+ }
+
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
@@ -1682,7 +1705,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
goto error;
}
- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ if (fuse_invalid_attr(&outarg.attr) ||
+ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
make_bad_inode(inode);
err = -EIO;
goto error;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 30a607473621..92f905ea20b0 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -178,7 +178,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
file->f_op = &fuse_direct_io_file_operations;
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
- if (ff->open_flags & FOPEN_NONSEEKABLE)
+ if (ff->open_flags & FOPEN_STREAM)
+ stream_open(inode, file);
+ else if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -199,7 +201,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
- bool lock_inode = (file->f_flags & O_TRUNC) &&
+ bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
@@ -207,16 +209,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
- if (lock_inode)
+ if (is_wb_truncate) {
inode_lock(inode);
+ fuse_set_nowrite(inode);
+ }
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
- if (lock_inode)
+ if (is_wb_truncate) {
+ fuse_release_nowrite(inode);
inode_unlock(inode);
+ }
return err;
}
@@ -1521,7 +1527,7 @@ __acquires(fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
- size_t crop = i_size_read(inode);
+ loff_t crop = i_size_read(inode);
struct fuse_req *req;
while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
@@ -1692,6 +1698,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
return 0;
}
@@ -2961,6 +2968,13 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
}
}
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ offset + length > i_size_read(inode)) {
+ err = inode_newsize_ok(inode, offset + length);
+ if (err)
+ goto out;
+ }
+
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1c905c7666de..f84dd6d87d90 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -898,6 +898,8 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc);
*/
int fuse_valid_type(int m);
+bool fuse_invalid_attr(struct fuse_attr *attr);
+
/**
* Is current process allowed to perform filesystem operation?
*/
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 39af17b407f0..d83e99fa98b3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1236,6 +1236,8 @@ static int do_grow(struct inode *inode, u64 size)
}
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
+ (unstuff &&
+ gfs2_is_jdata(ip) ? RES_JDATA : 0) +
(sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
0 : RES_QUOTA), 0);
if (error)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 7a8b1d72e3d9..adc1a97cfe96 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -136,22 +136,26 @@ static int demote_ok(const struct gfs2_glock *gl)
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
+ if (!(gl->gl_ops->go_flags & GLOF_LRU))
+ return;
+
spin_lock(&lru_lock);
- if (!list_empty(&gl->gl_lru))
- list_del_init(&gl->gl_lru);
- else
+ list_del(&gl->gl_lru);
+ list_add_tail(&gl->gl_lru, &lru_list);
+
+ if (!test_bit(GLF_LRU, &gl->gl_flags)) {
+ set_bit(GLF_LRU, &gl->gl_flags);
atomic_inc(&lru_count);
+ }
- list_add_tail(&gl->gl_lru, &lru_list);
- set_bit(GLF_LRU, &gl->gl_flags);
spin_unlock(&lru_lock);
}
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
spin_lock(&lru_lock);
- if (!list_empty(&gl->gl_lru)) {
+ if (test_bit(GLF_LRU, &gl->gl_flags)) {
list_del_init(&gl->gl_lru);
atomic_dec(&lru_count);
clear_bit(GLF_LRU, &gl->gl_flags);
@@ -544,6 +548,9 @@ __acquires(&gl->gl_lockref.lock)
goto out_unlock;
if (nonblock)
goto out_sched;
+ smp_mb();
+ if (atomic_read(&gl->gl_revokes) != 0)
+ goto out_sched;
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
@@ -1048,8 +1055,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags))
fast_path = 1;
}
- if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
- (glops->go_flags & GLOF_LRU))
+ if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
gfs2_glock_add_to_lru(gl);
trace_gfs2_glock_queue(gh, 0);
@@ -1349,6 +1355,7 @@ __acquires(&lru_lock)
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
list_add(&gl->gl_lru, &lru_list);
+ set_bit(GLF_LRU, &gl->gl_flags);
atomic_inc(&lru_count);
continue;
}
@@ -1356,7 +1363,6 @@ add_back_to_lru:
spin_unlock(&gl->gl_lockref.lock);
goto add_back_to_lru;
}
- clear_bit(GLF_LRU, &gl->gl_flags);
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1392,6 +1398,7 @@ static long gfs2_scan_glock_lru(int nr)
if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
list_move(&gl->gl_lru, &dispose);
atomic_dec(&lru_count);
+ clear_bit(GLF_LRU, &gl->gl_flags);
freed++;
continue;
}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bd6202b70447..daad7b04f88c 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
if (!(*opened & FILE_OPENED))
return finish_no_open(file, d);
dput(d);
- return 0;
+ return excl && (flags & O_CREAT) ? -EEXIST : 0;
}
BUG_ON(d != NULL);
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 8b907c5cc913..3c3d037df824 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -32,9 +32,10 @@ extern struct workqueue_struct *gfs2_control_wq;
* @delta is the difference between the current rtt sample and the
* running average srtt. We add 1/8 of that to the srtt in order to
* update the current srtt estimate. The variance estimate is a bit
- * more complicated. We subtract the abs value of the @delta from
- * the current variance estimate and add 1/4 of that to the running
- * total.
+ * more complicated. We subtract the current variance estimate from
+ * the abs value of the @delta and add 1/4 of that to the running
+ * total. That's equivalent to 3/4 of the current variance
+ * estimate plus 1/4 of the abs of @delta.
*
* Note that the index points at the array entry containing the smoothed
* mean value, and the variance is always in the following entry
@@ -50,7 +51,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
s64 delta = sample - s->stats[index];
s->stats[index] += (delta >> 3);
index++;
- s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
+ s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}
/**
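
The corrected comment and shift above implement the usual TCP-style estimator: the smoothed mean moves by 1/8 of the error (delta), and the variance estimate moves by 1/4 of (|delta| - variance), which is the same as decaying to 3/4 of its old value plus 1/4 of the new |delta|. A standalone integer version of the gfs2_update_stats() arithmetic, fed a few made-up samples:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void update_stats(int64_t *srtt, int64_t *var, int64_t sample)
{
    int64_t delta = sample - *srtt;

    *srtt += delta >> 3;                            /* mean: 1/8 of the error */
    *var += (int64_t)(llabs(delta) - *var) >> 2;    /* 3/4 old + 1/4 |delta|  */
}

int main(void)
{
    int64_t srtt = 0, var = 0;
    int64_t samples[] = { 800, 1200, 1000, 4000, 900 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        update_stats(&srtt, &var, samples[i]);
        printf("sample %4lld -> srtt %4lld, var %4lld\n",
               (long long)samples[i], (long long)srtt, (long long)var);
    }
    return 0;
}
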
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 073126707270..0a80f6636549 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -630,7 +630,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
RB_CLEAR_NODE(&rs->rs_node);
if (rs->rs_free) {
- struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+ u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
+ rs->rs_free - 1;
+ struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
+ struct gfs2_bitmap *start, *last;
/* return reserved blocks to the rgrp */
BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
@@ -641,7 +644,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
it will force the number to be recalculated later. */
rgd->rd_extfail_pt += rs->rs_free;
rs->rs_free = 0;
- clear_bit(GBF_FULL, &bi->bi_flags);
+ if (gfs2_rbm_from_block(&last_rbm, last_block))
+ return;
+ start = rbm_bi(&rs->rs_rbm);
+ last = rbm_bi(&last_rbm);
+ do
+ clear_bit(GBF_FULL, &start->bi_flags);
+ while (start++ != last);
}
}
@@ -1211,7 +1220,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
rl_flags &= ~GFS2_RDF_MASK;
rgd->rd_flags &= GFS2_RDF_MASK;
- rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
+ rgd->rd_flags |= (rl_flags | GFS2_RDF_CHECK);
if (rgd->rd_rgl->rl_unlinked == 0)
rgd->rd_flags &= ~GFS2_RDF_CHECK;
rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e3ee387a6dfe..37496d83661a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -844,10 +844,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
return error;
+ flush_workqueue(gfs2_delete_workqueue);
kthread_stop(sdp->sd_quotad_process);
kthread_stop(sdp->sd_logd_process);
- flush_workqueue(gfs2_delete_workqueue);
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 2e713673df42..85dab71bee74 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -444,6 +444,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 320f4372f172..77eff447d301 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -219,25 +219,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
-
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- u32 count;
- int res;
+ struct inode *inode = tree->inode;
+ u32 count;
+ int res;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfs_extend_file(inode);
if (res)
- return ERR_PTR(res);
+ return res;
HFS_I(inode)->phys_size = inode->i_size =
(loff_t)HFS_I(inode)->alloc_blocks *
HFS_SB(tree->sb)->alloc_blksz;
@@ -245,9 +237,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
tree->sb->s_blocksize_bits;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
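
hfs_bmap_reserve() above turns on-demand node allocation into an up-front reservation, so a multi-step btree update can fail cleanly with ENOSPC before it has modified anything; the callers added in catalog.c, extent.c and attributes.c reserve depth+1 or 2*depth nodes because each insert may split one node per level. A standalone sketch of the same reserve-then-consume idea, with a hypothetical bounded pool standing in for the btree map file (not HFS code):

#include <stdio.h>
#include <errno.h>

#define POOL_LIMIT 8                    /* pretend the "volume" holds 8 nodes */

static int free_nodes;                  /* like tree->free_nodes */
static int total_nodes;                 /* like tree->node_count */

static int reserve_nodes(int rsvd)
{
    while (free_nodes < rsvd) {
        if (total_nodes >= POOL_LIMIT)
            return -ENOSPC;             /* fail before any record is touched */
        total_nodes++;                  /* "extend the map file" by one node */
        free_nodes++;
    }
    return 0;
}

static void consume_node(void)          /* cannot fail once reserved */
{
    free_nodes--;
}

static int insert_record(int depth)
{
    int err = reserve_nodes(2 * depth); /* worst case: a split on every level */

    if (err)
        return err;
    for (int level = 0; level < depth; level++)
        consume_node();                 /* pretend every level really splits */
    return 0;
}

int main(void)
{
    int err;

    err = insert_record(3);
    printf("insert at depth 3: %d (free=%d, total=%d)\n",
           err, free_nodes, total_nodes);
    err = insert_record(9);
    printf("insert at depth 9: %d (free=%d, total=%d)\n",
           err, free_nodes, total_nodes);
    return 0;
}
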
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index f6bd266d70b5..2715f416b5a8 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -81,6 +81,7 @@ struct hfs_find_data {
extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
extern void hfs_btree_close(struct hfs_btree *);
extern void hfs_btree_write(struct hfs_btree *);
+extern int hfs_bmap_reserve(struct hfs_btree *, int);
extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
extern void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 8a66405b0f8b..d365bf0b8c77 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
HFS_CDR_THD : HFS_CDR_FTH,
@@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
err = hfs_brec_find(&src_fd);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index e33a0d36a93e..cbe4fca96378 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
} else {
@@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
return 0;
blocks = 0;
- for (i = 0; i < 3; extent++, i++)
+ for (i = 0; i < 3; i++)
blocks += be16_to_cpu(extent[i].count);
res = hfs_free_extents(sb, extent, blocks, blocks);
@@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
ablock = (u32)block / HFS_SB(sb)->fs_div;
if (block >= HFS_I(inode)->fs_blocks) {
- if (block > HFS_I(inode)->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (block > HFS_I(inode)->fs_blocks)
return -EIO;
if (ablock >= HFS_I(inode)->alloc_blocks) {
res = hfs_extend_file(inode);
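
The hfs_free_fork() fix above removes a double-advance bug: the old loop stepped the extent pointer and the index i together while still indexing with extent[i], so it summed slots 0, 2 and 4 instead of 0, 1 and 2 and could read past the three-slot extent record. A standalone reproduction with a plain array (padded so the demo itself stays in bounds):

#include <stdio.h>

int main(void)
{
    /* Only slots 0..2 belong to the "extent record"; the 999s stand in
     * for whatever happens to live after it in memory. */
    unsigned short counts[6] = { 10, 20, 30, 999, 999, 999 };
    const unsigned short *p;
    int blocks, i;

    /* Old form: for (i = 0; i < 3; extent++, i++) ... extent[i].count */
    blocks = 0;
    p = counts;
    for (i = 0; i < 3; p++, i++)
        blocks += p[i];                  /* reads counts[0], [2], [4] */
    printf("buggy sum = %d\n", blocks);  /* 10 + 30 + 999 = 1039 */

    /* Fixed form: for (i = 0; i < 3; i++) ... extent[i].count */
    blocks = 0;
    p = counts;
    for (i = 0; i < 3; i++)
        blocks += p[i];                  /* reads counts[0], [1], [2] */
    printf("fixed sum = %d\n", blocks);  /* 10 + 20 + 30 = 60 */
    return 0;
}
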
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index f776acf2378a..de0d6d4c46b6 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -641,6 +641,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
+ inode->i_atime = inode->i_mtime = inode->i_ctime =
+ current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
index e5b221de7de6..0c4548d8cd0b 100644
--- a/fs/hfsplus/attributes.c
+++ b/fs/hfsplus/attributes.c
@@ -216,6 +216,11 @@ int hfsplus_create_attr(struct inode *inode,
if (err)
goto failed_init_create_attr;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
+ if (err)
+ goto failed_create_attr;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
@@ -286,6 +291,10 @@ static int __hfsplus_delete_attr(struct inode *inode, u32 cnid,
return -ENOENT;
}
+ /* Avoid btree corruption */
+ hfs_bnode_read(fd->bnode, fd->search_key,
+ fd->keyoffset, fd->keylength);
+
err = hfs_brec_remove(fd);
if (err)
return err;
@@ -312,6 +321,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
if (err)
return err;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
+ if (err)
+ goto out;
+
if (name) {
err = hfsplus_attr_build_key(sb, fd.search_key,
inode->i_ino, name);
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 1002a0c08319..20ce698251ad 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -447,6 +447,7 @@ skip:
/* restore search_key */
hfs_bnode_read_key(node, fd->search_key, 14);
}
+ new_node = NULL;
}
if (!rec && node->parent)
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 8d2256454efe..7e96b4c294f7 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -341,26 +341,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
return node;
}
-struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+/* Make sure @tree has enough space for the @rsvd_nodes */
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
- struct hfs_bnode *node, *next_node;
- struct page **pagep;
- u32 nidx, idx;
- unsigned off;
- u16 off16;
- u16 len;
- u8 *data, byte, m;
- int i;
+ struct inode *inode = tree->inode;
+ struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
+ u32 count;
+ int res;
- while (!tree->free_nodes) {
- struct inode *inode = tree->inode;
- struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
- u32 count;
- int res;
+ if (rsvd_nodes <= 0)
+ return 0;
+ while (tree->free_nodes < rsvd_nodes) {
res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
if (res)
- return ERR_PTR(res);
+ return res;
hip->phys_size = inode->i_size =
(loff_t)hip->alloc_blocks <<
HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
@@ -368,9 +363,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
inode_set_bytes(inode, inode->i_size);
count = inode->i_size >> tree->node_size_shift;
- tree->free_nodes = count - tree->node_count;
+ tree->free_nodes += count - tree->node_count;
tree->node_count = count;
}
+ return 0;
+}
+
+struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
+{
+ struct hfs_bnode *node, *next_node;
+ struct page **pagep;
+ u32 nidx, idx;
+ unsigned off;
+ u16 off16;
+ u16 len;
+ u8 *data, byte, m;
+ int i, res;
+
+ res = hfs_bmap_reserve(tree, 1);
+ if (res)
+ return ERR_PTR(res);
nidx = 0;
node = hfs_bnode_find(tree, nidx);
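The hfsplus/btree.c change above splits node allocation in two: hfs_bmap_reserve() grows the btree file until tree->free_nodes covers the worst case the caller declares, and only then does hfs_bmap_alloc() hand out nodes, so -ENOSPC can only surface before any btree record is touched. A minimal userspace sketch of that reserve-then-consume loop, with made-up stand-ins (struct fake_tree, grow_tree_file) for the kernel structures:

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for struct hfs_btree: only the fields the
 * reservation loop cares about. */
struct fake_tree {
	int free_nodes;		/* nodes already backed by disk space */
	int node_count;		/* total nodes the tree file can hold */
	int grow_failures;	/* force -ENOSPC after this many grows */
};

/* Stand-in for hfsplus_file_extend(): adds capacity or fails. */
static int grow_tree_file(struct fake_tree *t)
{
	if (t->grow_failures-- <= 0)
		return -ENOSPC;
	t->node_count += 8;	/* pretend each extent adds 8 nodes */
	return 0;
}

/* Keep growing until the worst-case number of nodes is already free,
 * so the btree update that follows cannot fail halfway through. */
static int bmap_reserve(struct fake_tree *t, int rsvd_nodes)
{
	int old, res;

	if (rsvd_nodes <= 0)
		return 0;
	while (t->free_nodes < rsvd_nodes) {
		old = t->node_count;
		res = grow_tree_file(t);
		if (res)
			return res;
		t->free_nodes += t->node_count - old;
	}
	return 0;
}

int main(void)
{
	struct fake_tree t = { .free_nodes = 1, .node_count = 16,
			       .grow_failures = 2 };
	int res;

	/* e.g. an insert that may split the root needs 2 * depth nodes */
	res = bmap_reserve(&t, 10);
	printf("reserve 10 -> %d (free=%d)\n", res, t.free_nodes);
	res = bmap_reserve(&t, 40);
	printf("reserve 40 -> %d (free=%d)\n", res, t.free_nodes);
	return 0;
}

The += accumulation mirrors the tree->free_nodes fix in the hunk above: nodes reserved by an earlier pass are not forgotten when the file is extended again.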
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index a5e00f7a4c14..947da72e72a3 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -264,6 +264,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
+ if (err)
+ goto err2;
+
hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
S_ISDIR(inode->i_mode) ?
@@ -332,6 +340,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
if (err)
return err;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most once.
+ */
+ err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
+ if (err)
+ goto out;
+
if (!str) {
int len;
@@ -432,6 +448,14 @@ int hfsplus_rename_cat(u32 cnid,
return err;
dst_fd = src_fd;
+ /*
+ * Fail early and avoid ENOSPC during the btree operations. We may
+ * have to split the root node at most twice.
+ */
+ err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
+ if (err)
+ goto out;
+
/* find the old dir entry and read the data */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
src_dir->i_ino, src_name);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index feca524ce2a5..d93c051559cb 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -99,6 +99,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
if (hip->extent_state & HFSPLUS_EXT_NEW) {
if (res != -ENOENT)
return res;
+ /* Fail early and avoid ENOSPC during the btree operation */
+ res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
+ if (res)
+ return res;
hfs_brec_insert(fd, hip->cached_extents,
sizeof(hfsplus_extent_rec));
hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
@@ -232,7 +236,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
ablock = iblock >> sbi->fs_shift;
if (iblock >= hip->fs_blocks) {
- if (iblock > hip->fs_blocks || !create)
+ if (!create)
+ return 0;
+ if (iblock > hip->fs_blocks)
return -EIO;
if (ablock >= hip->alloc_blocks) {
res = hfsplus_file_extend(inode, false);
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index a3f03b247463..35cd703c6604 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
#define hfs_btree_open hfsplus_btree_open
#define hfs_btree_close hfsplus_btree_close
#define hfs_btree_write hfsplus_btree_write
+#define hfs_bmap_reserve hfsplus_bmap_reserve
#define hfs_bmap_alloc hfsplus_bmap_alloc
#define hfs_bmap_free hfsplus_bmap_free
#define hfs_bnode_read hfsplus_bnode_read
@@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
void hfs_btree_close(struct hfs_btree *tree);
int hfs_btree_write(struct hfs_btree *tree);
+int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
void hfs_bmap_free(struct hfs_bnode *node);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 2e796f8302ff..cfd380e2743d 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -260,6 +260,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
setattr_copy(inode, attr);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001487b230b5..253b03451b72 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -451,9 +451,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
if (next >= end)
break;
- hash = hugetlb_fault_mutex_hash(h, current->mm,
- &pseudo_vma,
- mapping, next, 0);
+ hash = hugetlb_fault_mutex_hash(h, mapping, next, 0);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
@@ -573,7 +571,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_inode(inode);
struct vm_area_struct pseudo_vma;
- struct mm_struct *mm = current->mm;
loff_t hpage_size = huge_page_size(h);
unsigned long hpage_shift = huge_page_shift(h);
pgoff_t start, index, end;
@@ -637,8 +634,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
addr = index * hpage_size;
/* mutex taken here, fault path and hole punch */
- hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
- index, addr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
@@ -746,11 +742,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
umode_t mode, dev_t dev)
{
struct inode *inode;
- struct resv_map *resv_map;
+ struct resv_map *resv_map = NULL;
- resv_map = resv_map_alloc();
- if (!resv_map)
- return NULL;
+ /*
+ * Reserve maps are only needed for inodes that can have associated
+ * page allocations.
+ */
+ if (S_ISREG(mode) || S_ISLNK(mode)) {
+ resv_map = resv_map_alloc();
+ if (!resv_map)
+ return NULL;
+ }
inode = new_inode(sb);
if (inode) {
@@ -782,8 +784,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
break;
}
lockdep_annotate_inode_mutex_key(inode);
- } else
- kref_put(&resv_map->refs, resv_map_release);
+ } else {
+ if (resv_map)
+ kref_put(&resv_map->refs, resv_map_release);
+ }
return inode;
}
diff --git a/fs/inode.c b/fs/inode.c
index 2071ff5343c5..0d993ce7a940 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -135,6 +135,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_sb = sb;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
+ atomic64_set(&inode->i_sequence, 0);
atomic_set(&inode->i_count, 1);
inode->i_op = &empty_iops;
inode->i_fop = &no_open_fops;
@@ -1804,8 +1805,13 @@ int file_remove_privs(struct file *file)
int kill;
int error = 0;
- /* Fast path for nothing security related */
- if (IS_NOSEC(inode))
+ /*
+ * Fast path for nothing security related.
+ * The same applies to non-regular files, e.g. blkdev inodes.
+ * For example, blkdev_write_iter() might get here
+ * trying to remove privs which it is not allowed to.
+ */
+ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
return 0;
kill = dentry_needs_remove_privs(dentry);
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 4d5a5a4cc017..addb0784dd1c 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -168,7 +168,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
"journal space in %s\n", __func__,
journal->j_devname);
WARN_ON(1);
- jbd2_journal_abort(journal, 0);
+ jbd2_journal_abort(journal, -EIO);
}
write_lock(&journal->j_state_lock);
} else {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 31f8ca046639..f65ad50d5f7b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -700,9 +700,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
the last tag we set up. */
tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
-
- jbd2_descriptor_block_csum_set(journal, descriptor);
start_journal_io:
+ if (descriptor)
+ jbd2_descriptor_block_csum_set(journal,
+ descriptor);
+
for (i = 0; i < bufs; i++) {
struct buffer_head *bh = wbuf[i];
/*
@@ -720,7 +722,6 @@ start_journal_io:
submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
}
cond_resched();
- stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
@@ -778,7 +779,7 @@ start_journal_io:
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
}
blk_finish_plug(&plug);
@@ -807,6 +808,7 @@ start_journal_io:
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
/*
* The list contains temporary buffer heads created by
@@ -852,6 +854,7 @@ start_journal_io:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -869,10 +872,11 @@ start_journal_io:
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
+ stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
@@ -965,29 +969,34 @@ restart_loop:
* it. */
/*
- * A buffer which has been freed while still being journaled by
- * a previous transaction.
- */
- if (buffer_freed(bh)) {
+ * A buffer which has been freed while still being journaled
+ * by a previous transaction, refile the buffer to BJ_Forget of
+ * the running transaction. If the just committed transaction
+ * contains "add to orphan" operation, we can completely
+ * invalidate the buffer now. We are rather thorough in that
+ * since the buffer may still be accessible when blocksize <
+ * pagesize and it is attached to the last partial page.
+ */
+ if (buffer_freed(bh) && !jh->b_next_transaction) {
+ struct address_space *mapping;
+
+ clear_buffer_freed(bh);
+ clear_buffer_jbddirty(bh);
+
/*
- * If the running transaction is the one containing
- * "add to orphan" operation (b_next_transaction !=
- * NULL), we have to wait for that transaction to
- * commit before we can really get rid of the buffer.
- * So just clear b_modified to not confuse transaction
- * credit accounting and refile the buffer to
- * BJ_Forget of the running transaction. If the just
- * committed transaction contains "add to orphan"
- * operation, we can completely invalidate the buffer
- * now. We are rather through in that since the
- * buffer may be still accessible when blocksize <
- * pagesize and it is attached to the last partial
- * page.
+ * Block device buffers need to stay mapped all the
+ * time, so it is enough to clear buffer_jbddirty and
+ * buffer_freed bits. For the file mapping buffers (i.e.
+ * journalled data) we need to unmap buffer and clear
+ * more bits. We also need to be careful about the check
+ * because the data page mapping can get cleared under
+ * our hands. Note that if mapping == NULL, we don't
+ * need to make buffer unmapped because the page is
+ * already detached from the mapping and buffers cannot
+ * get reused.
*/
- jh->b_modified = 0;
- if (!jh->b_next_transaction) {
- clear_buffer_freed(bh);
- clear_buffer_jbddirty(bh);
+ mapping = READ_ONCE(bh->b_page->mapping);
+ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
clear_buffer_mapped(bh);
clear_buffer_new(bh);
clear_buffer_req(bh);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index d10bb2c30bf8..efc8cfd06073 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1339,6 +1339,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
journal_superblock_t *sb = journal->j_superblock;
int ret;
+ /* Buffer got discarded which means block device got invalidated */
+ if (!buffer_mapped(bh))
+ return -EIO;
+
trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
@@ -1666,6 +1670,11 @@ int jbd2_journal_load(journal_t *journal)
journal->j_devname);
return -EFSCORRUPTED;
}
+ /*
+ * clear JBD2_ABORT flag initialized in journal_init_common
+ * here to update log tail information with the newest seq.
+ */
+ journal->j_flags &= ~JBD2_ABORT;
/* OK, we've finished with the dynamic journal bits:
* reinitialise the dynamic contents of the superblock in memory
@@ -1673,7 +1682,6 @@ int jbd2_journal_load(journal_t *journal)
if (journal_reset(journal))
goto recovery_error;
- journal->j_flags &= ~JBD2_ABORT;
journal->j_flags |= JBD2_LOADED;
return 0;
@@ -2092,12 +2100,10 @@ static void __journal_abort_soft (journal_t *journal, int errno)
__jbd2_journal_abort_hard(journal);
- if (errno) {
- jbd2_journal_update_sb_errno(journal);
- write_lock(&journal->j_state_lock);
- journal->j_flags |= JBD2_REC_ERR;
- write_unlock(&journal->j_state_lock);
- }
+ jbd2_journal_update_sb_errno(journal);
+ write_lock(&journal->j_state_lock);
+ journal->j_flags |= JBD2_REC_ERR;
+ write_unlock(&journal->j_state_lock);
}
/**
@@ -2139,11 +2145,6 @@ static void __journal_abort_soft (journal_t *journal, int errno)
* failure to disk. ext3_error, for example, now uses this
* functionality.
*
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
*/
void jbd2_journal_abort(journal_t *journal, int errno)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 799f96c67211..8de458d64134 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1037,8 +1037,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
/* For undo access buffer must have data copied */
if (undo && !jh->b_committed_data)
goto out;
- if (jh->b_transaction != handle->h_transaction &&
- jh->b_next_transaction != handle->h_transaction)
+ if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
+ READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
goto out;
/*
* There are two reasons for the barrier here:
@@ -2213,14 +2213,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
return -EBUSY;
}
/*
- * OK, buffer won't be reachable after truncate. We just set
- * j_next_transaction to the running transaction (if there is
- * one) and mark buffer as freed so that commit code knows it
- * should clear dirty bits when it is done with the buffer.
+ * OK, buffer won't be reachable after truncate. We just clear
+ * b_modified to not confuse transaction credit accounting, and
+ * set j_next_transaction to the running transaction (if there
+ * is one) and mark buffer as freed so that commit code knows
+ * it should clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
+ jh->b_modified = 0;
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
@@ -2446,8 +2448,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
* our jh reference and thus __jbd2_journal_file_buffer() must not
* take a new one.
*/
- jh->b_transaction = jh->b_next_transaction;
- jh->b_next_transaction = NULL;
+ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
+ WRITE_ONCE(jh->b_next_transaction, NULL);
if (buffer_freed(bh))
jlist = BJ_Forget;
else if (jh->b_modified)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 06a71dbd4833..2f236cca6095 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- if (f->target) {
- kfree(f->target);
- f->target = NULL;
- }
-
fds = f->dents;
while(fds) {
fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 226640563df3..76aedbc97773 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
static void jffs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+ kfree(f->target);
+ kmem_cache_free(jffs2_inode_cachep, f);
}
static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index 4d973524c887..224ef034004b 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1928,8 +1928,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
* header ?
*/
if (tlck->type & tlckTRUNCATE) {
- /* This odd declaration suppresses a bogus gcc warning */
- pxd_t pxd = pxd; /* truncated extent of xad */
+ pxd_t pxd; /* truncated extent of xad */
int twm;
/*
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 80317b04c84a..e431a850f2f2 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -63,6 +63,9 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (base == kn)
break;
+ if ((s - path) + 3 >= PATH_MAX)
+ return -ENAMETOOLONG;
+
strcpy(s, "../");
s += 3;
base = base->parent;
@@ -79,7 +82,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (len < 2)
return -EINVAL;
len--;
- if ((s - path) + len > PATH_MAX)
+ if ((s - path) + len >= PATH_MAX)
return -ENAMETOOLONG;
/* reverse fillup of target string from target to base */
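The kernfs_get_target_path() fix bounds both the "../" prefix construction and the reverse fill so the relative symlink target, terminator included, stays within PATH_MAX. A small self-contained sketch of the same bound on the prefix loop; build_dotdot_prefix() is an invented name for the sketch, not a kernfs helper:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Append "levels" copies of "../" into buf, refusing the copy when it
 * would leave no room for the terminating NUL - the same bound the
 * "(s - path) + 3 >= PATH_MAX" check adds in kernfs_get_target_path(). */
static int build_dotdot_prefix(char *buf, size_t bufsz, int levels)
{
	char *s = buf;

	while (levels--) {
		if ((size_t)(s - buf) + 3 >= bufsz)
			return -ENAMETOOLONG;
		memcpy(s, "../", 3);
		s += 3;
	}
	*s = '\0';
	return 0;
}

int main(void)
{
	char path[16];

	printf("%d\n", build_dotdot_prefix(path, sizeof(path), 5)); /* 15 chars + NUL: fits */
	printf("%d\n", build_dotdot_prefix(path, sizeof(path), 6)); /* refused, -ENAMETOOLONG */
	return 0;
}

With a 16-byte buffer, five "../" components plus the NUL fit exactly, and a sixth fails the bound check instead of overrunning.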
diff --git a/fs/libfs.c b/fs/libfs.c
index 9588780ad43e..278457f22148 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -85,58 +85,47 @@ int dcache_dir_close(struct inode *inode, struct file *file)
EXPORT_SYMBOL(dcache_dir_close);
/* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
- struct list_head *from,
- int count)
+/*
+ * Returns an element of siblings' list.
+ * We are looking for <count>th positive after <p>; if
+ * found, dentry is grabbed and passed to caller via *<res>.
+ * If no such element exists, the anchor of list is returned
+ * and *<res> is set to NULL.
+ */
+static struct list_head *scan_positives(struct dentry *cursor,
+ struct list_head *p,
+ loff_t count,
+ struct dentry **res)
{
- unsigned *seq = &parent->d_inode->i_dir_seq, n;
- struct dentry *res;
- struct list_head *p;
- bool skipped;
- int i;
+ struct dentry *dentry = cursor->d_parent, *found = NULL;
-retry:
- i = count;
- skipped = false;
- n = smp_load_acquire(seq) & ~1;
- res = NULL;
- rcu_read_lock();
- for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+ spin_lock(&dentry->d_lock);
+ while ((p = p->next) != &dentry->d_subdirs) {
struct dentry *d = list_entry(p, struct dentry, d_child);
- if (!simple_positive(d)) {
- skipped = true;
- } else if (!--i) {
- res = d;
- break;
+ // we must at least skip cursors, to avoid livelocks
+ if (d->d_flags & DCACHE_DENTRY_CURSOR)
+ continue;
+ if (simple_positive(d) && !--count) {
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(d))
+ found = dget_dlock(d);
+ spin_unlock(&d->d_lock);
+ if (likely(found))
+ break;
+ count = 1;
+ }
+ if (need_resched()) {
+ list_move(&cursor->d_child, p);
+ p = &cursor->d_child;
+ spin_unlock(&dentry->d_lock);
+ cond_resched();
+ spin_lock(&dentry->d_lock);
}
}
- rcu_read_unlock();
- if (skipped) {
- smp_rmb();
- if (unlikely(*seq != n))
- goto retry;
- }
- return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
- struct dentry *parent = cursor->d_parent;
- unsigned n, *seq = &parent->d_inode->i_dir_seq;
- spin_lock(&parent->d_lock);
- for (;;) {
- n = *seq;
- if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
- break;
- cpu_relax();
- }
- __list_del(cursor->d_child.prev, cursor->d_child.next);
- if (after)
- list_add(&cursor->d_child, after);
- else
- list_add_tail(&cursor->d_child, &parent->d_subdirs);
- smp_store_release(seq, n + 2);
- spin_unlock(&parent->d_lock);
+ spin_unlock(&dentry->d_lock);
+ dput(*res);
+ *res = found;
+ return p;
}
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -152,17 +141,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
if (offset != file->f_pos) {
+ struct dentry *cursor = file->private_data;
+ struct dentry *to = NULL;
+ struct list_head *p;
+
file->f_pos = offset;
- if (file->f_pos >= 2) {
- struct dentry *cursor = file->private_data;
- struct dentry *to;
- loff_t n = file->f_pos - 2;
-
- inode_lock_shared(dentry->d_inode);
- to = next_positive(dentry, &dentry->d_subdirs, n);
- move_cursor(cursor, to ? &to->d_child : NULL);
- inode_unlock_shared(dentry->d_inode);
+ inode_lock_shared(dentry->d_inode);
+
+ if (file->f_pos > 2) {
+ p = scan_positives(cursor, &dentry->d_subdirs,
+ file->f_pos - 2, &to);
+ spin_lock(&dentry->d_lock);
+ list_move(&cursor->d_child, p);
+ spin_unlock(&dentry->d_lock);
+ } else {
+ spin_lock(&dentry->d_lock);
+ list_del_init(&cursor->d_child);
+ spin_unlock(&dentry->d_lock);
}
+
+ dput(to);
+
+ inode_unlock_shared(dentry->d_inode);
}
return offset;
}
@@ -184,25 +184,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
struct dentry *cursor = file->private_data;
- struct list_head *p = &cursor->d_child;
- struct dentry *next;
- bool moved = false;
+ struct list_head *anchor = &dentry->d_subdirs;
+ struct dentry *next = NULL;
+ struct list_head *p;
if (!dir_emit_dots(file, ctx))
return 0;
if (ctx->pos == 2)
- p = &dentry->d_subdirs;
- while ((next = next_positive(dentry, p, 1)) != NULL) {
+ p = anchor;
+ else
+ p = &cursor->d_child;
+
+ while ((p = scan_positives(cursor, p, 1, &next)) != anchor) {
if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
d_inode(next)->i_ino, dt_type(d_inode(next))))
break;
- moved = true;
- p = &next->d_child;
ctx->pos++;
}
- if (moved)
- move_cursor(cursor, p);
+ spin_lock(&dentry->d_lock);
+ list_move_tail(&cursor->d_child, p);
+ spin_unlock(&dentry->d_lock);
+ dput(next);
+
return 0;
}
EXPORT_SYMBOL(dcache_readdir);
@@ -795,7 +799,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
{
struct simple_attr *attr;
- attr = kmalloc(sizeof(*attr), GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
@@ -835,9 +839,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
if (ret)
return ret;
- if (*ppos) { /* continued read */
+ if (*ppos && attr->get_buf[0]) {
+ /* continued read */
size = strlen(attr->get_buf);
- } else { /* first read */
+ } else {
+ /* first read */
u64 val;
ret = attr->get(attr->data, &val);
if (ret)
diff --git a/fs/locks.c b/fs/locks.c
index 22c5b4aa4961..8252647c6084 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2681,7 +2681,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
if (inode) {
/* userspace relies on this representation of dev_t */
- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
} else {
diff --git a/fs/namei.c b/fs/namei.c
index eb4626bad88a..0953281430b1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1011,7 +1011,8 @@ static int may_linkat(struct path *link)
* may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
* should be allowed, or not, on files that already
* exist.
- * @dir: the sticky parent directory
+ * @dir_mode: mode bits of directory
+ * @dir_uid: owner of directory
* @inode: the inode of the file to open
*
* Block an O_CREAT open of a FIFO (or a regular file) when:
@@ -1027,18 +1028,18 @@ static int may_linkat(struct path *link)
*
* Returns 0 if the open is allowed, -ve on error.
*/
-static int may_create_in_sticky(struct dentry * const dir,
+static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid,
struct inode * const inode)
{
if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
(!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
- likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
- uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+ likely(!(dir_mode & S_ISVTX)) ||
+ uid_eq(inode->i_uid, dir_uid) ||
uid_eq(current_fsuid(), inode->i_uid))
return 0;
- if (likely(dir->d_inode->i_mode & 0002) ||
- (dir->d_inode->i_mode & 0020 &&
+ if (likely(dir_mode & 0002) ||
+ (dir_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
return -EACCES;
@@ -1369,7 +1370,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
nd->path.dentry = parent;
nd->seq = seq;
if (unlikely(!path_connected(&nd->path)))
- return -ENOENT;
+ return -ECHILD;
break;
} else {
struct mount *mnt = real_mount(nd->path.mnt);
@@ -3259,6 +3260,8 @@ static int do_last(struct nameidata *nd,
int *opened)
{
struct dentry *dir = nd->path.dentry;
+ kuid_t dir_uid = nd->inode->i_uid;
+ umode_t dir_mode = nd->inode->i_mode;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
@@ -3401,7 +3404,7 @@ finish_open:
error = -EISDIR;
if (d_is_dir(nd->path.dentry))
goto out;
- error = may_create_in_sticky(dir,
+ error = may_create_in_sticky(dir_mode, dir_uid,
d_backing_inode(nd->path.dentry));
if (unlikely(error))
goto out;
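do_last() now snapshots the parent directory's i_mode and i_uid up front and feeds plain values into may_create_in_sticky(), since nd->path.dentry no longer points at that directory by the time the check runs. A toy rendering of the decision logic, with the VFS types reduced to integers and the two sysctl knobs hard-coded; it is only meant to show when the sticky-directory O_CREAT open is refused:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Hard-coded stand-ins for the sysctl_protected_fifos/_regular knobs. */
static const int protected_fifos = 2;
static const int protected_regular = 2;

/* Same decision logic as the patched may_create_in_sticky(), but fed
 * with mode/uid values captured before the final lookup. */
static int may_create_in_sticky(mode_t dir_mode, uid_t dir_uid,
				mode_t inode_mode, uid_t inode_uid,
				uid_t fsuid)
{
	if ((!protected_fifos && S_ISFIFO(inode_mode)) ||
	    (!protected_regular && S_ISREG(inode_mode)) ||
	    !(dir_mode & S_ISVTX) ||
	    inode_uid == dir_uid ||
	    fsuid == inode_uid)
		return 0;

	if ((dir_mode & 0002) ||
	    ((dir_mode & 0020) &&
	     ((protected_fifos >= 2 && S_ISFIFO(inode_mode)) ||
	      (protected_regular >= 2 && S_ISREG(inode_mode)))))
		return -EACCES;
	return 0;
}

int main(void)
{
	mode_t dir = S_IFDIR | 01777;	/* world-writable sticky dir, like /tmp */
	mode_t fifo = S_IFIFO | 0644;

	/* FIFO owned by uid 1000, dir owned by root, opener is uid 1001: refused */
	printf("%d\n", may_create_in_sticky(dir, 0, fifo, 1000, 1001));
	/* opener owns the FIFO itself: allowed */
	printf("%d\n", may_create_in_sticky(dir, 0, fifo, 1000, 1000));
	return 0;
}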
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index b1daeafbea92..c3428767332c 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -89,7 +89,7 @@ config NFS_V4
config NFS_SWAP
bool "Provide swap over NFS support"
default n
- depends on NFS_FS
+ depends on NFS_FS && SWAP
select SUNRPC_SWAP
help
This option enables swapon to work on files located on NFS mounts.
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 9d7537446260..0d4a56c77a1a 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -419,7 +419,7 @@ static bool referring_call_exists(struct nfs_client *clp,
uint32_t nrclists,
struct referring_call_list *rclists)
{
- bool status = 0;
+ bool status = false;
int i, j;
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ebecfb8fba06..28d8a57a9908 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -440,7 +440,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
case XPRT_TRANSPORT_RDMA:
if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_TCP_RETRANS;
- if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
to->to_initval = NFS_MAX_TCP_TIMEOUT;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index dff600ae0d74..014039618cff 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -52,6 +52,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
return false;
}
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (nfs4_is_valid_delegation(delegation, 0))
+ return delegation;
+ return NULL;
+}
+
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
@@ -91,7 +101,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags)
return nfs4_do_check_delegation(inode, flags, false);
}
-static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct inode *inode = state->inode;
struct file_lock *fl;
@@ -106,7 +116,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
spin_lock(&flctx->flc_lock);
restart:
list_for_each_entry(fl, list, fl_list) {
- if (nfs_file_open_context(fl->fl_file) != ctx)
+ if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
@@ -153,7 +163,7 @@ again:
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
err = nfs4_open_delegation_recall(ctx, state, stateid, type);
if (!err)
- err = nfs_delegation_claim_locks(ctx, state, stateid);
+ err = nfs_delegation_claim_locks(state, stateid);
if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
@@ -224,6 +234,8 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
spin_lock(&delegation->lock);
if (delegation->inode != NULL)
inode = igrab(delegation->inode);
+ if (!inode)
+ set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
spin_unlock(&delegation->lock);
return inode;
}
@@ -857,10 +869,11 @@ restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry_rcu(delegation, &server->delegations,
super_list) {
- if (test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags))
- continue;
- if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_NEED_RECLAIM,
&delegation->flags) == 0)
continue;
if (!nfs_sb_active(server->super))
@@ -965,10 +978,11 @@ restart:
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
list_for_each_entry_rcu(delegation, &server->delegations,
super_list) {
- if (test_bit(NFS_DELEGATION_RETURNING,
- &delegation->flags))
- continue;
- if (test_bit(NFS_DELEGATION_TEST_EXPIRED,
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURNING,
+ &delegation->flags) ||
+ test_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags) == 0)
continue;
if (!nfs_sb_active(server->super))
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index e9d555796873..f72095bf9e10 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -33,6 +33,7 @@ enum {
NFS_DELEGATION_RETURNING,
NFS_DELEGATION_REVOKED,
NFS_DELEGATION_TEST_EXPIRED,
+ NFS_DELEGATION_INODE_FREEING,
};
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
@@ -62,6 +63,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t flags);
int nfs4_check_delegation(struct inode *inode, fmode_t flags);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 1e5321d1ed22..2517fcd423b6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -57,7 +57,7 @@ static void nfs_readdir_clear_array(struct page*);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
.read = generic_read_dir,
- .iterate_shared = nfs_readdir,
+ .iterate = nfs_readdir,
.open = nfs_opendir,
.release = nfs_closedir,
.fsync = nfs_fsync_dir,
@@ -145,7 +145,6 @@ struct nfs_cache_array_entry {
};
struct nfs_cache_array {
- atomic_t refcount;
int size;
int eof_index;
u64 last_cookie;
@@ -170,6 +169,17 @@ typedef struct {
unsigned int eof:1;
} nfs_readdir_descriptor_t;
+static
+void nfs_readdir_init_array(struct page *page)
+{
+ struct nfs_cache_array *array;
+
+ array = kmap_atomic(page);
+ memset(array, 0, sizeof(struct nfs_cache_array));
+ array->eof_index = -1;
+ kunmap_atomic(array);
+}
+
/*
* The caller is responsible for calling nfs_readdir_release_array(page)
*/
@@ -201,18 +211,10 @@ void nfs_readdir_clear_array(struct page *page)
int i;
array = kmap_atomic(page);
- if (atomic_dec_and_test(&array->refcount))
- for (i = 0; i < array->size; i++)
- kfree(array->array[i].string.name);
- kunmap_atomic(array);
-}
-
-static bool grab_page(struct page *page)
-{
- struct nfs_cache_array *array = kmap_atomic(page);
- bool res = atomic_inc_not_zero(&array->refcount);
+ for (i = 0; i < array->size; i++)
+ kfree(array->array[i].string.name);
+ array->size = 0;
kunmap_atomic(array);
- return res;
}
/*
@@ -287,7 +289,7 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
desc->cache_entry_index = index;
return 0;
out_eof:
- desc->eof = 1;
+ desc->eof = true;
return -EBADCOOKIE;
}
@@ -341,7 +343,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
if (array->eof_index >= 0) {
status = -EBADCOOKIE;
if (*desc->dir_cookie == array->last_cookie)
- desc->eof = 1;
+ desc->eof = true;
}
out:
return status;
@@ -653,6 +655,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
int status = -ENOMEM;
unsigned int array_size = ARRAY_SIZE(pages);
+ nfs_readdir_init_array(page);
+
entry.prev_cookie = 0;
entry.cookie = desc->last_cookie;
entry.eof = 0;
@@ -673,9 +677,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
status = PTR_ERR(array);
goto out_label_free;
}
- memset(array, 0, sizeof(struct nfs_cache_array));
- atomic_set(&array->refcount, 1);
- array->eof_index = -1;
status = nfs_readdir_alloc_pages(pages, array_size);
if (status < 0)
@@ -730,6 +731,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
unlock_page(page);
return 0;
error:
+ nfs_readdir_clear_array(page);
unlock_page(page);
return ret;
}
@@ -737,7 +739,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
static
void cache_page_release(nfs_readdir_descriptor_t *desc)
{
- nfs_readdir_clear_array(desc->page);
put_page(desc->page);
desc->page = NULL;
}
@@ -745,33 +746,34 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
static
struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
{
- struct page *page;
-
- for (;;) {
- page = read_cache_page(desc->file->f_mapping,
+ return read_cache_page(desc->file->f_mapping,
desc->page_index, (filler_t *)nfs_readdir_filler, desc);
- if (IS_ERR(page) || grab_page(page))
- break;
- put_page(page);
- }
- return page;
}
/*
* Returns 0 if desc->dir_cookie was found on page desc->page_index
+ * and locks the page to prevent removal from the page cache.
*/
static
-int find_cache_page(nfs_readdir_descriptor_t *desc)
+int find_and_lock_cache_page(nfs_readdir_descriptor_t *desc)
{
int res;
desc->page = get_cache_page(desc);
if (IS_ERR(desc->page))
return PTR_ERR(desc->page);
-
- res = nfs_readdir_search_array(desc);
+ res = lock_page_killable(desc->page);
if (res != 0)
- cache_page_release(desc);
+ goto error;
+ res = -EAGAIN;
+ if (desc->page->mapping != NULL) {
+ res = nfs_readdir_search_array(desc);
+ if (res == 0)
+ return 0;
+ }
+ unlock_page(desc->page);
+error:
+ cache_page_release(desc);
return res;
}
@@ -786,7 +788,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
desc->last_cookie = 0;
}
do {
- res = find_cache_page(desc);
+ res = find_and_lock_cache_page(desc);
} while (res == -EAGAIN);
return res;
}
@@ -815,7 +817,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->string.name, ent->string.len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
- desc->eof = 1;
+ desc->eof = true;
break;
}
desc->ctx->pos++;
@@ -827,11 +829,10 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
ctx->duped = 1;
}
if (array->eof_index >= 0)
- desc->eof = 1;
+ desc->eof = true;
nfs_readdir_release_array(desc->page);
out:
- cache_page_release(desc);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
(unsigned long long)*desc->dir_cookie, res);
return res;
@@ -877,13 +878,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
status = nfs_do_filldir(desc);
+ out_release:
+ nfs_readdir_clear_array(desc->page);
+ cache_page_release(desc);
out:
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
__func__, status);
return status;
- out_release:
- cache_page_release(desc);
- goto out;
}
/* The file offset position represents the dirent entry number. A
@@ -928,7 +929,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
if (res == -EBADCOOKIE) {
res = 0;
/* This means either end of directory */
- if (*desc->dir_cookie && desc->eof == 0) {
+ if (*desc->dir_cookie && !desc->eof) {
/* Or that the server has 'lost' a cookie */
res = uncached_readdir(desc);
if (res == 0)
@@ -948,6 +949,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
break;
res = nfs_do_filldir(desc);
+ unlock_page(desc->page);
+ cache_page_release(desc);
if (res < 0)
break;
} while (!desc->eof);
@@ -960,11 +963,13 @@ out:
static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
{
+ struct inode *inode = file_inode(filp);
struct nfs_open_dir_context *dir_ctx = filp->private_data;
dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
filp, offset, whence);
+ inode_lock(inode);
switch (whence) {
case 1:
offset += filp->f_pos;
@@ -972,13 +977,16 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
if (offset >= 0)
break;
default:
- return -EINVAL;
+ offset = -EINVAL;
+ goto out;
}
if (offset != filp->f_pos) {
filp->f_pos = offset;
dir_ctx->dir_cookie = 0;
dir_ctx->duped = 0;
}
+out:
+ inode_unlock(inode);
return offset;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 53f0012ace42..de135d2591ff 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -595,6 +595,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
@@ -1019,6 +1020,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
l_ctx = nfs_get_lock_context(dreq->ctx);
if (IS_ERR(l_ctx)) {
result = PTR_ERR(l_ctx);
+ nfs_direct_req_release(dreq);
goto out_release;
}
dreq->l_ctx = l_ctx;
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 90099896b838..c8863563c635 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -17,7 +17,7 @@
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
-static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 76ae25661d3f..851274b25d39 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -950,6 +950,7 @@ int nfs_open(struct inode *inode, struct file *filp)
nfs_fscache_open_file(inode, filp);
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_open);
/*
* This function is called whenever some part of NFS notices that
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1452177c822d..c719389381dc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -434,7 +434,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 43f42cc30a60..3ee60c533217 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -781,9 +781,12 @@ found:
static void nfs4_destroy_server(struct nfs_server *server)
{
+ LIST_HEAD(freeme);
+
nfs_server_return_all_delegations(server);
unset_pnfs_layoutdriver(server);
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
+ nfs4_free_state_owners(&freeme);
}
/*
@@ -844,7 +847,7 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
spin_lock(&nn->nfs_client_lock);
list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
- if (nfs4_cb_match_client(addr, clp, minorversion) == false)
+ if (!nfs4_cb_match_client(addr, clp, minorversion))
continue;
if (!nfs4_has_session(clp))
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 89a77950e0b0..7138383382ff 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
return err;
if ((openflags & O_ACCMODE) == 3)
- openflags--;
+ return nfs_open(inode, filp);
/* We can't create new files here */
openflags &= ~(O_CREAT|O_EXCL);
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
default:
+ goto out_put_ctx;
+ case -ENOENT:
+ case -ESTALE:
+ case -EISDIR:
+ case -ENOTDIR:
+ case -ELOOP:
goto out_drop;
}
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index eb55ab6930b5..632d3c3f8dfb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1121,6 +1121,12 @@ struct nfs4_opendata {
int cancelled;
};
+struct nfs4_open_createattrs {
+ struct nfs4_label *label;
+ struct iattr *sattr;
+ const __u32 verf[2];
+};
+
static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
int err, struct nfs4_exception *exception)
{
@@ -1190,8 +1196,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
- const struct iattr *attrs,
- struct nfs4_label *label,
+ const struct nfs4_open_createattrs *c,
enum open_claim_type4 claim,
gfp_t gfp_mask)
{
@@ -1199,6 +1204,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct inode *dir = d_inode(parent);
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
+ struct nfs4_label *label = (c != NULL) ? c->label : NULL;
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
@@ -1255,15 +1261,11 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
p->o_arg.fh = NFS_FH(d_inode(dentry));
}
- if (attrs != NULL && attrs->ia_valid != 0) {
- __u32 verf[2];
-
+ if (c != NULL && c->sattr != NULL && c->sattr->ia_valid != 0) {
p->o_arg.u.attrs = &p->attrs;
- memcpy(&p->attrs, attrs, sizeof(p->attrs));
+ memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
- verf[0] = jiffies;
- verf[1] = current->pid;
- memcpy(p->o_arg.u.verifier.data, verf,
+ memcpy(p->o_arg.u.verifier.data, c->verf,
sizeof(p->o_arg.u.verifier.data));
}
p->c_arg.fh = &p->o_res.fh;
@@ -1366,8 +1368,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
- if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
- return 0;
switch (claim) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_FH:
@@ -1626,7 +1626,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
struct nfs4_state *state = opendata->state;
- struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *delegation;
int open_mode = opendata->o_arg.open_flags;
fmode_t fmode = opendata->o_arg.fmode;
@@ -1643,7 +1642,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
}
spin_unlock(&state->owner->so_lock);
rcu_read_lock();
- delegation = rcu_dereference(nfsi->delegation);
+ delegation = nfs4_get_valid_delegation(state->inode);
if (!can_open_delegated(delegation, fmode, claim)) {
rcu_read_unlock();
break;
@@ -1814,7 +1813,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
struct nfs4_opendata *opendata;
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
- NULL, NULL, claim, GFP_NOFS);
+ NULL, claim, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
@@ -2140,7 +2139,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
goto out_no_action;
rcu_read_lock();
- delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+ delegation = nfs4_get_valid_delegation(data->state->inode);
if (can_open_delegated(delegation, data->o_arg.fmode, claim))
goto unlock_no_action;
rcu_read_unlock();
@@ -2748,7 +2747,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
nfs4_schedule_stateid_recovery(server, state);
}
out:
- nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+ if (!opendata->cancelled)
+ nfs4_sequence_free_slot(&opendata->o_res.seq_res);
return ret;
}
@@ -2758,8 +2758,7 @@ out:
static int _nfs4_do_open(struct inode *dir,
struct nfs_open_context *ctx,
int flags,
- struct iattr *sattr,
- struct nfs4_label *label,
+ const struct nfs4_open_createattrs *c,
int *opened)
{
struct nfs4_state_owner *sp;
@@ -2771,6 +2770,8 @@ static int _nfs4_do_open(struct inode *dir,
struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
+ struct iattr *sattr = c->sattr;
+ struct nfs4_label *label = c->label;
struct nfs4_label *olabel = NULL;
int status;
@@ -2789,8 +2790,8 @@ static int _nfs4_do_open(struct inode *dir,
status = -ENOMEM;
if (d_really_is_positive(dentry))
claim = NFS4_OPEN_CLAIM_FH;
- opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
- label, claim, GFP_KERNEL);
+ opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
+ c, claim, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
@@ -2871,10 +2872,18 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = { };
struct nfs4_state *res;
+ struct nfs4_open_createattrs c = {
+ .label = label,
+ .sattr = sattr,
+ .verf = {
+ [0] = (__u32)jiffies,
+ [1] = (__u32)current->pid,
+ },
+ };
int status;
do {
- status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
+ status = _nfs4_do_open(dir, ctx, flags, &c, opened);
res = ctx->state;
trace_nfs4_open_file(ctx, flags, status);
if (status == 0)
@@ -2907,6 +2916,11 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
exception.retry = 1;
continue;
}
+ if (status == -NFS4ERR_EXPIRED) {
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ exception.retry = 1;
+ continue;
+ }
if (status == -EAGAIN) {
/* We must have found a delegation */
exception.retry = 1;
@@ -5532,6 +5546,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
}
status = task->tk_status;
if (setclientid.sc_cred) {
+ kfree(clp->cl_acceptor);
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 857af951831f..4e63daeef633 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -143,6 +143,10 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
/* Sustain the lease, even if it's empty. If the clientid4
* goes stale it's of no use for trunking discovery. */
nfs4_schedule_state_renewal(*result);
+
+ /* If the client state need to recover, do it. */
+ if (clp->cl_state)
+ nfs4_schedule_state_manager(clp);
}
out:
return status;
@@ -607,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
*/
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
- LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
- list_move(&sp->so_lru, &doomed);
+ list_move(&sp->so_lru, head);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
+}
- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+ struct nfs4_state_owner *sp, *tmp;
+
+ list_for_each_entry_safe(sp, tmp, head, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
@@ -1760,12 +1779,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct nfs4_state_owner *sp;
struct nfs_server *server;
struct rb_node *pos;
+ LIST_HEAD(freeme);
int status = 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
@@ -1794,6 +1814,7 @@ restart:
spin_unlock(&clp->cl_lock);
}
rcu_read_unlock();
+ nfs4_free_state_owners(&freeme);
return 0;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5e2724a928ed..d7f8d5ce30e3 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1123,7 +1123,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
} else
*p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
}
- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (label && (bmval[2] & FATTR4_WORD2_SECURITY_LABEL)) {
*p++ = cpu_to_be32(label->lfs);
*p++ = cpu_to_be32(label->pi);
*p++ = cpu_to_be32(label->len);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fad4d5188aaf..529f3a576263 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -562,7 +562,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
}
hdr->res.fattr = &hdr->fattr;
- hdr->res.count = count;
+ hdr->res.count = 0;
hdr->res.eof = 0;
hdr->res.verf = &hdr->verf;
nfs_fattr_init(&hdr->fattr);
@@ -851,15 +851,6 @@ static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
return 0;
}
-/*
- * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
- */
-void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
-{
- pgio->pg_mirror_count = 1;
- pgio->pg_mirror_idx = 0;
-}
-
static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
pgio->pg_mirror_count = 1;
@@ -1285,6 +1276,14 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
}
}
+/*
+ * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
+ */
+void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
+{
+ nfs_pageio_complete(pgio);
+}
+
int __init nfs_init_nfspagecache(void)
{
nfs_page_cachep = kmem_cache_create("nfs_page",
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0e008db16b16..c3abf92adfb7 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1436,7 +1436,7 @@ pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
if ((range->iomode == IOMODE_RW &&
ls_range->iomode != IOMODE_RW) ||
(range->iomode != ls_range->iomode &&
- strict_iomode == true) ||
+ strict_iomode) ||
!pnfs_lseg_range_intersecting(ls_range, range))
return 0;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b7bca8303989..06e72229be12 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -588,7 +588,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
hdr->res.eof = 1;
}
return 0;
@@ -609,8 +610,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ hdr->res.count = hdr->args.count;
nfs_writeback_update_inode(hdr);
+ }
return 0;
}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 659ad12e33ba..4c21e572f2d9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1928,7 +1928,7 @@ static int nfs_parse_devname(const char *dev_name,
/* kill possible hostname list: not supported */
comma = strchr(dev_name, ',');
if (comma != NULL && comma < end)
- *comma = 0;
+ len = comma - dev_name;
}
if (len > maxnamlen)
@@ -2047,7 +2047,8 @@ static int nfs23_validate_mount_data(void *options,
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
args->nfs_server.port = ntohs(data->addr.sin_port);
- if (!nfs_verify_server_address(sap))
+ if (sap->sa_family != AF_INET ||
+ !nfs_verify_server_address(sap))
goto out_no_address;
if (!(data->flags & NFS_MOUNT_TCP))
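In the nfs_parse_devname() hunk above the comma is no longer overwritten in the caller's dev_name string; the hostname length is simply truncated at the comma instead. A tiny sketch of that approach, using a hypothetical extract_hostname() helper:

#include <stdio.h>
#include <string.h>

/* Copy the hostname part of "host,alt1,alt2:/export" into buf without
 * modifying the source string: truncate at the first comma that comes
 * before the ':' separator, as the fixed nfs_parse_devname() does. */
static int extract_hostname(const char *dev_name, char *buf, size_t bufsz)
{
	const char *end = strchr(dev_name, ':');
	const char *comma;
	size_t len;

	if (!end)
		return -1;
	len = end - dev_name;
	comma = strchr(dev_name, ',');
	if (comma && comma < end)
		len = comma - dev_name;	/* drop unsupported hostname list */
	if (len + 1 > bufsz)
		return -1;
	memcpy(buf, dev_name, len);
	buf[len] = '\0';
	return 0;
}

int main(void)
{
	char host[64];
	const char *dev = "server1,server2:/export/home";

	if (extract_hostname(dev, host, sizeof(host)) == 0)
		printf("host=%s rest=%s\n", host, strchr(dev, ':'));
	return 0;
}

The original string is left untouched, so later parsing of the export path still sees the full device name.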
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3069cd46ea66..8d842282111b 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -934,8 +934,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion) {
- if (!nfsd41_cb_get_slot(clp, task))
+ if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
return;
+ cb->cb_holds_slot = true;
}
rpc_call_start(task);
}
@@ -962,6 +963,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
return true;
}
+ if (!cb->cb_holds_slot)
+ goto need_restart;
+
switch (cb->cb_seq_status) {
case 0:
/*
@@ -999,6 +1003,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
cb->cb_seq_status);
}
+ cb->cb_holds_slot = false;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1206,6 +1211,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_seq_status = 1;
cb->cb_status = 0;
cb->cb_need_restart = false;
+ cb->cb_holds_slot = false;
}
void nfsd4_run_cb(struct nfsd4_callback *cb)
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 64813697f4c4..f6cc2fddb78b 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -680,7 +680,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
/* Client gets 2 lease periods to return it */
cutoff = ktime_add_ns(task->tk_start,
- nn->nfsd4_lease * NSEC_PER_SEC * 2);
+ (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);
if (ktime_before(now, cutoff)) {
rpc_delay(task, HZ/100); /* 10 mili-seconds */
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 66eaeb1e8c2c..dc9586feab31 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -661,7 +661,7 @@ struct cld_net {
struct cld_upcall {
struct list_head cu_list;
struct cld_net *cu_net;
- struct task_struct *cu_task;
+ struct completion cu_done;
struct cld_msg cu_msg;
};
@@ -670,23 +670,18 @@ __cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
{
int ret;
struct rpc_pipe_msg msg;
+ struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_msg);
memset(&msg, 0, sizeof(msg));
msg.data = cmsg;
msg.len = sizeof(*cmsg);
- /*
- * Set task state before we queue the upcall. That prevents
- * wake_up_process in the downcall from racing with schedule.
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
ret = rpc_queue_upcall(pipe, &msg);
if (ret < 0) {
- set_current_state(TASK_RUNNING);
goto out;
}
- schedule();
+ wait_for_completion(&cup->cu_done);
if (msg.errno < 0)
ret = msg.errno;
@@ -753,7 +748,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
if (copy_from_user(&cup->cu_msg, src, mlen) != 0)
return -EFAULT;
- wake_up_process(cup->cu_task);
+ complete(&cup->cu_done);
return mlen;
}
@@ -768,7 +763,7 @@ cld_pipe_destroy_msg(struct rpc_pipe_msg *msg)
if (msg->errno >= 0)
return;
- wake_up_process(cup->cu_task);
+ complete(&cup->cu_done);
}
static const struct rpc_pipe_ops cld_upcall_ops = {
@@ -899,7 +894,7 @@ restart_search:
goto restart_search;
}
}
- new->cu_task = current;
+ init_completion(&new->cu_done);
new->cu_msg.cm_vers = CLD_UPCALL_VERSION;
put_unaligned(cn->cn_xid++, &new->cu_msg.cm_xid);
new->cu_net = cn;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3656f87d11e3..4509c76716e3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1502,11 +1502,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
u32 slotsize = slot_bytes(ca);
u32 num = ca->maxreqs;
- int avail;
+ unsigned long avail, total_avail;
spin_lock(&nfsd_drc_lock);
- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
- nfsd_drc_max_mem - nfsd_drc_mem_used);
+ total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
+ avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
+ /*
+ * Never use more than a third of the remaining memory,
+ * unless it's the only way to give this client a slot:
+ */
+ avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
num = min_t(int, num, avail / slotsize);
nfsd_drc_mem_used += num * slotsize;
spin_unlock(&nfsd_drc_lock);
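The hunk above caps each session's DRC allowance at a third of the remaining memory while still guaranteeing at least one slot. A minimal userspace sketch of that clamping arithmetic follows; the names drc_slots and clamp_ul are illustrative stand-ins, not the kernel's clamp_t() or nfsd symbols.

#include <stdio.h>

/* Illustrative stand-in for the kernel's clamp_t(): pin v into [lo, hi]. */
static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	if (v < lo)
		return lo;
	if (v > hi)
		return hi;
	return v;
}

/*
 * Sketch of the slot-count computation in nfsd4_get_drc_mem(): never take
 * more than a third of the remaining DRC memory, unless one slot is the
 * only way to serve this client at all.
 */
static unsigned int drc_slots(unsigned long max_per_session,
			      unsigned long total_avail,
			      unsigned long slotsize,
			      unsigned int wanted)
{
	unsigned long avail = max_per_session < total_avail ?
			      max_per_session : total_avail;

	avail = clamp_ul(avail, slotsize, total_avail / 3);
	if (wanted > avail / slotsize)
		wanted = avail / slotsize;
	return wanted;
}

int main(void)
{
	/* 1 MiB cap per session, 2 MiB left globally, 32 KiB slots, 64 asked. */
	printf("slots granted: %u\n",
	       drc_slots(1UL << 20, 2UL << 20, 32UL << 10, 64));
	return 0;
}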
@@ -3062,12 +3067,17 @@ static bool replay_matches_cache(struct svc_rqst *rqstp,
(bool)seq->cachethis)
return false;
/*
- * If there's an error than the reply can have fewer ops than
- * the call. But if we cached a reply with *more* ops than the
- * call you're sending us now, then this new call is clearly not
- * really a replay of the old one:
+ * If there's an error then the reply can have fewer ops than
+ * the call.
+ */
+ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
+ return false;
+ /*
+ * But if we cached a reply with *more* ops than the call you're
+ * sending us now, then this new call is clearly not really a
+ * replay of the old one:
*/
- if (slot->sl_opcnt < argp->opcnt)
+ if (slot->sl_opcnt > argp->opcnt)
return false;
/* This is the only check explicitly called by spec: */
if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
@@ -6024,7 +6034,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
if (fl_flags & FL_SLEEP) {
- nbl->nbl_time = jiffies;
+ nbl->nbl_time = get_seconds();
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 5c4800626f13..60291d10f8e4 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -430,7 +430,7 @@ void nfsd_reset_versions(void)
*/
static void set_max_drc(void)
{
- #define NFSD_DRC_SIZE_SHIFT 10
+ #define NFSD_DRC_SIZE_SHIFT 7
nfsd_drc_max_mem = (nr_free_buffer_pages()
>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
nfsd_drc_mem_used = 0;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 86aa92d200e1..7872b1ead885 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -69,6 +69,7 @@ struct nfsd4_callback {
int cb_seq_status;
int cb_status;
bool cb_need_restart;
+ bool cb_holds_slot;
};
struct nfsd4_callback_ops {
@@ -590,7 +591,7 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
struct nfsd4_blocked_lock {
struct list_head nbl_list;
struct list_head nbl_lru;
- unsigned long nbl_time;
+ time_t nbl_time;
struct file_lock nbl_lock;
struct knfsd_fh nbl_fh;
struct nfsd4_callback nbl_cb;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8f0b19a3ca81..b8cd100cfcd6 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -395,10 +395,23 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
bool get_write_count;
bool size_change = (iap->ia_valid & ATTR_SIZE);
- if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+ if (iap->ia_valid & ATTR_SIZE) {
accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
- if (iap->ia_valid & ATTR_SIZE)
ftype = S_IFREG;
+ }
+
+ /*
+ * If utimes(2) and friends are called with times not NULL, we should
+ * not set the NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
+ * will return EACCES when the caller's effective UID does not match
+ * the owner of the file and the caller is not privileged. In this
+ * situation, we should return EPERM (notify_change will return this).
+ */
+ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
+ accmode |= NFSD_MAY_OWNER_OVERRIDE;
+ if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
+ accmode |= NFSD_MAY_WRITE;
+ }
/* Callers that do fh_verify should do the fh_want_write: */
get_write_count = !fhp->fh_dentry;
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 0bf9e7bf5800..9140b9cf3870 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -116,8 +116,11 @@ void nfsd_put_raparams(struct file *file, struct raparms *ra);
static inline int fh_want_write(struct svc_fh *fh)
{
- int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
+ int ret;
+ if (fh->fh_want_write)
+ return 0;
+ ret = mnt_want_write(fh->fh_export->ex_path.mnt);
if (!ret)
fh->fh_want_write = true;
return ret;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index ee8dbbae78b6..6dc714a56c37 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -338,8 +338,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
+ if (IS_ERR_OR_NULL(acl))
+ return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 06089becca60..dfb8a923921e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7246,6 +7246,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inline_data *idata = &di->id2.i_data;
+ /* No need to punch hole beyond i_size. */
+ if (start >= i_size_read(inode))
+ return 0;
+
if (end > i_size_read(inode))
end = i_size_read(inode);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c26d046adaaa..6ad76397b31d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2046,7 +2046,8 @@ out_write_size:
inode->i_mtime = inode->i_ctime = current_time(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
- ocfs2_update_inode_fsync_trans(handle, inode, 1);
+ if (handle)
+ ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
if (handle)
ocfs2_journal_dirty(handle, wc->w_di_bh);
@@ -2143,13 +2144,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct ocfs2_dio_write_ctxt *dwc = NULL;
struct buffer_head *di_bh = NULL;
u64 p_blkno;
- loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+ unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
+ loff_t pos = iblock << i_blkbits;
+ sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
unsigned len, total_len = bh_result->b_size;
int ret = 0, first_get_block = 0;
len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
len = min(total_len, len);
+ /*
+ * bh_result->b_size is counted in get_more_blocks according to the write
+ * "pos" and "end"; we need to map twice to return different buffer states:
+ * 1. area within the file size: do not set NEW;
+ * 2. area beyond the file size: set NEW.
+ *
+ * iblock endblk
+ * |--------|---------|---------|---------
+ * |<-------area in file------->|
+ */
+
+ if ((iblock <= endblk) &&
+ ((iblock + ((len - 1) >> i_blkbits)) > endblk))
+ len = (endblk - iblock + 1) << i_blkbits;
+
mlog(0, "get block of %lu at %llu:%u req %u\n",
inode->i_ino, pos, len, total_len);
@@ -2233,6 +2251,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
if (desc->c_needs_zero)
set_buffer_new(bh_result);
+ if (iblock > endblk)
+ set_buffer_new(bh_result);
+
/* May sleep in end_io. It should not happen in an irq context. So defer
* it to dio work queue. */
set_buffer_defer_completion(bh_result);
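The ocfs2_dio_wr_get_block() change above trims a direct-I/O mapping at the last block covered by i_size so that blocks beyond EOF can be flagged NEW separately. A small userspace sketch of that boundary arithmetic, with illustrative names and values, might look like:

#include <stdio.h>
#include <stdint.h>

/*
 * If a requested mapping starts inside i_size but runs past the last block
 * that i_size touches, cut it at that boundary so the in-file and
 * beyond-EOF parts can be mapped separately.
 */
static unsigned int trim_to_eof_block(uint64_t iblock, unsigned int len,
				      uint64_t i_size, unsigned int blkbits)
{
	uint64_t endblk = (i_size - 1) >> blkbits;	/* last block inside i_size */

	if (iblock <= endblk &&
	    iblock + ((len - 1) >> blkbits) > endblk)
		len = (unsigned int)((endblk - iblock + 1) << blkbits);
	return len;
}

int main(void)
{
	/* 4 KiB blocks, file size 10000 bytes -> last in-file block is #2. */
	unsigned int len = trim_to_eof_block(1, 16384, 10000, 12);

	printf("trimmed len: %u bytes\n", len);	/* 8192: blocks 1 and 2 */
	return 0;
}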
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 935bac253991..1403c88f2b05 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -98,25 +98,34 @@ out:
return ret;
}
+/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
+ * will be easier to handle read failure.
+ */
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
unsigned int nr, struct buffer_head *bhs[])
{
int status = 0;
unsigned int i;
struct buffer_head *bh;
+ int new_bh = 0;
trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
if (!nr)
goto bail;
+ /* Don't put the buffer head and re-assign it to NULL if it was allocated
+ * outside, since the caller can't be aware of this alteration!
+ */
+ new_bh = (bhs[0] == NULL);
+
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
bhs[i] = sb_getblk(osb->sb, block++);
if (bhs[i] == NULL) {
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ break;
}
}
bh = bhs[i];
@@ -156,9 +165,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
submit_bh(REQ_OP_READ, 0, bh);
}
+read_failure:
for (i = nr; i > 0; i--) {
bh = bhs[i - 1];
+ if (unlikely(status)) {
+ if (new_bh && bh) {
+ /* If a middle bh fails, let the previous bh
+ * finish its read and then put it to
+ * avoid a bh leak
+ */
+ if (!buffer_jbd(bh))
+ wait_on_buffer(bh);
+ put_bh(bh);
+ bhs[i - 1] = NULL;
+ } else if (bh && buffer_uptodate(bh)) {
+ clear_buffer_uptodate(bh);
+ }
+ continue;
+ }
+
/* No need to wait on the buffer if it's managed by JBD. */
if (!buffer_jbd(bh))
wait_on_buffer(bh);
@@ -168,8 +194,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
* so we can safely record this and loop back
* to cleanup the other buffers. */
status = -EIO;
- put_bh(bh);
- bhs[i - 1] = NULL;
+ goto read_failure;
}
}
@@ -177,6 +202,9 @@ bail:
return status;
}
+/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
+ * will be easier to handle read failure.
+ */
int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
struct buffer_head *bhs[], int flags,
int (*validate)(struct super_block *sb,
@@ -186,6 +214,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
int i, ignore_cache = 0;
struct buffer_head *bh;
struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ int new_bh = 0;
trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
@@ -211,6 +240,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
goto bail;
}
+ /* Don't put the buffer head and re-assign it to NULL if it was allocated
+ * outside, since the caller can't be aware of this alteration!
+ */
+ new_bh = (bhs[0] == NULL);
+
ocfs2_metadata_cache_io_lock(ci);
for (i = 0 ; i < nr ; i++) {
if (bhs[i] == NULL) {
@@ -219,7 +253,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
ocfs2_metadata_cache_io_unlock(ci);
status = -ENOMEM;
mlog_errno(status);
- goto bail;
+ /* Don't forget to put previous bh! */
+ break;
}
}
bh = bhs[i];
@@ -313,16 +348,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
}
- status = 0;
-
+read_failure:
for (i = (nr - 1); i >= 0; i--) {
bh = bhs[i];
if (!(flags & OCFS2_BH_READAHEAD)) {
- if (status) {
- /* Clear the rest of the buffers on error */
- put_bh(bh);
- bhs[i] = NULL;
+ if (unlikely(status)) {
+ /* Clear the buffers on error, including those
+ * that succeeded in reading
+ */
+ if (new_bh && bh) {
+ /* If a middle bh fails, let the previous bh
+ * finish its read and then put it to
+ * avoid a bh leak
+ */
+ if (!buffer_jbd(bh))
+ wait_on_buffer(bh);
+ put_bh(bh);
+ bhs[i] = NULL;
+ } else if (bh && buffer_uptodate(bh)) {
+ clear_buffer_uptodate(bh);
+ }
continue;
}
/* We know this can't have changed as we hold the
@@ -340,9 +386,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* uptodate. */
status = -EIO;
clear_buffer_needs_validate(bh);
- put_bh(bh);
- bhs[i] = NULL;
- continue;
+ goto read_failure;
}
if (buffer_needs_validate(bh)) {
@@ -352,11 +396,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
BUG_ON(buffer_jbd(bh));
clear_buffer_needs_validate(bh);
status = validate(sb, bh);
- if (status) {
- put_bh(bh);
- bhs[i] = NULL;
- continue;
- }
+ if (status)
+ goto read_failure;
}
}
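The comments added to ocfs2_read_blocks_sync() and ocfs2_read_blocks() describe the cleanup rule: the bhs[] array must be all-NULL or all non-NULL, and on failure only self-allocated entries are released. A minimal userspace sketch of that ownership pattern follows; fill_items and new_items are illustrative names, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/*
 * On entry the array is all-NULL (we allocate) or all non-NULL (caller
 * owns). On failure we only free the entries we allocated ourselves,
 * tracked by new_items, leaving caller-owned entries alone.
 */
static int fill_items(int *items[], int nr)
{
	int new_items = (items[0] == NULL);	/* did we allocate them? */
	int status = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (items[i] == NULL) {
			items[i] = malloc(sizeof(int));
			if (items[i] == NULL) {
				status = -1;
				break;
			}
		}
		*items[i] = i;	/* stand-in for submitting the read */
	}

	if (status && new_items) {
		/* Undo only our own allocations so the caller sees NULLs. */
		while (i-- > 0) {
			free(items[i]);
			items[i] = NULL;
		}
	}
	return status;
}

int main(void)
{
	int *items[4] = { NULL, NULL, NULL, NULL };

	if (fill_items(items, 4) == 0)
		printf("filled: %d %d %d %d\n",
		       *items[0], *items[1], *items[2], *items[3]);
	for (int i = 0; i < 4; i++)
		free(items[i]);
	return 0;
}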
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index c204ac9b49e5..81a0d5d82757 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -621,13 +621,15 @@ static void o2nm_node_group_drop_item(struct config_group *group,
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
- o2net_disconnect_node(node);
+ if (cluster->cl_nodes[node->nd_num] == node) {
+ o2net_disconnect_node(node);
- if (cluster->cl_has_local &&
- (cluster->cl_local_node == node->nd_num)) {
- cluster->cl_has_local = 0;
- cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
- o2net_stop_listening(node);
+ if (cluster->cl_has_local &&
+ (cluster->cl_local_node == node->nd_num)) {
+ cluster->cl_has_local = 0;
+ cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
+ o2net_stop_listening(node);
+ }
}
/* XXX call into net to stop this node from trading messages */
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 290373024d9d..e8ace3b54e9c 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -310,6 +310,18 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry,
out_attach:
spin_lock(&dentry_attach_lock);
+ if (unlikely(dentry->d_fsdata && !alias)) {
+ /* d_fsdata is set by a racing thread which is doing
+ * the same thing as this thread. Let the racing
+ * thread go ahead and return here.
+ */
+ spin_unlock(&dentry_attach_lock);
+ iput(dl->dl_inode);
+ ocfs2_lock_res_free(&dl->dl_lockres);
+ kfree(dl);
+ return 0;
+ }
+
dentry->d_fsdata = dl;
dl->dl_count++;
spin_unlock(&dentry_attach_lock);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index e7b760deefae..32d60f69db24 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
char *buf;
- buf = (char *) get_zeroed_page(GFP_NOFS);
+ buf = (char *) get_zeroed_page(GFP_ATOMIC);
if (buf) {
dump_mle(mle, buf, PAGE_SIZE - 1);
free_page((unsigned long)buf);
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 1082b2c3014b..5f2a120240e5 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
enum dlm_status status;
int actions = 0;
int in_use;
- u8 owner;
+ u8 owner;
+ int recovery_wait = 0;
mlog(0, "master_node = %d, valblk = %d\n", master_node,
flags & LKM_VALBLK);
@@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
}
if (flags & LKM_CANCEL)
lock->cancel_pending = 0;
- else
- lock->unlock_pending = 0;
-
+ else {
+ if (!lock->unlock_pending)
+ recovery_wait = 1;
+ else
+ lock->unlock_pending = 0;
+ }
}
/* get an extra ref on lock. if we are just switching
@@ -244,6 +248,17 @@ leave:
spin_unlock(&res->spinlock);
wake_up(&res->wq);
+ if (recovery_wait) {
+ spin_lock(&res->spinlock);
+ /* An unlock request will succeed directly after the owner dies,
+ * and the lock is already removed from the grant list. We have to
+ * wait for RECOVERING to finish or we miss the chance to purge it,
+ * since the removal is much faster than the RECOVERING process.
+ */
+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+ spin_unlock(&res->spinlock);
+ }
+
/* let the caller's final dlm_lock_put handle the actual kfree */
if (actions & DLM_UNLOCK_FREE_LOCK) {
/* this should always be coupled with list removal */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 5729d55da67d..2c3e975126b3 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3421,7 +3421,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
* we can recover correctly from node failure. Otherwise, we may get
* invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
*/
- if (!ocfs2_is_o2cb_active() &&
+ if (ocfs2_userspace_stack(osb) &&
lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lvb = 1;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 3494e220b510..bed15dec3c16 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
u64 blkno;
struct dentry *parent;
struct inode *dir = d_inode(child);
+ int set;
trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno);
+ status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
+ if (status < 0) {
+ mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
+ parent = ERR_PTR(status);
+ goto bail;
+ }
+
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
parent = ERR_PTR(status);
- goto bail;
+ goto unlock_nfs_sync;
}
status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
@@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
goto bail_unlock;
}
+ status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
+ if (status < 0) {
+ if (status == -EINVAL) {
+ status = -ESTALE;
+ } else
+ mlog(ML_ERROR, "test inode bit failed %d\n", status);
+ parent = ERR_PTR(status);
+ goto bail_unlock;
+ }
+
+ trace_ocfs2_get_dentry_test_bit(status, set);
+ if (!set) {
+ status = -ESTALE;
+ parent = ERR_PTR(status);
+ goto bail_unlock;
+ }
+
parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
bail_unlock:
ocfs2_inode_unlock(dir, 0);
+unlock_nfs_sync:
+ ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
+
bail:
trace_ocfs2_get_parent_end(parent);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 4506ec5ec2ea..bfc44644301c 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -289,7 +289,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
if (inode_alloc)
inode_lock(inode_alloc);
- if (o2info_coherent(&fi->ifi_req)) {
+ if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index fa947d36ae1d..733c05135305 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
/* At this point, we know that no more recovery threads can be
* launched, so wait for any recovery completion work to
* complete. */
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
/*
* Now that recovery is shut down, and the osb is about to be
@@ -1017,7 +1018,8 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
mlog_errno(status);
}
- if (status == 0) {
+ /* Shutdown the kernel journal system */
+ if (!jbd2_journal_destroy(journal->j_journal) && !status) {
/*
* Do not toggle if flush was unsuccessful otherwise
* will leave dirty metadata in a "clean" journal
@@ -1026,9 +1028,6 @@ void ocfs2_journal_shutdown(struct ocfs2_super *osb)
if (status < 0)
mlog_errno(status);
}
-
- /* Shutdown the kernel journal system */
- jbd2_journal_destroy(journal->j_journal);
journal->j_journal = NULL;
OCFS2_I(inode)->ip_open_count--;
@@ -1081,6 +1080,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
+ if (replayed) {
+ jbd2_journal_lock_updates(journal->j_journal);
+ status = jbd2_journal_flush(journal->j_journal);
+ jbd2_journal_unlock_updates(journal->j_journal);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 497a4171ef61..bfb50fc51528 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -637,9 +637,11 @@ static inline void ocfs2_update_inode_fsync_trans(handle_t *handle,
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- oi->i_sync_tid = handle->h_transaction->t_tid;
- if (datasync)
- oi->i_datasync_tid = handle->h_transaction->t_tid;
+ if (!is_handle_aborted(handle)) {
+ oi->i_sync_tid = handle->h_transaction->t_tid;
+ if (datasync)
+ oi->i_datasync_tid = handle->h_transaction->t_tid;
+ }
}
#endif /* OCFS2_JOURNAL_H */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 5d53d0d63d19..ea38677daa06 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
struct ocfs2_dinode *alloc = NULL;
cancel_delayed_work(&osb->la_enable_wq);
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
+ flush_workqueue(osb->ocfs2_wq);
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index c179afd0051a..afaa044f5f6b 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -25,6 +25,7 @@
#include "ocfs2_ioctl.h"
#include "alloc.h"
+#include "localalloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
@@ -222,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
struct ocfs2_refcount_tree *ref_tree = NULL;
u32 new_phys_cpos, new_len;
u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
+ int need_free = 0;
if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
@@ -315,6 +317,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
if (!partial) {
context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
ret = -ENOSPC;
+ need_free = 1;
goto out_commit;
}
}
@@ -339,6 +342,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
mlog_errno(ret);
out_commit:
+ if (need_free && context->data_ac) {
+ struct ocfs2_alloc_context *data_ac = context->data_ac;
+
+ if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ new_phys_cpos, new_len);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
+ new_len);
+ }
+
ocfs2_commit_trans(osb, handle);
out_unlock_mutex:
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 87e577a49b0d..542fa21aeaa9 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -714,7 +714,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out;
/* Running from downconvert thread? Postpone quota processing to wq */
if (current == osb->dc_task) {
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 820359096c7a..52c07346bea3 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
-inline int ocfs2_is_o2cb_active(void)
-{
- return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
-}
-EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
-
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index e3036e1790e8..f2dce10fae54 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
-/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
-int ocfs2_is_o2cb_active(void);
-
extern struct kset *ocfs2_kset;
#endif /* STACKGLUE_H */
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 01932763b4d1..e108c945ac1f 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3832,7 +3832,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
- u32 last_hash;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
@@ -3876,8 +3875,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
- last_hash = le32_to_cpu(xe->xe_name_hash);
-
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
diff --git a/fs/open.c b/fs/open.c
index a6c6244f4993..e17cc79bd88a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -380,6 +380,25 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
override_cred->cap_permitted;
}
+ /*
+ * The new set of credentials can *only* be used in
+ * task-synchronous circumstances, and does not need
+ * RCU freeing, unless somebody then takes a separate
+ * reference to it.
+ *
+ * NOTE! This is _only_ true because this credential
+ * is used purely for override_creds() that installs
+ * it as the subjective cred. Other threads will be
+ * accessing ->real_cred, not the subjective cred.
+ *
+ * If somebody _does_ make a copy of this (using the
+ * 'get_current_cred()' function), that will clear the
+ * non_rcu field, because now that other user may be
+ * expecting RCU freeing. But normal thread-synchronous
+ * cred accesses will keep things non-RCU.
+ */
+ override_cred->non_rcu = 1;
+
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
@@ -717,6 +736,12 @@ static int do_dentry_open(struct file *f,
return 0;
}
+ /* Any file opened for execve()/uselib() has to be a regular file. */
+ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+ error = -EACCES;
+ goto cleanup_file;
+ }
+
if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
error = get_write_access(inode);
if (unlikely(error))
@@ -799,9 +824,6 @@ cleanup_file:
* the return value of d_splice_alias(), then the caller needs to perform dput()
* on it after finish_open().
*
- * On successful return @file is a fully instantiated open file. After this, if
- * an error occurs in ->atomic_open(), it needs to clean up with fput().
- *
* Returns zero on success or -errno if the open failed.
*/
int finish_open(struct file *file, struct dentry *dentry,
@@ -1186,3 +1208,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have a notion of position
+ * (file.f_pos is always 0). Unlike file descriptors of other regular
+ * files, .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it can be
+ * used directly as file_operations.open.
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+ filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+ filp->f_mode |= FMODE_STREAM;
+ return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
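The stream_open() comment above explains that stream-like files keep f_pos pinned at 0 and drop position updates. A toy userspace model of those semantics follows; the flag values, struct toy_file, and helper names are illustrative, not the kernel's definitions.

#include <stdio.h>

#define FMODE_LSEEK	0x1
#define FMODE_PREAD	0x2
#define FMODE_PWRITE	0x4
#define FMODE_STREAM	0x8

struct toy_file {
	unsigned int f_mode;
	long long f_pos;
};

/* Mirror of the mode-bit shuffle: no seeking, no pread/pwrite, stream set. */
static void toy_stream_open(struct toy_file *f)
{
	f->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
	f->f_mode |= FMODE_STREAM;
}

static long long toy_pos_read(const struct toy_file *f)
{
	return (f->f_mode & FMODE_STREAM) ? 0 : f->f_pos;
}

static void toy_pos_write(struct toy_file *f, long long pos)
{
	if (!(f->f_mode & FMODE_STREAM))
		f->f_pos = pos;
}

int main(void)
{
	struct toy_file f = { .f_mode = FMODE_LSEEK | FMODE_PREAD, .f_pos = 0 };

	toy_stream_open(&f);
	toy_pos_write(&f, 4096);	/* silently ignored for streams */
	printf("pos after write attempt: %lld\n", toy_pos_read(&f));	/* 0 */
	return 0;
}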
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 0748a26598fc..7d7df003f9d8 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -304,6 +304,7 @@ static void *help_start(struct seq_file *m, loff_t *pos)
static void *help_next(struct seq_file *m, void *v, loff_t *pos)
{
+ (*pos)++;
gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
return NULL;
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index a799546a67f7..f6172c3f83ba 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -315,7 +315,7 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj,
/* Can't do a service_operation if the client is not running... */
rc = is_daemon_in_service();
if (rc) {
- pr_info("%s: Client not running :%d:\n",
+ pr_info_ratelimited("%s: Client not running :%d:\n",
__func__,
is_daemon_in_service());
goto out;
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 16f6db88c8e5..804b3469669f 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -234,7 +234,8 @@ static bool ovl_can_list(const char *s)
return true;
/* Never list trusted.overlay, list other trusted for superuser only */
- return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+ return !ovl_is_private_xattr(s) &&
+ ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
}
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
diff --git a/fs/pipe.c b/fs/pipe.c
index 388e09a689de..347c6dc888c8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -193,9 +193,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
* in the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
- get_page(buf->page);
+ return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 712b44c63701..9682bbf325d6 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -448,7 +448,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* a program is not able to use ptrace(2) in that case. It is
* safe because the task has stopped executing permanently.
*/
- if (permitted && (task->flags & PF_DUMPCORE)) {
+ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
if (try_get_task_stack(task)) {
eip = KSTK_EIP(task);
esp = KSTK_ESP(task);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 1999e85840d5..191573a625f2 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -500,6 +500,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (root->set_ownership)
root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+ else {
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ }
return inode;
}
@@ -1604,8 +1608,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
if (--header->nreg)
return;
- put_links(header);
- start_unregistering(header);
+ if (parent) {
+ put_links(header);
+ start_unregistering(header);
+ }
+
if (!--header->count)
kfree_rcu(header, rcu);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5138e781737a..4b207b10db03 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1057,6 +1057,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
count = -EINTR;
goto out_mm;
}
+ /*
+ * Avoid modifying vma->vm_flags
+ * without locked ops while the
+ * coredump reads the vm_flags.
+ */
+ if (!mmget_still_valid(mm)) {
+ /*
+ * Silently return "count"
+ * as if get_task_mm()
+ * had failed. FIXME: should
+ * this function return
+ * -ESRCH when get_task_mm()
+ * fails, as it does when
+ * get_proc_task() fails?
+ */
+ up_write(&mm->mmap_sem);
+ goto out_mm;
+ }
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 8ab782d8b33d..93d13f4010c1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -165,6 +165,16 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
}
/*
+ * Architectures which support memory encryption override this.
+ */
+ssize_t __weak
+copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+ unsigned long offset, int userbuf)
+{
+ return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
+}
+
+/*
* Copy to either kernel or user space
*/
static int copy_to(void *target, void *src, size_t size, int userbuf)
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 8b09271e5d66..a73959e6ae32 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -321,6 +321,17 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
prz = cxt->przs[cxt->dump_write_cnt];
+ /*
+ * Since this is a new crash dump, we need to reset the buffer in
+ * case it still has an old dump present. Without this, the new dump
+ * will get appended, which would seriously confuse anything trying
+ * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
+ * expects to find a dump header in the beginning of buffer data, so
+ * we must reset the buffer values in order to ensure that the
+ * header will be written to the beginning of the buffer.
+ */
+ persistent_ram_zap(prz);
+
hlen = ramoops_write_kmsg_hdr(prz, compressed);
if (size + hlen > prz->buffer_size)
size = prz->buffer_size - hlen;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index f9246ac4eef8..82a5ecbe2da9 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -479,7 +479,7 @@ int dquot_release(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out_dqlock;
mutex_lock(&dqopt->dqio_mutex);
if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
@@ -611,7 +611,7 @@ EXPORT_SYMBOL(dquot_scan_active);
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
- struct list_head *dirty;
+ struct list_head dirty;
struct dquot *dquot;
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
@@ -624,9 +624,10 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
if (!sb_has_quota_active(sb, cnt))
continue;
spin_lock(&dq_list_lock);
- dirty = &dqopt->info[cnt].dqi_dirty_list;
- while (!list_empty(dirty)) {
- dquot = list_first_entry(dirty, struct dquot,
+ /* Move list away to avoid livelock. */
+ list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
+ while (!list_empty(&dirty)) {
+ dquot = list_first_entry(&dirty, struct dquot,
dq_dirty);
/* Dirty and inactive can be only bad dquot... */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
@@ -2848,68 +2849,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int type = (int *)table->data - dqstats.stat;
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;
/* Update global table */
- dqstats.stat[type] =
- percpu_counter_sum_positive(&dqstats.counter[type]);
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
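The do_proc_dqstats() change above sums per-CPU counters without locking, so counters that are incremented on one CPU and decremented on another can transiently read negative; the patch reports such snapshots as 0. A tiny sketch of that filtering, with illustrative names, follows.

#include <stdio.h>

/* Negative snapshots of non-monotonic counters are clamped to 0. */
static long long snapshot_counter(long long raw_sum, int non_monotonic)
{
	if (raw_sum < 0 && non_monotonic)
		return 0;
	return raw_sum;
}

int main(void)
{
	printf("lookups (monotonic):      %lld\n", snapshot_counter(42, 0));
	printf("alloc_dquots (transient): %lld\n", snapshot_counter(-3, 1));
	return 0;
}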
diff --git a/fs/read_write.c b/fs/read_write.c
index 9819f7c6c8c5..9e1fd4c20e89 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -575,12 +575,13 @@ EXPORT_SYMBOL(vfs_write);
static inline loff_t file_pos_read(struct file *file)
{
- return file->f_pos;
+ return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
}
static inline void file_pos_write(struct file *file, loff_t pos)
{
- file->f_pos = pos;
+ if ((file->f_mode & FMODE_STREAM) == 0)
+ file->f_pos = pos;
}
SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
@@ -1204,6 +1205,9 @@ COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos, int, flags)
{
+ if (pos == -1)
+ return do_compat_readv(fd, vec, vlen, flags);
+
return do_compat_preadv64(fd, vec, vlen, pos, flags);
}
#endif
@@ -1310,6 +1314,9 @@ COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
const struct compat_iovec __user *,vec,
unsigned long, vlen, loff_t, pos, int, flags)
{
+ if (pos == -1)
+ return do_compat_writev(fd, vec, vlen, flags);
+
return do_compat_pwritev64(fd, vec, vlen, pos, flags);
}
#endif
diff --git a/fs/readdir.c b/fs/readdir.c
index 9d0212c374d6..1059f2a9be0b 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -64,6 +64,40 @@ out:
EXPORT_SYMBOL(iterate_dir);
/*
+ * POSIX says that a dirent name cannot contain NULL or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+ if (!len)
+ return -EIO;
+ if (memchr(name, '/', len))
+ return -EIO;
+ return 0;
+}
+
+/*
* Traditional linux readdir() handling..
*
* "count=1" is a special case, meaning that the buffer is one
@@ -172,6 +206,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
@@ -258,6 +295,9 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
+ buf->error = verify_dirent_name(name, namlen);
+ if (unlikely(buf->error))
+ return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
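The verify_dirent_name() check added above rejects directory entry names that are empty or contain a '/'. A userspace restatement of the same check follows, as a sketch for illustration rather than the kernel function itself.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* A dirent name from the filesystem must be non-empty and '/'-free. */
static int check_dirent_name(const char *name, int len)
{
	if (len == 0)
		return -EIO;
	if (memchr(name, '/', len) != NULL)
		return -EIO;
	return 0;
}

int main(void)
{
	printf("\"file.txt\" -> %d\n", check_dirent_name("file.txt", 8));
	printf("\"bad/name\" -> %d\n", check_dirent_name("bad/name", 8));
	printf("\"\"         -> %d\n", check_dirent_name("", 0));
	return 0;
}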
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index bd4c727f4610..9531b6c18ac7 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2102,6 +2102,15 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_inserted_sd;
}
+ /*
+ * Mark it private if we're creating the privroot
+ * or something under it.
+ */
+ if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
+ inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
+
if (reiserfs_posixacl(inode->i_sb)) {
reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
@@ -2116,8 +2125,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
reiserfs_warning(inode->i_sb, "jdm-13090",
"ACLs aren't enabled in the fs, "
"but vfs thinks they are!");
- } else if (IS_PRIVATE(dir))
- inode->i_flags |= S_PRIVATE;
+ }
if (security->name) {
reiserfs_write_unlock(inode->i_sb);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 1ec728cf82d1..1c900f322089 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -377,10 +377,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Propagate the private flag so we know we're
- * in the priv tree
+ * in the priv tree. Also clear IOP_XATTR
+ * since we don't have xattrs on xattr files.
*/
- if (IS_PRIVATE(dir))
+ if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
}
reiserfs_write_unlock(dir->i_sb);
if (retval == IO_ERROR) {
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index d920a646b578..3e78a394fdb8 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1167,6 +1167,8 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
+extern const struct xattr_handler *reiserfs_xattr_handlers[];
+
/*
* this says about version of key of all items (but stat data) the
* object consists of
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index a97e352d05d3..5f5fff068877 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -2249,7 +2249,8 @@ error_out:
/* also releases the path */
unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
- reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
+ if (inode)
+ reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index dec6c93044fa..677608a89b08 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -599,6 +599,7 @@ static void reiserfs_put_super(struct super_block *s)
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
destroy_workqueue(REISERFS_SB(s)->commit_wq);
+ kfree(REISERFS_SB(s)->s_jdev);
kfree(s->s_fs_info);
s->s_fs_info = NULL;
}
@@ -1927,7 +1928,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (!sbi->s_jdev) {
SWARN(silent, s, "", "Cannot allocate memory for "
"journal device name");
- goto error;
+ goto error_unlocked;
}
}
#ifdef CONFIG_QUOTA
@@ -2026,6 +2027,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (replay_only(s))
goto error_unlocked;
+ s->s_xattr = reiserfs_xattr_handlers;
+
if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
SWARN(silent, s, "clm-7000",
"Detected readonly device, marking FS readonly");
@@ -2215,6 +2218,7 @@ error_unlocked:
kfree(qf_names[j]);
}
#endif
+ kfree(sbi->s_jdev);
kfree(sbi);
s->s_fs_info = NULL;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 9e313fc7fdc7..07900105523f 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -121,13 +121,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
struct dentry *xaroot;
if (d_really_is_negative(privroot))
- return ERR_PTR(-ENODATA);
+ return ERR_PTR(-EOPNOTSUPP);
inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR);
xaroot = dget(REISERFS_SB(sb)->xattr_root);
if (!xaroot)
- xaroot = ERR_PTR(-ENODATA);
+ xaroot = ERR_PTR(-EOPNOTSUPP);
else if (d_really_is_negative(xaroot)) {
int err = -ENODATA;
@@ -318,8 +318,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
out_dir:
dput(dir);
out:
- /* -ENODATA isn't an error */
- if (err == -ENODATA)
+ /*
+ * -ENODATA: this object doesn't have any xattrs
+ * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+ * Neither is an error
+ */
+ if (err == -ENODATA || err == -EOPNOTSUPP)
err = 0;
return err;
}
@@ -609,6 +613,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
int error, error2;
size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
+ /* Check before we start a transaction and then do nothing. */
+ if (!d_really_is_positive(REISERFS_SB(inode->i_sb)->priv_root))
+ return -EOPNOTSUPP;
+
if (!(flags & XATTR_REPLACE))
jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
@@ -831,8 +839,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
if (d_really_is_negative(dentry))
return -EINVAL;
- if (!dentry->d_sb->s_xattr ||
- get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
+ if (get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
@@ -872,6 +879,7 @@ static int create_privroot(struct dentry *dentry)
}
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
"storage.\n", PRIVROOT_NAME);
@@ -885,7 +893,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
#endif
/* Actual operations that are exported to VFS-land */
-static const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler *reiserfs_xattr_handlers[] = {
#ifdef CONFIG_REISERFS_FS_XATTR
&reiserfs_xattr_user_handler,
&reiserfs_xattr_trusted_handler,
@@ -956,8 +964,10 @@ int reiserfs_lookup_privroot(struct super_block *s)
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
d_set_d_op(dentry, &xattr_lookup_poison_ops);
- if (d_really_is_positive(dentry))
+ if (d_really_is_positive(dentry)) {
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
+ }
} else
err = PTR_ERR(dentry);
inode_unlock(d_inode(s->s_root));
@@ -986,7 +996,6 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
}
if (d_really_is_positive(privroot)) {
- s->s_xattr = reiserfs_xattr_handlers;
inode_lock(d_inode(privroot));
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index d92a1dc6ee70..1f1fdfd3bc5c 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -316,10 +316,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
* would be useless since permissions are ignored, and a pain because
* it introduces locking cycles
*/
- if (IS_PRIVATE(dir)) {
- inode->i_flags |= S_PRIVATE;
+ if (IS_PRIVATE(inode))
goto apply_umask;
- }
err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (err)
diff --git a/fs/splice.c b/fs/splice.c
index 01983bea760c..8bfbc8a50164 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1585,7 +1585,11 @@ retry:
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
*obuf = *ibuf;
/*
@@ -1659,7 +1663,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
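The splice.c hunks above stop assuming that grabbing a pipe-buffer reference always succeeds, because the underlying try_get_page() refuses references when the page count is near overflow. A small userspace sketch of that checked-increment pattern follows; struct toy_ref, toy_try_get, and the saturation limit are illustrative assumptions, not kernel types.

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

struct toy_ref {
	unsigned int count;
};

/* Refuse a new reference once the count is close to overflowing. */
static bool toy_try_get(struct toy_ref *r)
{
	if (r->count >= UINT_MAX / 2)	/* treat "close to overflow" as full */
		return false;
	r->count++;
	return true;
}

int main(void)
{
	struct toy_ref ok = { .count = 1 };
	struct toy_ref saturated = { .count = UINT_MAX / 2 };

	printf("normal ref:    %s (count=%u)\n",
	       toy_try_get(&ok) ? "taken" : "refused", ok.count);
	printf("saturated ref: %s (count=%u)\n",
	       toy_try_get(&saturated) ? "taken" : "refused", saturated.count);
	return 0;
}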
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b4fbeefba246..5ef0d1d60743 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -721,6 +721,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
int err, page_idx, page_cnt, ret = 0, n = 0;
int allocate = bu->buf ? 0 : 1;
loff_t isize;
+ gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
err = ubifs_tnc_get_bu_keys(c, bu);
if (err)
@@ -782,8 +783,9 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
if (page_offset > end_index)
break;
- page = find_or_create_page(mapping, page_offset,
- GFP_NOFS | __GFP_COLD);
+ page = pagecache_get_page(mapping, page_offset,
+ FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
+ ra_gfp_mask);
if (!page)
break;
if (!PageUptodate(page))
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fd817022cb9b..9e66d85021fc 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -478,13 +478,15 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
return NULL;
}
-/* Extend the file by 'blocks' blocks, return the number of extents added */
+/* Extend the file with new blocks totaling 'new_block_bytes',
+ * return the number of extents added
+ */
static int udf_do_extend_file(struct inode *inode,
struct extent_position *last_pos,
struct kernel_long_ad *last_ext,
- sector_t blocks)
+ loff_t new_block_bytes)
{
- sector_t add;
+ uint32_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
struct kernel_lb_addr prealloc_loc = {};
@@ -494,7 +496,7 @@ static int udf_do_extend_file(struct inode *inode,
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
- if (!blocks && fake)
+ if (!new_block_bytes && fake)
return 0;
iinfo = UDF_I(inode);
@@ -525,13 +527,12 @@ static int udf_do_extend_file(struct inode *inode,
/* Can we merge with the previous extent? */
if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
EXT_NOT_RECORDED_NOT_ALLOCATED) {
- add = ((1 << 30) - sb->s_blocksize -
- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
- sb->s_blocksize_bits;
- if (add > blocks)
- add = blocks;
- blocks -= add;
- last_ext->extLength += add << sb->s_blocksize_bits;
+ add = (1 << 30) - sb->s_blocksize -
+ (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ if (add > new_block_bytes)
+ add = new_block_bytes;
+ new_block_bytes -= add;
+ last_ext->extLength += add;
}
if (fake) {
@@ -552,28 +553,27 @@ static int udf_do_extend_file(struct inode *inode,
}
/* Managed to do everything necessary? */
- if (!blocks)
+ if (!new_block_bytes)
goto out;
/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
last_ext->extLocation.logicalBlockNum = 0;
last_ext->extLocation.partitionReferenceNum = 0;
- add = (1 << (30-sb->s_blocksize_bits)) - 1;
- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
- (add << sb->s_blocksize_bits);
+ add = (1 << 30) - sb->s_blocksize;
+ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
/* Create enough extents to cover the whole hole */
- while (blocks > add) {
- blocks -= add;
+ while (new_block_bytes > add) {
+ new_block_bytes -= add;
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
return err;
count++;
}
- if (blocks) {
+ if (new_block_bytes) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
- (blocks << sb->s_blocksize_bits);
+ new_block_bytes;
err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
if (err)
@@ -604,6 +604,24 @@ out:
return count;
}
+/* Extend the final block of the file to final_block_len bytes */
+static void udf_do_extend_final_block(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+ uint32_t final_block_len)
+{
+ struct super_block *sb = inode->i_sb;
+ uint32_t added_bytes;
+
+ added_bytes = final_block_len -
+ (last_ext->extLength & (sb->s_blocksize - 1));
+ last_ext->extLength += added_bytes;
+ UDF_I(inode)->i_lenExtents += added_bytes;
+
+ udf_write_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+}
+
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
@@ -613,10 +631,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
int8_t etype;
struct super_block *sb = inode->i_sb;
sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+ unsigned long partial_final_block;
int adsize;
struct udf_inode_info *iinfo = UDF_I(inode);
struct kernel_long_ad extent;
- int err;
+ int err = 0;
+ int within_final_block;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
adsize = sizeof(struct short_ad);
@@ -626,18 +646,8 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
BUG();
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+ within_final_block = (etype != -1);
- /* File has extent covering the new size (could happen when extending
- * inside a block)? */
- if (etype != -1)
- return 0;
- if (newsize & (sb->s_blocksize - 1))
- offset++;
- /* Extended file just to the boundary of the last file block? */
- if (offset == 0)
- return 0;
-
- /* Truncate is extending the file by 'offset' blocks */
if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
(epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
/* File has no extents at all or has empty last
@@ -651,7 +661,22 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
&extent.extLength, 0);
extent.extLength |= etype << 30;
}
- err = udf_do_extend_file(inode, &epos, &extent, offset);
+
+ partial_final_block = newsize & (sb->s_blocksize - 1);
+
+ /* File has extent covering the new size (could happen when extending
+ * inside a block)?
+ */
+ if (within_final_block) {
+ /* Extending file within the last file block */
+ udf_do_extend_final_block(inode, &epos, &extent,
+ partial_final_block);
+ } else {
+ loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
+ partial_final_block;
+ err = udf_do_extend_file(inode, &epos, &extent, add);
+ }
+
if (err < 0)
goto out;
err = 0;
@@ -756,6 +781,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
/* Are we beyond EOF? */
if (etype == -1) {
int ret;
+ loff_t hole_len;
isBeyondEOF = true;
if (count) {
if (c)
@@ -771,7 +797,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
startnum = (offset > 0);
}
/* Create extents for the hole between EOF and offset */
- ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
+ hole_len = (loff_t)offset << inode->i_blkbits;
+ ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
if (ret < 0) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
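The udf_do_extend_file() rework above switches from block counts to byte counts: a hole is covered by NOT_RECORDED extents of at most (1 << 30) - blocksize bytes plus one partial extent. A minimal sketch of that arithmetic follows, with illustrative names and an example hole size, not the UDF on-disk logic itself.

#include <stdio.h>
#include <stdint.h>

/* How many extents are needed to cover a hole of new_block_bytes bytes? */
static unsigned int count_hole_extents(uint64_t new_block_bytes,
				       uint32_t blocksize)
{
	uint32_t max_ext = (1U << 30) - blocksize;
	unsigned int count = 0;

	while (new_block_bytes > max_ext) {
		new_block_bytes -= max_ext;
		count++;		/* one full NOT_RECORDED extent */
	}
	if (new_block_bytes)
		count++;		/* trailing partial extent */
	return count;
}

int main(void)
{
	/* A 3 GiB hole with 2 KiB blocks needs four extents. */
	printf("extents: %u\n", count_hole_extents(3ULL << 30, 2048));
	return 0;
}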
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 03369a89600e..4abdba453885 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2460,17 +2460,29 @@ static unsigned int udf_count_free_table(struct super_block *sb,
static unsigned int udf_count_free(struct super_block *sb)
{
unsigned int accum = 0;
- struct udf_sb_info *sbi;
+ struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
+ unsigned int part = sbi->s_partition;
+ int ptype = sbi->s_partmaps[part].s_partition_type;
+
+ if (ptype == UDF_METADATA_MAP25) {
+ part = sbi->s_partmaps[part].s_type_specific.s_metadata.
+ s_phys_partition_ref;
+ } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
+ /*
+ * Filesystems with VAT are append-only and we cannot write to
+ * them. Let's just report 0 here.
+ */
+ return 0;
+ }
- sbi = UDF_SB(sb);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid =
(struct logicalVolIntegrityDesc *)
sbi->s_lvid_bh->b_data;
- if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
+ if (le32_to_cpu(lvid->numOfPartitions) > part) {
accum = le32_to_cpu(
- lvid->freeSpaceTable[sbi->s_partition]);
+ lvid->freeSpaceTable[part]);
if (accum == 0xFFFFFFFF)
accum = 0;
}
@@ -2479,7 +2491,7 @@ static unsigned int udf_count_free(struct super_block *sb)
if (accum)
return accum;
- map = &sbi->s_partmaps[sbi->s_partition];
+ map = &sbi->s_partmaps[part];
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
accum += udf_count_free_bitmap(sb,
map->s_uspace.s_bitmap);
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 398019fb1448..9c4fb1fc0822 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -228,7 +228,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
case UFS_UID_44BSD:
return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
case UFS_UID_EFT:
- if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
+ if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
/* Fall through */
default:
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 784d667475ae..de63d4e2dfba 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -464,6 +464,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ bool still_valid;
ACCESS_ONCE(ctx->released) = true;
@@ -479,6 +480,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
+ still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
@@ -489,15 +491,17 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
- new_flags, vma->anon_vma,
- vma->vm_file, vma->vm_pgoff,
- vma_policy(vma),
- NULL_VM_UFFD_CTX);
- if (prev)
- vma = prev;
- else
- prev = vma;
+ if (still_valid) {
+ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+ vma->vm_file, vma->vm_pgoff,
+ vma_policy(vma),
+ NULL_VM_UFFD_CTX);
+ if (prev)
+ vma = prev;
+ else
+ prev = vma;
+ }
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
@@ -802,6 +806,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto out_unlock;
+
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
@@ -947,6 +954,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out;
down_write(&mm->mmap_sem);
+ if (!mmget_still_valid(mm))
+ goto out_unlock;
+
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
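
Editor note: both the register and unregister paths now check mmget_still_valid() after taking mmap_sem for writing, and the release path only merges VMAs when the mm is still valid. A hedged sketch of the common guard pattern, assuming kernel context; this is not a standalone program.

	down_write(&mm->mmap_sem);
	if (!mmget_still_valid(mm))
		goto out_unlock;	/* mm being torn down, do not touch VMAs */

	/* ... walk and modify vma->vm_flags / vma->vm_userfaultfd_ctx ... */

out_unlock:
	up_write(&mm->mmap_sem);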
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 8ad65d43b65d..9ca8809ee3d0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4212,15 +4212,28 @@ xfs_bmapi_read(
XFS_STATS_INC(mp, xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!ifp) {
+ /* No CoW fork? Return a hole. */
+ if (whichfork == XFS_COW_FORK) {
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount = len;
+ mval->br_state = XFS_EXT_NORM;
+ *nmap = 1;
+ return 0;
+ }
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK && !ifp) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
+ /*
+ * A missing attr ifork implies that the inode says we're in
+ * extents or btree format but failed to pass the inode fork
+ * verifier while trying to load it. Treat that as a file
+ * corruption too.
+ */
+#ifdef DEBUG
+ xfs_alert(mp, "%s: inode %llu missing fork %d",
+ __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -5675,7 +5688,7 @@ __xfs_bunmapi(
* Make sure we don't touch multiple AGF headers out of order
* in a single transaction, as that could cause AB-BA deadlocks.
*/
- if (!wasdel) {
+ if (!wasdel && !isrt) {
agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
if (prev_agno != NULLAGNUMBER && prev_agno > agno)
break;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 3f45d9867e10..0b58b9d419e8 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -57,6 +57,32 @@ static kmem_zone_t *xfs_buf_zone;
#define xb_to_gfp(flags) \
((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
+/*
+ * Locking orders
+ *
+ * xfs_buf_ioacct_inc:
+ * xfs_buf_ioacct_dec:
+ * b_sema (caller holds)
+ * b_lock
+ *
+ * xfs_buf_stale:
+ * b_sema (caller holds)
+ * b_lock
+ * lru_lock
+ *
+ * xfs_buf_rele:
+ * b_lock
+ * pag_buf_lock
+ * lru_lock
+ *
+ * xfs_buftarg_wait_rele
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ *
+ * xfs_buftarg_isolate
+ * lru_lock
+ * b_lock (trylock due to inversion)
+ */
static inline int
xfs_buf_is_vmapped(
@@ -957,8 +983,18 @@ xfs_buf_rele(
ASSERT(atomic_read(&bp->b_hold) > 0);
- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
+ /*
+ * We grab the b_lock here first to serialise racing xfs_buf_rele()
+ * calls. The pag_buf_lock being taken on the last reference only
+ * serialises against racing lookups in xfs_buf_find(). IOWs, the second
+ * to last reference we drop here is not serialised against the last
+ * reference until we take bp->b_lock. Hence if we don't grab b_lock
+ * first, the last "release" reference can win the race to the lock and
+ * free the buffer before the second-to-last reference is processed,
+ * leading to a use-after-free scenario.
+ */
spin_lock(&bp->b_lock);
+ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
if (!release) {
/*
* Drop the in-flight state if the buffer is already on the LRU
@@ -1674,7 +1710,7 @@ xfs_buftarg_isolate(
* zero. If the value is already zero, we need to reclaim the
* buffer, otherwise it gets another trip through the LRU.
*/
- if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+ if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
spin_unlock(&bp->b_lock);
return LRU_ROTATE;
}
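
Editor note: a standalone pthread analogue of the ordering described in the xfs_buf_rele() comment above: the object lock is taken before the hold count is dropped, so the final release cannot free the object while another releaser is still inspecting it. The struct and helper names are made up for this sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	atomic_int hold;
	int state;
};

static void obj_rele(struct obj *o)
{
	pthread_mutex_lock(&o->lock);		/* lock first ... */
	int release = (atomic_fetch_sub(&o->hold, 1) == 1);
	if (!release) {
		o->state++;			/* safe: object cannot vanish */
		pthread_mutex_unlock(&o->lock);
		return;
	}
	pthread_mutex_unlock(&o->lock);		/* last reference frees it */
	pthread_mutex_destroy(&o->lock);
	free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	pthread_mutex_init(&o->lock, NULL);
	atomic_init(&o->hold, 2);
	o->state = 0;
	obj_rele(o);
	obj_rele(o);
	puts("both references dropped");
	return 0;
}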
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 321f57721b92..6b7ed221726d 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -251,6 +251,32 @@ xfs_compat_ioc_bulkstat(
int done;
int error;
+ /*
+ * Output structure handling functions. Depending on the command,
+ * either the xfs_bstat or xfs_inogrp structures are written out
+ * to userspace memory via bulkreq.ubuffer. Normally the compat
+ * functions and structure size are the correct ones to use ...
+ */
+ inumbers_fmt_pf inumbers_func = xfs_inumbers_fmt_compat;
+ bulkstat_one_pf bs_one_func = xfs_bulkstat_one_compat;
+ size_t bs_one_size = sizeof(struct compat_xfs_bstat);
+
+#ifdef CONFIG_X86_X32
+ if (in_x32_syscall()) {
+ /*
+ * ... but on x32 the input xfs_fsop_bulkreq has pointers
+ * which must be handled in the "compat" (32-bit) way, while
+ * the xfs_bstat and xfs_inogrp structures follow native 64-
+ * bit layout convention. So adjust accordingly, otherwise
+ * the data written out in compat layout will not match what
+ * x32 userspace expects.
+ */
+ inumbers_func = xfs_inumbers_fmt;
+ bs_one_func = xfs_bulkstat_one;
+ bs_one_size = sizeof(struct xfs_bstat);
+ }
+#endif
+
/* done = 1 if there are more stats to get and if bulkstat */
/* should be called again (unused here, but used in dmapi) */
@@ -282,15 +308,15 @@ xfs_compat_ioc_bulkstat(
if (cmd == XFS_IOC_FSINUMBERS_32) {
error = xfs_inumbers(mp, &inlast, &count,
- bulkreq.ubuffer, xfs_inumbers_fmt_compat);
+ bulkreq.ubuffer, inumbers_func);
} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
int res;
- error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
- sizeof(compat_xfs_bstat_t), NULL, &res);
+ error = bs_one_func(mp, inlast, bulkreq.ubuffer,
+ bs_one_size, NULL, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
+ bs_one_func, bs_one_size,
bulkreq.ubuffer, &done);
} else
error = -EINVAL;
@@ -346,6 +372,7 @@ xfs_compat_attrlist_by_handle(
{
int error;
attrlist_cursor_kern_t *cursor;
+ compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -380,6 +407,11 @@ xfs_compat_attrlist_by_handle(
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
error = -EFAULT;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 33c389934238..7bfddcd32d73 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -774,6 +774,7 @@ xfs_setattr_nonsize(
out_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 33c9a3aae948..7bfcd09d446b 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1540,6 +1540,8 @@ out_free_iclog:
if (iclog->ic_bp)
xfs_buf_free(iclog->ic_bp);
kmem_free(iclog);
+ if (prev_iclog == log->l_iclog)
+ break;
}
spinlock_destroy(&log->l_icloglock);
xfs_buf_free(log->l_xbuf);
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index f82d79a8c694..a7c3da09b72f 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -214,6 +214,9 @@ xfs_fs_rm_xquota(
if (XFS_IS_QUOTA_ON(mp))
return -EINVAL;
+ if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
+ return -EINVAL;
+
if (uflags & FS_USER_QUOTA)
flags |= XFS_DQ_USER;
if (uflags & FS_GROUP_QUOTA)
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 802bcc326d9f..0d93d3c10fcc 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1222,13 +1222,11 @@ xfs_rtmount_inodes(
xfs_sb_t *sbp;
sbp = &mp->m_sb;
- if (sbp->sb_rbmino == NULLFSINO)
- return 0;
error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
- ASSERT(sbp->sb_rsumino != NULLFSINO);
+
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
IRELE(mp->m_rbmip);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 67d589e0a49f..b16ca13c11d5 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1674,6 +1674,7 @@ xfs_fs_fill_super(
out_close_devices:
xfs_close_devices(mp);
out_free_fsname:
+ sb->s_fs_info = NULL;
xfs_free_fsname(mp);
kfree(mp);
out:
@@ -1691,6 +1692,10 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
+ /* if ->fill_super failed, we have no mount to tear down */
+ if (!sb->s_fs_info)
+ return;
+
xfs_notice(mp, "Unmounting Filesystem");
xfs_filestream_unmount(mp);
xfs_unmountfs(mp);
@@ -1700,6 +1705,8 @@ xfs_fs_put_super(
xfs_destroy_percpu_counters(mp);
xfs_destroy_mount_workqueues(mp);
xfs_close_devices(mp);
+
+ sb->s_fs_info = NULL;
xfs_free_fsname(mp);
kfree(mp);
}
@@ -1719,6 +1726,9 @@ xfs_fs_nr_cached_objects(
struct super_block *sb,
struct shrink_control *sc)
{
+ /* Paranoia: catch incorrect calls during mount setup or teardown */
+ if (WARN_ON_ONCE(!sb->s_fs_info))
+ return 0;
return xfs_reclaim_inodes_count(XFS_M(sb));
}
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 1d798abae710..f502d257d494 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -551,6 +551,8 @@ typedef u64 acpi_integer;
#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
+
/*******************************************************************************
*
* Miscellaneous constants
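
Editor note: the new ACPI_ACCESS_BYTE_WIDTH() macro turns a GAS access-size code into a byte width. A standalone check; the 1=byte .. 4=qword mapping is the usual ACPI convention and is assumed here.

#include <stdio.h>

#define ACPI_ACCESS_BYTE_WIDTH(size)	(1 << ((size) - 1))

int main(void)
{
	/* GAS access_size codes: 1 = byte, 2 = word, 3 = dword, 4 = qword */
	for (int size = 1; size <= 4; size++)
		printf("access_size %d -> %d byte(s)\n",
		       size, ACPI_ACCESS_BYTE_WIDTH(size));
	return 0;
}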
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6f96247226a4..89f079d6b41b 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -47,6 +47,7 @@ struct bug_entry {
#ifndef HAVE_ARCH_BUG
#define BUG() do { \
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ barrier_before_unreachable(); \
panic("BUG!"); \
} while (0)
#endif
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index 65e4468ac53d..52fbf236a90e 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -6,24 +6,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
- int order;
-
- size--;
- size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
- order = fls(size);
-#else
- order = fls64(size);
-#endif
- return order;
-}
-
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -42,19 +24,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
*/
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? ( \
- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
- ilog2((n) - 1) - PAGE_SHIFT + 1) \
- ) : \
- __get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
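
Editor note: get_order() is now a single inline that covers both the constant-folded and runtime cases. A standalone re-implementation of the runtime arithmetic, assuming PAGE_SHIFT = 12 (4 KiB pages) for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this sketch */

/* Runtime branch of get_order(): smallest order such that
 * (1UL << (order + PAGE_SHIFT)) >= size.
 */
static int get_order_runtime(unsigned long size)
{
	int order = 0;

	size--;
	size >>= PAGE_SHIFT;
	while (size) {		/* stand-in for fls()/fls64() */
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       get_order_runtime(1),		/* 0: fits in one page */
	       get_order_runtime(4096),		/* 0: exactly one page */
	       get_order_runtime(4097),		/* 1: needs two pages  */
	       get_order_runtime(1 << 20));	/* 8: 1 MiB = 256 pages */
	return 0;
}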
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 003207670597..c0542de64690 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -312,7 +312,7 @@ struct drm_dp_resource_status_notify {
struct drm_dp_query_payload_ack_reply {
u8 port_number;
- u8 allocated_pbn;
+ u16 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 9c03895dc479..72bf408f887f 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -42,6 +42,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h
index 614aff2c7aff..a03e86fe2c57 100644
--- a/include/dt-bindings/reset/amlogic,meson8b-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h
@@ -95,9 +95,9 @@
#define RESET_VD_RMEM 64
#define RESET_AUDIN 65
#define RESET_DBLK 66
-#define RESET_PIC_DC 66
-#define RESET_PSC 66
-#define RESET_NAND 66
+#define RESET_PIC_DC 67
+#define RESET_PSC 68
+#define RESET_NAND 69
#define RESET_GE2D 70
#define RESET_PARSER_REG 71
#define RESET_PARSER_FETCH 72
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ca2b4c4aec42..5670bb9788bb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -77,7 +77,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -309,7 +309,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index a270f25ee7c7..1a527e40d601 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -23,6 +23,8 @@ struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 73fd8b7e9534..4be0e14b38fc 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -107,7 +107,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
@@ -150,19 +150,29 @@ extern int sysctl_aarp_retransmit_limit;
extern int sysctl_aarp_resolve_time;
#ifdef CONFIG_SYSCTL
-extern void atalk_register_sysctl(void);
+extern int atalk_register_sysctl(void);
extern void atalk_unregister_sysctl(void);
#else
-#define atalk_register_sysctl() do { } while(0)
-#define atalk_unregister_sysctl() do { } while(0)
+static inline int atalk_register_sysctl(void)
+{
+ return 0;
+}
+static inline void atalk_unregister_sysctl(void)
+{
+}
#endif
#ifdef CONFIG_PROC_FS
extern int atalk_proc_init(void);
extern void atalk_proc_exit(void);
#else
-#define atalk_proc_init() ({ 0; })
-#define atalk_proc_exit() do { } while(0)
+static inline int atalk_proc_init(void)
+{
+ return 0;
+}
+static inline void atalk_proc_exit(void)
+{
+}
#endif /* CONFIG_PROC_FS */
#endif /* __LINUX_ATALK_H__ */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 4ea779b25a51..34056ec64c7c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -157,6 +157,7 @@ struct backing_dev_info {
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 87ce64dafb93..e588b01a23d4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -226,7 +226,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
if (count != 1) {
bio->bi_flags |= (1 << BIO_REFFED);
- smp_mb__before_atomic();
+ smp_mb();
}
atomic_set(&bio->__bi_cnt, count);
}
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 3b77588a9360..26347cc717e8 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -86,6 +86,14 @@
*/
/*
+ * Allocation and deallocation of bitmap.
+ * Provided in lib/bitmap.c to avoid circular dependency.
+ */
+extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+extern void bitmap_free(const unsigned long *bitmap);
+
+/*
* lib/bitmap.c provides these functions:
*/
@@ -185,8 +193,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+/*
+ * The static inlines below do not handle constant nbits==0 correctly,
+ * so make such users (should any ever turn up) call the out-of-line
+ * versions.
+ */
#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
@@ -316,7 +329,7 @@ static __always_inline int bitmap_weight(const unsigned long *src, unsigned int
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, int nbits)
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
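
Editor note: a hedged kernel-style sketch of the new bitmap allocation helpers declared above (assumes kernel context and GFP_KERNEL; not a standalone program).

	unsigned long *mask;

	mask = bitmap_zalloc(128, GFP_KERNEL);	/* 128 bits, zero-initialised */
	if (!mask)
		return -ENOMEM;

	set_bit(3, mask);
	/* ... use the bitmap ... */
	bitmap_free(mask);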
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a83c822c35c2..cee74a52b9eb 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,28 +1,10 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -77,7 +59,7 @@ static __always_inline unsigned long hweight_long(unsigned long w)
*/
static inline __u64 rol64(__u64 word, unsigned int shift)
{
- return (word << shift) | (word >> (64 - shift));
+ return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
@@ -87,7 +69,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift)
*/
static inline __u64 ror64(__u64 word, unsigned int shift)
{
- return (word >> shift) | (word << (64 - shift));
+ return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
@@ -97,7 +79,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
- return (word << shift) | (word >> ((-shift) & 31));
+ return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
@@ -107,7 +89,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
*/
static inline __u32 ror32(__u32 word, unsigned int shift)
{
- return (word >> shift) | (word << (32 - shift));
+ return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
@@ -117,7 +99,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
*/
static inline __u16 rol16(__u16 word, unsigned int shift)
{
- return (word << shift) | (word >> (16 - shift));
+ return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
@@ -127,7 +109,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift)
*/
static inline __u16 ror16(__u16 word, unsigned int shift)
{
- return (word >> shift) | (word << (16 - shift));
+ return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
@@ -137,7 +119,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift)
*/
static inline __u8 rol8(__u8 word, unsigned int shift)
{
- return (word << shift) | (word >> (8 - shift));
+ return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
@@ -147,7 +129,7 @@ static inline __u8 rol8(__u8 word, unsigned int shift)
*/
static inline __u8 ror8(__u8 word, unsigned int shift)
{
- return (word >> shift) | (word << (8 - shift));
+ return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/**
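
Editor note: the rotate helpers now mask the shift count, so rol32(x, 0) no longer evaluates x >> 32, which is undefined in C. A standalone check of the new rol32 form.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* New form from the hunk above: both shift counts stay in 0..31 */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

int main(void)
{
	/* shift 0 used to compute word >> 32 with the old form */
	printf("%08" PRIx32 "\n", rol32(0x12345678u, 0));	/* 12345678 */
	printf("%08" PRIx32 "\n", rol32(0x12345678u, 8));	/* 34567812 */
	printf("%08" PRIx32 "\n", rol32(0x80000001u, 1));	/* 00000003 */
	return 0;
}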
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8449c1..333e42cf08de 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -31,32 +31,32 @@ static inline u32 __bitrev32(u32 x)
#define __constant_bitrev32(x) \
({ \
- u32 __x = x; \
- __x = (__x >> 16) | (__x << 16); \
- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = (___x >> 16) | (___x << 16); \
+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev16(x) \
({ \
- u16 __x = x; \
- __x = (__x >> 8) | (__x << 8); \
- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
- __x; \
+ u16 ___x = x; \
+ ___x = (___x >> 8) | (___x << 8); \
+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
+ ___x; \
})
#define __constant_bitrev8(x) \
({ \
- u8 __x = x; \
- __x = (__x >> 4) | (__x << 4); \
- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
- __x; \
+ u8 ___x = x; \
+ ___x = (___x >> 4) | (___x << 4); \
+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
+ ___x; \
})
#define bitrev32(x) \
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
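
Editor note: a standalone check that the new GENMASK_ULL form still produces the value quoted in the comment above.

#include <stdio.h>

#define BITS_PER_LONG_LONG 64

#define GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & \
	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	printf("0x%016llx\n", GENMASK_ULL(39, 21));	/* 0x000000ffffe00000 */
	printf("0x%016llx\n", GENMASK_ULL(7, 0));	/* 0x00000000000000ff */
	printf("0x%016llx\n", GENMASK_ULL(63, 0));	/* 0xffffffffffffffff */
	return 0;
}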
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6b5481d88923..b522ddad45d8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -243,6 +243,11 @@ struct request {
struct request *next_rq;
};
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return rq->cmd_type != REQ_TYPE_FS;
+}
+
static inline unsigned short req_get_ioprio(struct request *req)
{
return req->ioprio;
@@ -305,6 +310,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
@@ -314,7 +320,6 @@ struct queue_limits {
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
@@ -693,7 +698,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
static inline bool rq_mergeable(struct request *rq)
{
- if (rq->cmd_type != REQ_TYPE_FS)
+ if (blk_rq_is_passthrough(rq))
return false;
if (req_op(rq) == REQ_OP_FLUSH)
@@ -942,7 +947,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
{
struct request_queue *q = rq->q;
- if (unlikely(rq->cmd_type != REQ_TYPE_FS))
+ if (blk_rq_is_passthrough(rq))
return q->limits.max_hw_sectors;
if (!q->limits.chunk_sectors ||
@@ -1022,7 +1027,7 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
@@ -1247,7 +1252,7 @@ static inline unsigned int queue_max_segment_size(struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(struct request_queue *q)
+static inline unsigned queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
@@ -1257,7 +1262,7 @@ static inline unsigned short queue_logical_block_size(struct request_queue *q)
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
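
Editor note: the queue_limits change widens logical_block_size from unsigned short to unsigned int. A standalone illustration of why; the 64 KiB block size is an illustrative figure, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned short old_lbs = (unsigned short)(64 * 1024);	/* wraps to 0 */
	unsigned int   new_lbs = 64 * 1024;			/* 65536      */

	printf("unsigned short: %u\n", old_lbs);
	printf("unsigned int:   %u\n", new_lbs);
	return 0;
}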
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a10b0c2..0faae96302bd 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -114,6 +114,11 @@ int is_valid_bugaddr(unsigned long addr);
#else /* !CONFIG_GENERIC_BUG */
+static inline void *find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+
static inline enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index f7178f44825b..d2a497950639 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -17,6 +17,7 @@
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
#include <linux/netdevice.h>
/*
@@ -81,6 +82,36 @@ struct can_priv {
#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
@@ -98,6 +129,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
} else
goto inval_skb;
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
return false;
inval_skb:
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 138bbf721e70..a844749a2855 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -956,7 +956,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_DECK_STATUS : 0;
}
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
@@ -1060,7 +1061,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_TUNER_DEVICE_STATUS : 0;
}
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ca15e76100..dada47a4360f 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -29,7 +29,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
static inline void ceph_buffer_put(struct ceph_buffer *b)
{
- kref_put(&b->kref, ceph_buffer_release);
+ if (b)
+ kref_put(&b->kref, ceph_buffer_release);
}
extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index aa2e19182d99..51c5bd64bd00 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -3,6 +3,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/string.h>
+
#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
/*
@@ -12,12 +14,10 @@
*/
# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
-extern const char *ceph_file_part(const char *s, int len);
# define dout(fmt, ...) \
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
- ceph_file_part(__FILE__, sizeof(__FILE__)), \
- __LINE__, ##__VA_ARGS__)
+ kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 7620a8bc0493..8be03520995c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -462,7 +462,7 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
*
* Find the css for the (@task, @subsys_id) combination, increment a
* reference on and return it. This function is guaranteed to return a
- * valid css.
+ * valid css. The returned css may already have been offlined.
*/
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
@@ -472,7 +472,13 @@ task_get_css(struct task_struct *task, int subsys_id)
rcu_read_lock();
while (true) {
css = task_css(task, subsys_id);
- if (likely(css_tryget_online(css)))
+ /*
+ * Can't use css_tryget_online() here. A task which has
+ * PF_EXITING set may stay associated with an offline css.
+ * If such task calls this function, css_tryget_online()
+ * will keep failing.
+ */
+ if (likely(css_tryget(css)))
break;
cpu_relax();
}
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b9cef8..0ca0c83fdb1c 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 5b8721efa948..fe1466daf291 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -19,6 +19,17 @@ struct venus_comm {
struct mutex vc_mutex;
};
+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+ struct list_head uc_chain;
+ caddr_t uc_data;
+ u_short uc_flags;
+ u_short uc_inSize; /* Size is at most 5000 bytes */
+ u_short uc_outSize;
+ u_short uc_opcode; /* copied from data to save lookup */
+ int uc_unique;
+ wait_queue_head_t uc_sleep; /* process' wait queue */
+};
static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 8e9b0cb8db41..61650c1830d4 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -234,6 +234,15 @@
#endif
/*
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly before it works around the problem
+ */
+#define barrier_before_unreachable() asm volatile("")
+
+/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
@@ -243,7 +252,11 @@
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() \
- do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+ do { \
+ annotate_unreachable(); \
+ barrier_before_unreachable(); \
+ __builtin_unreachable(); \
+ } while (0)
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
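
Editor note: a minimal standalone sketch of the workaround pattern the comment above describes, mirroring how BUG() and unreachable() place the empty asm barrier in front of a noreturn call.

#include <stdlib.h>

#define barrier_before_unreachable()	asm volatile("")

static void die(const char *why) __attribute__((noreturn));
static void die(const char *why)
{
	(void)why;
	barrier_before_unreachable();	/* GCC PR82365 stack-size workaround */
	abort();
}

int main(void)
{
	die("example");
}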
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 4f3dfabb680f..7837afabbd78 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -54,6 +54,22 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__
+/*
+ * Minimal backport of compiler_attributes.h to add support for __copy
+ * to v4.9.y so that we can use it in init/exit_module to avoid
+ * -Werror=missing-attributes errors on GCC 9.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___copy__ 0
+#endif
+
+#if __has_attribute(__copy__)
+# define __copy(symbol) __attribute__((__copy__(symbol)))
+#else
+# define __copy(symbol)
+#endif
+
#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif
@@ -177,6 +193,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier_data(ptr) barrier()
#endif
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
@@ -245,23 +266,21 @@ void __read_once_size(const volatile void *p, void *res, int size)
#ifdef CONFIG_KASAN
/*
- * This function is not 'inline' because __no_sanitize_address confilcts
+ * We can't declare the function 'inline' because __no_sanitize_address conflicts
* with inlining. Attempt to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
+# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
-static __always_inline
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
-#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
@@ -299,6 +318,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
+#include <linux/kasan-checks.h>
#define __READ_ONCE(x, check) \
({ \
@@ -317,6 +337,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+static __no_kasan_or_inline
+unsigned long read_word_at_a_time(const void *addr)
+{
+ kasan_check_read(addr, 1);
+ return *(unsigned long *)addr;
+}
+
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
@@ -519,7 +546,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
- _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
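
Editor note: why __COUNTER__ instead of __LINE__: two assertions expanded on the same source line now get distinct identifiers. A standalone illustration using a simplified stand-in (the negative-array-size trick), not the kernel's actual compiletime_assert() machinery.

#include <stdio.h>

#define ASSERT_ID(expr, id)	typedef char static_assert_##id[(expr) ? 1 : -1]
#define ASSERT_(expr, id)	ASSERT_ID(expr, id)
#define STATIC_ASSERT(expr)	ASSERT_(expr, __COUNTER__)

/* With __LINE__ these two would generate the same identifier and clash. */
STATIC_ASSERT(sizeof(int) >= 4); STATIC_ASSERT(sizeof(long) >= sizeof(int));

int main(void)
{
	puts("compile-time checks passed");
	return 0;
}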
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index fdf5be472eff..0da5edee9440 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -54,6 +54,13 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -269,11 +276,15 @@ extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
# define cpu_smt_control (CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
#define IDLE_START 1
@@ -283,4 +294,7 @@ void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);
+extern bool cpu_mitigations_off(void);
+extern bool cpu_mitigations_auto_nosmt(void);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index c9447a689522..1ab0273560ae 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -77,10 +77,10 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
diff --git a/include/linux/cred.h b/include/linux/cred.h
index cf1a5d0c4eb4..4f614042214b 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -144,7 +144,11 @@ struct cred {
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
};
extern void __put_cred(struct cred *);
@@ -242,6 +246,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 7adf6cc4b305..633346b84cae 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -53,7 +53,7 @@ void devfreq_cooling_unregister(struct thermal_cooling_device *dfc);
#else /* !CONFIG_DEVFREQ_THERMAL */
-struct thermal_cooling_device *
+static inline struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
struct devfreq_cooling_power *dfc_power)
{
diff --git a/include/linux/device.h b/include/linux/device.h
index 12cfe907a8ff..182926d4940e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -682,7 +682,8 @@ extern unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order);
extern void devm_free_pages(struct device *dev, unsigned long addr);
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res);
/* allows to add/remove a custom action to devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 704caae69c42..97f817f4eb78 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -618,8 +618,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline unsigned int dma_set_max_seg_size(struct device *dev,
- unsigned int size)
+static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index eaf8e1dfc582..6270680271e3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1362,8 +1362,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 9e0d78966552..c6233227720c 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
struct device;
@@ -778,6 +779,6 @@ struct mem_ctl_info {
/*
* Maximum number of memory controllers in the coherent fabric.
*/
-#define EDAC_MAX_MCS 16
+#define EDAC_MAX_MCS 2 * MAX_NUMNODES
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 80b1b8faf503..02c4f16685b6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1427,13 +1427,18 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
unsigned long size);
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
/*
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a964d076b4dc..493d6d7794de 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -732,8 +732,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
extern const unsigned char *fb_firmware_edid(struct device *device);
extern void fb_edid_to_monspecs(unsigned char *edid,
struct fb_monspecs *specs);
-extern void fb_edid_add_monspecs(unsigned char *edid,
- struct fb_monspecs *specs);
extern void fb_destroy_modedb(struct fb_videomode *modedb);
extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
@@ -807,7 +805,6 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
-extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1f09c521adfe..0837d904405a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -599,6 +599,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
+extern long bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6527e4e5729b..4c70ab9bfe1f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -143,6 +143,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+/* File is stream-like */
+#define FMODE_STREAM ((__force fmode_t)0x200000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -676,6 +679,7 @@ struct inode {
struct rcu_head i_rcu;
};
u64 i_version;
+ atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -2838,6 +2842,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 6435f46d6e13..c015fa91e7cc 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -34,23 +34,26 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
union futex_key {
struct {
+ u64 i_seq;
unsigned long pgoff;
- struct inode *inode;
- int offset;
+ unsigned int offset;
} shared;
struct {
+ union {
+ struct mm_struct *mm;
+ u64 __tmp;
+ };
unsigned long address;
- struct mm_struct *mm;
- int offset;
+ unsigned int offset;
} private;
struct {
+ u64 ptr;
unsigned long word;
- void *ptr;
- int offset;
+ unsigned int offset;
} both;
};
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 206fe3bccccc..575f2dee8cf7 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -50,7 +50,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool,
+ unsigned long start_addr);
/*
* General purpose special memory pool descriptor.
@@ -130,24 +131,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start, unsigned int nr,
- void *data, struct gen_pool *pool);
+ void *data, struct gen_pool *pool, unsigned long start_addr);
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool);
+ struct gen_pool *pool, unsigned long start_addr);
extern struct gen_pool *devm_gen_pool_create(struct device *dev,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8041f9de31e..d11f56bc9c7e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -284,6 +284,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context. When the direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation. The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+ return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+ __GFP_DIRECT_RECLAIM;
+}
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index d12b5d566e4b..11555bd821b7 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -229,30 +229,6 @@ static inline int irq_to_gpio(unsigned irq)
return -EINVAL;
}
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
- struct pinctrl_dev *pctldev,
- unsigned int gpio_offset, const char *pin_group)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
- WARN_ON(1);
-}
-
static inline int devm_gpio_request(struct device *dev, unsigned gpio,
const char *label)
{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index fb0fde686cb1..4a9838feb086 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -398,7 +398,7 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- return ERR_PTR(-EINVAL);
+ return NULL;
}
static inline int desc_to_gpio(const struct gpio_desc *desc)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fab65b61d6d4..eda06f7ee84a 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -374,6 +374,7 @@ struct hid_global {
struct hid_local {
unsigned usage[HID_MAX_USAGES]; /* usage array */
+ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
unsigned usage_index;
unsigned usage_minimum;
@@ -452,7 +453,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80b1535..5ae70d2f9a2f 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -424,12 +424,18 @@ extern u64 hrtimer_get_next_event(void);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check, whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
/*
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b699d59d0f4f..6b8a7b654771 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -92,9 +92,7 @@ void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
pgoff_t idx, unsigned long address);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a80516fd65c8..d3417c18ee2f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2630,4 +2630,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+} __packed;
+
+/* element iteration helpers */
+#define for_each_element(_elem, _data, _datalen) \
+ for (_elem = (const struct element *)(_data); \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) && \
+ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
+ (int)sizeof(*_elem) + _elem->datalen; \
+ _elem = (const struct element *)(_elem->data + _elem->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (const u8 *)element == (const u8 *)data + datalen;
+}
+
#endif /* LINUX_IEEE80211_H */
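A hedged usage sketch for the element helpers above (not from the patch; foo_ies_ok() and its arguments are hypothetical). It walks the information elements of a frame body and then uses for_each_element_completed() to confirm the buffer was consumed exactly:
static bool foo_ies_ok(const u8 *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element(elem, ies, ies_len) {
		/* elem->id, elem->datalen and elem->data are bounded here */
	}

	/* true only if the loop stopped exactly at the end of the buffer */
	return for_each_element_completed(elem, ies, ies_len);
}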
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 548fd535fd02..d433f5e292c9 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
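A brief illustration of the comment above (not part of the patch; foo_start_xmit() is hypothetical and assumes <linux/etherdevice.h>): in an ndo_start_xmit() handler the MAC header starts at skb->data, so skb_eth_hdr() avoids relying on a mac_header offset that may not have been set:
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth = skb_eth_hdr(skb);

	if (is_multicast_ether_addr(eth->h_dest))
		dev->stats.multicast++;	/* example bookkeeping only */

	/* hand the skb to the hardware queue here */
	return NETDEV_TX_OK;
}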
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0c7c57..24e9b360da65 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 6cc48ac55fd2..40b14736c73d 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -66,6 +66,7 @@ struct ad_sigma_delta {
bool irq_dis;
bool bus_locked;
+ bool keep_cs_asserted;
uint8_t comm;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index e353f6600b0b..27dbab59f034 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -295,7 +295,8 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -320,7 +321,8 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
+ ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c9be57931b58..bb5547a83daf 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -61,6 +61,7 @@ struct irq_desc {
unsigned int core_internal_state__do_not_mess_with_it;
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
+ unsigned int tot_count;
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d073470cb342..344eb873f6f5 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1560,7 +1560,7 @@ static inline int jbd2_space_needed(journal_t *journal)
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
unsigned long committing = atomic_read(&journal->
@@ -1569,7 +1569,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
/* Transaction + control blocks */
free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
}
- return free;
+ return max_t(long, free, 0);
}
/*
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 820c0ad54a01..c9df9e180610 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -7,6 +7,7 @@
struct kmem_cache;
struct page;
struct vm_struct;
+struct task_struct;
#ifdef CONFIG_KASAN
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 61054f12be7c..d83fc669eeac 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -55,8 +55,8 @@
#define u64_to_user_ptr(x) ( \
{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
} \
)
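A worked illustration of why the added parentheses matter (not from the patch; "addr" is a hypothetical u64 carrying flag bits in its low bits):
/* old expansion: (void __user *)(uintptr_t)addr & ~7ULL
 *   the cast binds to "addr" alone, so the & is then applied to a
 *   pointer and the build fails;
 * new expansion: (void __user *)(uintptr_t)(addr & ~7ULL), as intended.
 */
void __user *p = u64_to_user_ptr(addr & ~7ULL);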
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 5957c6a3fd7f..d9d4485ebad2 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -108,6 +108,8 @@ extern int __must_check kobject_rename(struct kobject *, const char *new_name);
extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
+extern struct kobject * __must_check kobject_get_unless_zero(
+ struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e23392517db9..cb527c78de9f 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -197,6 +197,7 @@ struct kretprobe_instance {
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
+ void *fp;
char data[0];
};
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828fd71f1..e2df6d397ff0 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -33,6 +33,11 @@ static inline void kref_init(struct kref *kref)
atomic_set(&kref->refcount, 1);
}
+static inline int kref_read(const struct kref *kref)
+{
+ return atomic_read(&kref->refcount);
+}
+
/**
* kref_get - increment refcount for object.
* @kref: object.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eb55374b73f3..ab90a8541aaa 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -129,7 +129,7 @@ static inline bool is_error_page(struct page *page)
extern struct kmem_cache *kvm_vcpu_cache;
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -843,6 +843,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1208,4 +1209,10 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr);
+
#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index df58b01e6962..cdfb67b22317 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1222,6 +1222,7 @@ struct pci_bits {
};
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6bb428..bd0a55821177 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,10 +1,14 @@
#ifndef _LIBFDT_ENV_H
#define _LIBFDT_ENV_H
+#include <linux/kernel.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/include/linux/list.h b/include/linux/list.h
index 5809e9a2de5b..6f935018ea05 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -271,6 +271,36 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list whose current contents you do
+ * not mind losing.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
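An illustrative use of list_cut_before() (not part of the patch; "pending", "batch" and "cut" are hypothetical): detach everything queued ahead of a chosen entry for batch processing.
LIST_HEAD(batch);

/* afterwards "batch" holds the entries that preceded "cut", and "cut"
 * is the first entry remaining on "pending" */
list_cut_before(&batch, &pending, cut);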
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index fa7fd03cb5f9..f2e966937e39 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -51,6 +51,7 @@ struct list_lru {
struct list_lru_node *node;
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
struct list_head list;
+ bool memcg_aware;
#endif
};
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index b01fe1009084..9e20bf7f46a2 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -29,6 +29,11 @@ struct hlist_nulls_node {
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_nulls_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ !is_a_nulls(____ptr) ? hlist_nulls_entry(____ptr, type, member) : NULL; \
+ })
/**
* ptr_is_a_nulls - Test if a ptr is a nulls
* @ptr: ptr to be tested
@@ -66,10 +71,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -79,13 +84,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 134a2f69c21a..9469eef30095 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -272,6 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
+extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859cb441..844fc2973392 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -215,9 +215,9 @@
/* DA9063 Configuration registers */
/* OTP */
-#define DA9063_REG_OPT_COUNT 0x101
-#define DA9063_REG_OPT_ADDR 0x102
-#define DA9063_REG_OPT_DATA 0x103
+#define DA9063_REG_OTP_CONT 0x101
+#define DA9063_REG_OTP_ADDR 0x102
+#define DA9063_REG_OTP_DATA 0x103
/* Customer Trim and Configuration */
#define DA9063_REG_T_OFFSET 0x104
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 3ca0af07fc78..0a68dc8fc25f 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -136,8 +136,8 @@
#define MAX77620_FPS_PERIOD_MIN_US 40
#define MAX20024_FPS_PERIOD_MIN_US 20
-#define MAX77620_FPS_PERIOD_MAX_US 2560
-#define MAX20024_FPS_PERIOD_MAX_US 5120
+#define MAX20024_FPS_PERIOD_MAX_US 2560
+#define MAX77620_FPS_PERIOD_MAX_US 5120
#define MAX77620_REG_FPS_GPIO1 0x54
#define MAX77620_REG_FPS_GPIO2 0x55
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..3ae1fe743bc3 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -178,7 +178,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 638222e43e48..93011c61aafd 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -247,6 +247,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 859fd209603a..509e99076c57 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -578,6 +578,8 @@ enum mlx5_pci_status {
};
struct mlx5_td {
+ /* protects tirs_list against concurrent changes during a tirs refresh */
+ struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 20ee90c47cd5..6dd276227217 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -7909,8 +7909,6 @@ struct mlx5_ifc_query_lag_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-
struct mlx5_ifc_lagc_bits ctx;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 11a5a46ce72b..ca6f213fa4f0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -504,11 +504,6 @@ static inline int is_vmalloc_or_module_addr(const void *x)
extern void kvfree(const void *addr);
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
@@ -763,6 +758,10 @@ static inline bool is_zone_device_page(const struct page *page)
}
#endif
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+ ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
@@ -770,13 +769,22 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_refcount.
*/
- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
page_ref_inc(page);
if (unlikely(is_zone_device_page(page)))
get_zone_device_page(page);
}
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+ page_ref_inc(page);
+ return true;
+}
+
static inline void put_page(struct page *page)
{
page = compound_head(page);
@@ -1179,6 +1187,30 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end);
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally, if the mmap_sem is held for reading,
+ * there is no need for this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check removed once the
+ * coredump code holds the mmap_sem for writing before invoking the
+ * ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+ return likely(!mm->core_state);
+}
/**
* mm_walk - callbacks for walk_page_range
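A sketch of the calling pattern the mmget_still_valid() comment describes (illustrative only; error handling trimmed):
struct mm_struct *mm = get_task_mm(task);

if (mm) {
	down_write(&mm->mmap_sem);
	if (mmget_still_valid(mm)) {
		/* safe to modify the vmas here */
	} else {
		/* a coredump is in flight: leave the address space alone */
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
}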
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8d6decd50220..21b18b755c0e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -262,6 +262,11 @@ struct page_frag_cache {
typedef unsigned long vm_flags_t;
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+ return &page[1].compound_mapcount;
+}
+
/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ed84c07f6a51..1abfe37314a0 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -502,9 +502,9 @@ struct platform_device_id {
#define MDIO_MODULE_PREFIX "mdio:"
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
#define MDIO_ID_ARGS(_id) \
- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
diff --git a/include/linux/module.h b/include/linux/module.h
index fd9e121c7b3f..99f330ae13da 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -129,13 +129,13 @@ extern void cleanup_module(void);
#define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \
- int init_module(void) __attribute__((alias(#initfn)));
+ int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \
- void cleanup_module(void) __attribute__((alias(#exitfn)));
+ void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
#endif
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 13f8052b9ff9..13ddba5e531d 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -392,7 +392,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return mtd->dev.of_node;
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2ecf0f32444e..81c85ba6e2b8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1730,6 +1730,11 @@ struct net_device {
unsigned char if_port;
unsigned char dma;
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
unsigned int mtu;
unsigned short type;
unsigned short hard_header_len;
@@ -3565,7 +3570,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
if (debug_value == 0) /* no output */
return 0;
/* set low N bits */
- return (1 << debug_value) - 1;
+ return (1U << debug_value) - 1;
}
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
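The annotation recommended by the new dev->mtu comment, shown concretely (illustrative only; "new_mtu" is hypothetical):
/* reader side (no lock held) */
unsigned int mtu = READ_ONCE(dev->mtu);

/* writer side (normally under RTNL) */
WRITE_ONCE(dev->mtu, new_mtu);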
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 83b9a2e0d8d4..e3a91b24a31c 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -537,13 +537,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
sizeof(*addr));
}
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
- return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
diff --git a/include/linux/of.h b/include/linux/of.h
index 5c5ace1de7d9..22f048e2d37c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -220,8 +220,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
static inline u64 of_read_number(const __be32 *cell, int size)
{
u64 r = 0;
- while (size--)
- r = (r << 32) | be32_to_cpu(*(cell++));
+ for (; size--; cell++)
+ r = (r << 32) | be32_to_cpu(*cell);
return r;
}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 74e4dda91238..896e4199623a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -545,12 +545,28 @@ static inline int PageTransCompound(struct page *page)
*
* Unlike PageTransCompound, this is safe to be called only while
* split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
* positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped. But it may also be
+ * PTE mapped in the current process, so compare the subpage's _mapcount
+ * to compound_mapcount to filter out the PTE mapped case.
*/
static inline int PageTransCompoundMap(struct page *page)
{
- return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+ struct page *head;
+
+ if (!PageTransCompound(page))
+ return 0;
+
+ if (PageAnon(page))
+ return atomic_read(&page->_mapcount) < 0;
+
+ head = compound_head(page);
+ /* File THP is PMD mapped and not PTE mapped */
+ return atomic_read(&page->_mapcount) ==
+ atomic_read(compound_mapcount_ptr(head));
}
/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4366eb3424e8..2622a891e6ce 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -320,6 +320,8 @@ struct pci_dev {
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
user sysfs */
+ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
+ bit manually */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 84a109449610..b6332cb761a4 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -76,9 +76,9 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
*/
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- s64 ret = fbc->count;
+ /* Prevent reloads of fbc->count */
+ s64 ret = READ_ONCE(fbc->count);
- barrier(); /* Prevent reloads of fbc->count */
if (ret >= 0)
return ret;
return 0;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a652e86b1bdb..66e96fd96886 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -333,6 +333,7 @@ struct phy_c45_device_ids {
* is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* link_timeout: The number of timer firings to wait before the
@@ -369,6 +370,7 @@ struct phy_device {
bool is_pseudo_fixed_link;
bool has_fixups;
bool suspended;
+ bool suspended_by_mdio_bus;
enum phy_state state;
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4f7129389855..c6ae0d5e9f0f 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -107,18 +107,20 @@ struct pipe_buf_operations {
/*
* Get a reference to the pipe buffer.
*/
- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
*/
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- buf->ops->get(pipe, buf);
+ return buf->ops->get(pipe, buf);
}
/**
@@ -178,7 +180,7 @@ struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
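Since pipe_buf_get() can now fail, for instance when the underlying page's reference count cannot safely be raised, callers must check the result. A hedged sketch (foo_ref_buf() and the errno choice are only illustrative):
static int foo_ref_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	if (!pipe_buf_get(pipe, buf))
		return -EAGAIN;	/* reference not taken; buffer is unusable */
	return 0;
}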
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index e82c642fa53c..5913be0793a2 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -84,7 +84,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 37b057b63b46..4bee398ed2fa 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -14,7 +14,11 @@
extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
+#ifdef __clang__
+#define MAX_STACK_ALLOC 768
+#else
#define MAX_STACK_ALLOC 832
+#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index d53a23100401..58ae371556bc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -60,14 +60,17 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_SCHED 0x20
+#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -85,6 +88,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * Similar to ptrace_may_access(). Only to be called from context switch
+ * code. Does not call into audit and the regular LSM hooks due to locking
+ * constraints.
+ */
+extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
+
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 2c6c5114c089..f1bbae014889 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -641,7 +641,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -650,10 +649,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
-
-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 55107a8ff887..23eb8ea07def 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -263,7 +263,7 @@ enum {
};
struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f00fa86ac966..0a60fe354bc5 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -21,7 +21,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/* i_mutex must being held */
static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+ return (ia->ia_valid & ATTR_SIZE) ||
(ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
(ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
}
@@ -54,6 +54,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
return dquot;
}
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return true;
+ if (atomic_read(&dquot->dq_count) > 1)
+ return true;
+ return false;
+}
+
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 6224a0ab0b1e..4d71e3687d1e 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -33,7 +33,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
@@ -65,7 +65,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -93,10 +93,47 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
+}
+
+/**
+ * hlist_nulls_add_tail_rcu - add a new entry at the tail of the nulls list
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; !is_a_nulls(i); i = i->next)
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
}
/**
@@ -118,5 +155,19 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
+/**
+ * hlist_nulls_for_each_entry_safe -
+ * iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ */
+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif
#endif
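A usage sketch for the new safe iterator (not part of the patch; struct foo and "bucket" are hypothetical). The next pointer is sampled before the body runs, so the current entry may be unhooked:
struct foo {
	struct hlist_nulls_node node;
	bool expired;
};

static void foo_prune(struct hlist_nulls_head *bucket)
{
	struct foo *obj;
	struct hlist_nulls_node *pos;

	/* caller is assumed to hold the lock protecting this bucket */
	hlist_nulls_for_each_entry_safe(obj, pos, bucket, node)
		if (obj->expired)
			hlist_nulls_del_rcu(&obj->node);
}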
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 01f71e1d2e94..96037ba940ee 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -306,14 +306,12 @@ void synchronize_rcu(void);
static inline void __rcu_read_lock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_disable();
+ preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_enable();
+ preempt_enable();
}
static inline void synchronize_rcu(void)
@@ -868,7 +866,7 @@ static inline void rcu_preempt_sleep_check(void)
* read-side critical sections may be preempted and they may also block, but
* only when acquiring spinlocks that are subject to priority inheritance.
*/
-static inline void rcu_read_lock(void)
+static __always_inline void rcu_read_lock(void)
{
__rcu_read_lock();
__acquire(RCU);
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index d8ecefaf63ca..3f6b8b9ef49d 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -38,14 +38,11 @@ enum ab8505_regulator_id {
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
- AB8505_LDO_USB,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,
AB8505_LDO_AUX8,
AB8505_LDO_ANA,
- AB8505_SYSCLKREQ_2,
- AB8505_SYSCLKREQ_4,
AB8505_NUM_REGULATORS,
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ce63a80679d1..01036b99fd53 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -482,7 +482,7 @@ static inline unsigned int regulator_get_mode(struct regulator *regulator)
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 68c1448e56bb..2560f8706408 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -65,7 +65,7 @@ struct rchan
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
size_t last_toobig; /* tried to log event > subbuf size */
- struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
+ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
struct dentry *parent; /* parent dentry passed to open */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index db1fe6772ad5..b5542aff192c 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -6,7 +6,7 @@
struct reset_controller_dev;
/**
- * struct reset_control_ops
+ * struct reset_control_ops - reset controller driver callbacks
*
* @reset: for self-deasserting resets, does all necessary
* things to reset the device
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 19d0778ec382..121c8f99ecdd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -125,7 +125,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ebd0afb35d16..1872d4e9acbe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1720,8 +1720,8 @@ struct task_struct {
struct seccomp seccomp;
/* Thread group tracking */
- u32 parent_exec_id;
- u32 self_exec_id;
+ u64 parent_exec_id;
+ u64 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
* mempolicy */
spinlock_t alloc_lock;
@@ -2044,7 +2044,7 @@ static inline bool in_vfork(struct task_struct *tsk)
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -2059,7 +2059,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
@@ -2357,6 +2357,8 @@ static inline void memalloc_noio_restore(unsigned int flags)
#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 6 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 7 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
@@ -2390,6 +2392,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
/*
* task->jobctl flags
*/
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..559ac4590593
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 8e4624efdb6f..f1070e4180ee 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -12,8 +12,8 @@
struct tty_struct;
-extern struct vc_data *sel_cons;
struct tty_struct;
+struct vc_data;
extern void clear_selection(void);
extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
@@ -22,6 +22,8 @@ extern int sel_loadlut(char __user *p);
extern int mouse_reporting(void);
extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
+bool vc_is_sel(struct vc_data *vc);
+
extern int console_blanked;
extern const unsigned char color_table[];
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index eb4f6456521e..cd95b5e395a3 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,6 +161,7 @@ struct uart_port {
struct console *cons; /* struct console, if any */
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
unsigned long sysrq; /* sysrq timeout */
+ unsigned int sysrq_ch; /* char for sysrq */
#endif
/* flags must be updated while holding port mutex */
@@ -470,8 +471,42 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
}
return 0;
}
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (port->sysrq) {
+ if (ch && time_before(jiffies, port->sysrq)) {
+ port->sysrq_ch = ch;
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ return 0;
+}
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ int sysrq_ch;
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
#else
-#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
+static inline int
+uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline int
+uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
+static inline void
+uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
+{
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
#endif
/*
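A hedged sketch of the driver pattern these helpers enable (foo_uart_irq() and foo_read_char() are hypothetical): the sysrq character is only remembered under port->lock, and handle_sysrq() runs after the lock is dropped:
static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;
	int ch;

	spin_lock_irqsave(&port->lock, flags);
	while ((ch = foo_read_char(port)) >= 0) {
		if (uart_prepare_sysrq_char(port, ch))
			continue;	/* remembered; processed after unlock */
		uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	/* drops port->lock, then runs handle_sysrq() if a char was queued */
	uart_unlock_and_check_sysrq(port, flags);
	return IRQ_HANDLED;
}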
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 5308304993be..ffa58ff53e22 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -313,6 +313,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
static inline void allow_signal(int sig)
{
/*
@@ -320,7 +323,17 @@ static inline void allow_signal(int sig)
* know it'll be handled, so that they don't get converted to
* SIGKILL or just silently dropped.
*/
- kernel_sigaction(sig, (__force __sighandler_t)2);
+ kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+ /*
+ * Kernel threads handle their own signals. Let the signal code
+ * know signals sent by the kernel will be handled, so that they
+ * don't get silently dropped.
+ */
+ kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
}
static inline void disallow_signal(int sig)
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644
index 000000000000..bf21591a9e5e
--- /dev/null
+++ b/include/linux/siphash.h
@@ -0,0 +1,145 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+ u64 key[2];
+} siphash_key_t;
+
+static inline bool siphash_key_is_zero(const siphash_key_t *key)
+{
+ return !(key->key[0] | key->key[1]);
+}
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+ const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+ const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+ const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+ const siphash_key_t *key)
+{
+ return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+ const u32 d, const siphash_key_t *key)
+{
+ return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+ const siphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return siphash_1u64(le64_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 24)
+ return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 32)
+ return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+ le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+ key);
+ return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+ const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ return __siphash_unaligned(data, len, key);
+#endif
+ return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+ unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+ const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+ const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ if (__builtin_constant_p(len) && len == 4)
+ return hsiphash_1u32(le32_to_cpu(data[0]), key);
+ if (__builtin_constant_p(len) && len == 8)
+ return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ key);
+ if (__builtin_constant_p(len) && len == 12)
+ return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), key);
+ if (__builtin_constant_p(len) && len == 16)
+ return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+ le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+ key);
+ return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ return __hsiphash_unaligned(data, len, key);
+#endif
+ return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
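A hedged usage sketch of the new API (not part of the patch; the flow-hash context and names are hypothetical, and real code would normally seed the key once at init time, e.g. via net_get_random_once()):
static siphash_key_t foo_hash_key __read_mostly;

static u64 foo_flow_hash(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	/* lazily seed the secret key; shown inline for brevity only */
	if (unlikely(siphash_key_is_zero(&foo_hash_key)))
		get_random_bytes(&foo_hash_key, sizeof(foo_hash_key));

	return siphash_3u32(saddr, daddr, ((u32)sport << 16) | dport,
			    &foo_hash_key);
}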
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f8761774a94f..e37112ac332f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1178,7 +1178,8 @@ static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4
return skb->hash;
}
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 12910cf19869..12a4b09f4d08 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -30,7 +30,7 @@ struct smpboot_thread_data;
* @thread_comm: The base name of the thread
*/
struct smp_hotplug_thread {
- struct task_struct __percpu **store;
+ struct task_struct * __percpu *store;
struct list_head list;
int (*thread_should_run)(unsigned int cpu);
void (*thread_fn)(unsigned int cpu);
diff --git a/include/linux/string.h b/include/linux/string.h
index 2a9180942f03..b19eb06d90bb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -111,6 +111,9 @@ extern void * memscan(void *,int,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7ba040c797ec..da2791b5fe87 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -185,7 +185,6 @@ struct rpc_timer {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
- pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
@@ -201,7 +200,6 @@ struct rpc_wait_queue {
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
-#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 55ff5593c193..d13617c7bcc4 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -135,9 +135,9 @@ struct swap_extent {
/*
* Max bad pages in the new format..
*/
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
- ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+ ((offsetof(union swap_header, magic.magic) - \
+ offsetof(union swap_header, info.badpages)) / sizeof(int))
enum {
SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
@@ -335,14 +335,8 @@ extern unsigned long vm_total_pages;
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
-extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
#define node_reclaim_mode 0
-static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
- unsigned int order)
-{
- return 0;
-}
#endif
extern int page_evictable(struct page *page);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d0c3615f9050..7f517458c64f 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -433,4 +433,7 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+ int shiftlen);
+
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/time.h b/include/linux/time.h
index 4cea09d94208..60fd50559241 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -275,4 +275,16 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_nsec = ns;
}
+/**
+ * time_between32 - check if a 32-bit timestamp is within a given time range
+ * @t: the time which may be within [l,h]
+ * @l: the lower bound of the range
+ * @h: the higher bound of the range
+ *
+ * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are
+ * treated as 32-bit integers.
+ *
+ * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
+ */
+#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
#endif
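Two small worked cases for the macro above (illustrative only). Subtracting @l from both sides keeps the comparison correct even when the 32-bit values wrap:
/* 150 is inside [100, 200]: (200-100)=100 >= (150-100)=50  -> true  */
/* 250 is outside [100, 200]: (200-100)=100 >= (250-100)=150 -> false */
bool in  = time_between32(150u, 100u, 200u);
bool out = time_between32(250u, 100u, 200u);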
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fe1b8623a3a1..fe483976b119 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -356,6 +356,7 @@ struct tty_file_private {
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
#define TTY_HUPPED 18 /* Post driver->hangup() */
#define TTY_HUPPING 19 /* Hangup in progress */
+#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
/* Values for tty->flow_change */
@@ -373,6 +374,12 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val)
smp_mb();
}
+static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
+{
+ return file->f_flags & O_NONBLOCK ||
+ test_bit(TTY_LDISC_CHANGING, &tty->flags);
+}
+
static inline bool tty_io_error(struct tty_struct *tty)
{
return test_bit(TTY_IO_ERROR, &tty->flags);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5769942eb818..5d7fd18e1e9b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -129,7 +129,6 @@ enum usb_interface_condition {
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
@@ -186,7 +185,6 @@ struct usb_interface {
struct device dev; /* interface specific device info */
struct device *usb_dev;
- atomic_t pm_usage_cnt; /* usage counter for autosuspend */
struct work_struct reset_ws; /* for resets in atomic context */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e4516e9ded0f..4b810bc7ae63 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -48,6 +48,7 @@ struct usb_ep;
* by adding a zero length packet as needed;
* @short_not_ok: When reading data, makes short packets be
* treated as errors (queue stops advancing till cleanup).
+ * @dma_mapped: Indicates if request has been mapped to DMA (internal)
* @complete: Function called when request completes, so this request and
* its buffer may be re-used. The function will always be called with
* interrupts disabled, and it must not sleep.
@@ -103,6 +104,7 @@ struct usb_request {
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
+ unsigned dma_mapped:1;
void (*complete)(struct usb_ep *ep,
struct usb_request *req);
diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h
index e345ceaf72d6..9dc46010a067 100644
--- a/include/linux/usb/irda.h
+++ b/include/linux/usb/irda.h
@@ -118,11 +118,22 @@ struct usb_irda_cs_descriptor {
* 6 - 115200 bps
* 7 - 576000 bps
* 8 - 1.152 Mbps
- * 9 - 5 mbps
+ * 9 - 4 Mbps
* 10..15 - Reserved
*/
#define USB_IRDA_STATUS_LINK_SPEED 0x0f
+#define USB_IRDA_LS_NO_CHANGE 0
+#define USB_IRDA_LS_2400 1
+#define USB_IRDA_LS_9600 2
+#define USB_IRDA_LS_19200 3
+#define USB_IRDA_LS_38400 4
+#define USB_IRDA_LS_57600 5
+#define USB_IRDA_LS_115200 6
+#define USB_IRDA_LS_576000 7
+#define USB_IRDA_LS_1152000 8
+#define USB_IRDA_LS_4000000 9
+
/* The following is a 4-bit value used only for
* outbound header:
*
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e8d36938f09a..b38c1871b735 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -62,7 +62,7 @@ struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
- * expected. The caller should query virtqueue_get_ring_size to learn
+ * expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943c..d868a735545b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -93,8 +93,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
-void vmalloc_sync_all(void);
-
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
+
/*
* Lowlevel-APIs (not for driver use!)
*/
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 1bd31a38c51e..ce13d4677f1d 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -75,9 +75,18 @@ enum {
/*
* A single VMCI device has an upper limit of 128MB on the amount of
- * memory that can be used for queue pairs.
+ * memory that can be used for queue pairs. Since each queue pair
+ * consists of at least two pages, the memory limit also dictates the
+ * number of queue pairs a guest can create.
*/
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
+
+/*
+ * There can be at most PAGE_SIZE doorbells since there is one doorbell
+ * per byte in the doorbell bitmap page.
+ */
+#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
/*
* Queues with pre-mapped data pages must be small, so that we don't pin
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 6abd24f258bc..362db91266e6 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -141,7 +141,7 @@ static inline bool vt_force_oops_output(struct vc_data *vc)
return false;
}
-extern char vt_dont_switch;
+extern bool vt_dont_switch;
extern int default_utf8;
extern int global_cursor_default;
diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h
index 3f284bc03180..5650c1628383 100644
--- a/include/math-emu/soft-fp.h
+++ b/include/math-emu/soft-fp.h
@@ -138,7 +138,7 @@ do { \
_FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \
} while (0)
-#define _FP_ROUND_ZERO(wc, X) 0
+#define _FP_ROUND_ZERO(wc, X) (void)0
#define _FP_ROUND_PINF(wc, X) \
do { \
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 4376beeb28c2..5d8ceeddc797 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -96,7 +96,7 @@ struct vpbe_config {
struct encoder_config_info *ext_encoders;
/* amplifier information goes here */
struct amp_config_info *amp;
- int num_outputs;
+ unsigned int num_outputs;
/* Order is venc outputs followed by LCD and then external encoders */
struct vpbe_output *outputs;
};
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index bb97f64e91c8..c688545f383c 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -270,7 +270,7 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
} while (0)
@@ -282,7 +282,7 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
})
@@ -296,8 +296,8 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
} while (0)
/*
@@ -310,8 +310,8 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd,
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
})
/*
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index d2125f0cc7cd..1584c760b993 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -75,10 +75,10 @@ static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
r->left = boundary->left;
if (r->top < boundary->top)
r->top = boundary->top;
- if (r->left + r->width > boundary->width)
- r->left = boundary->width - r->width;
- if (r->top + r->height > boundary->height)
- r->top = boundary->height - r->height;
+ if (r->left + r->width > boundary->left + boundary->width)
+ r->left = boundary->left + boundary->width - r->width;
+ if (r->top + r->height > boundary->top + boundary->height)
+ r->top = boundary->top + boundary->height - r->height;
}
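
The hunk above makes v4l2_rect_map_inside() honour a boundary whose origin is not (0,0). Below is a standalone sketch of the corrected clamping (a local struct standing in for struct v4l2_rect, assuming r is never larger than the boundary), showing a case the old arithmetic got wrong:

#include <stdio.h>

struct rect { int left, top, width, height; };

/* Mirror of the fixed clamping logic: keep r inside the boundary while
 * honouring boundary->left/top instead of assuming an origin of (0,0). */
static void map_inside(struct rect *r, const struct rect *b)
{
	if (r->left < b->left)
		r->left = b->left;
	if (r->top < b->top)
		r->top = b->top;
	if (r->left + r->width > b->left + b->width)
		r->left = b->left + b->width - r->width;
	if (r->top + r->height > b->top + b->height)
		r->top = b->top + b->height - r->height;
}

int main(void)
{
	struct rect bounds = { .left = 600, .top = 0, .width = 100, .height = 100 };
	struct rect r      = { .left = 650, .top = 10, .width = 60, .height = 20 };

	map_inside(&r, &bounds);
	/* Fixed logic clamps to left=640 (inside [600, 700)). The old code
	 * compared against the width alone and would have produced left=40,
	 * entirely outside the boundary. */
	printf("left=%d top=%d\n", r.left, r.top);   /* 640 10 */
	return 0;
}
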
/**
diff --git a/include/net/arp.h b/include/net/arp.h
index 1b3f86981757..92d2f7d7d1cb 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -17,6 +17,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
return val * hash_rnd[0];
}
+#ifdef CONFIG_INET
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
@@ -24,6 +25,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ return NULL;
+}
+#endif
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
{
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4931787193c3..57a7dba49d29 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -176,6 +176,9 @@ struct adv_info {
#define HCI_MAX_SHORT_NAME_LENGTH 10
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index fe328c52c46b..801489bb14c3 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
*/
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
+static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
+{
+ u8 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ return tmp;
+}
+
+static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
+{
+ __le16 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 2);
+
+ return le16_to_cpu(tmp);
+}
+
+static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
+{
+ __le32 tmp;
+
+ cfpkt_extr_head(pkt, &tmp, 4);
+
+ return le32_to_cpu(tmp);
+}
+
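
These helpers read fixed-width little-endian fields off the packet head and convert them to host order. A portable userspace equivalent of that byte-order handling, assuming a plain byte buffer instead of a struct cfpkt:

#include <stdint.h>
#include <stdio.h>

/* Read little-endian fields from a byte buffer, independent of host order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 0x34 0x12 ... encodes 0x1234 in little-endian order. */
	const uint8_t buf[] = { 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };

	printf("u16: 0x%04x\n", get_le16(buf));      /* 0x1234 */
	printf("u32: 0x%08x\n", get_le32(buf + 2));  /* 0x12345678 */
	return 0;
}
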
/*
* Peek header from packet.
* Reads data from packet without changing packet.
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 6242f0670d23..eeff27d85104 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4184,6 +4184,17 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len);
/**
+ * cfg80211_send_layer2_update - send layer 2 update frame
+ *
+ * @dev: network device
+ * @addr: STA MAC address
+ *
+ * Wireless drivers can use this function to update forwarding tables in bridge
+ * devices upon STA association.
+ */
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr);
+
+/**
* DOC: Regulatory enforcement infrastructure
*
* TODO
diff --git a/include/net/dst.h b/include/net/dst.h
index ddcff17615da..e57e8fb9a43d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,7 +110,7 @@ struct dst_entry {
struct dst_metrics {
u32 metrics[RTAX_MAX];
atomic_t refcnt;
-};
+} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
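
The new __aligned(4) on struct dst_metrics guarantees the two low bits of any pointer to it are zero, which is what lets DST_METRICS_FLAGS live in those bits. The sketch below illustrates that general pointer-tagging technique with a made-up FLAG_READ_ONLY bit; it is not the kernel's DST_METRICS_* implementation.

#include <stdint.h>
#include <stdio.h>

struct metrics { unsigned int m[4]; } __attribute__((aligned(4)));

#define FLAG_READ_ONLY 0x1UL   /* illustrative flag stored in bit 0 */

/* Pack a flag into the (always zero) low bits of an aligned pointer. */
static uintptr_t tag_ptr(struct metrics *p, unsigned long flags)
{
	return (uintptr_t)p | flags;
}

static struct metrics *untag_ptr(uintptr_t v)
{
	return (struct metrics *)(v & ~3UL);   /* mask off the two flag bits */
}

int main(void)
{
	static struct metrics defaults;
	uintptr_t v = tag_ptr(&defaults, FLAG_READ_ONLY);

	printf("read-only: %lu\n", (unsigned long)(v & FLAG_READ_ONLY));
	printf("same ptr : %d\n", untag_ptr(v) == &defaults);
	return 0;
}
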
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 456e4a6006ab..0b0ad792dd5c 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -87,6 +87,7 @@ struct fib_rules_ops {
[FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
[FRA_PRIORITY] = { .type = NLA_U32 }, \
[FRA_FWMARK] = { .type = NLA_U32 }, \
+ [FRA_TUN_ID] = { .type = NLA_U64 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d9534927d93b..7a85a4ef6868 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -3,6 +3,8 @@
#include <linux/types.h>
#include <linux/in6.h>
+#include <linux/siphash.h>
+#include <linux/string.h>
#include <uapi/linux/if_ether.h>
/**
@@ -151,7 +153,7 @@ struct flow_dissector {
struct flow_keys {
struct flow_dissector_key_control control;
#define FLOW_KEYS_HASH_START_FIELD basic
- struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_keyid keyid;
@@ -203,4 +205,12 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
return ((char *)target_container) + flow_dissector->offset[key_id];
}
+static inline void
+flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
+ struct flow_dissector_key_basic *key_basic)
+{
+ memset(key_control, 0, sizeof(*key_control));
+ memset(key_basic, 0, sizeof(*key_basic));
+}
+
#endif
diff --git a/include/net/fq.h b/include/net/fq.h
index 6d8521a30c5c..2c7687902789 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -70,7 +70,7 @@ struct fq {
struct list_head backlogs;
spinlock_t lock;
u32 flows_cnt;
- u32 perturbation;
+ siphash_key_t perturbation;
u32 limit;
u32 memory_limit;
u32 memory_usage;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 4e6131cd3f43..45a0d9a006a0 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -105,7 +105,7 @@ static struct fq_flow *fq_flow_classify(struct fq *fq,
lockdep_assert_held(&fq->lock);
- hash = skb_get_hash_perturb(skb, fq->perturbation);
+ hash = skb_get_hash_perturb(skb, &fq->perturbation);
idx = reciprocal_scale(hash, fq->flows_cnt);
flow = &fq->flows[idx];
@@ -252,7 +252,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
INIT_LIST_HEAD(&fq->backlogs);
spin_lock_init(&fq->lock);
fq->flows_cnt = max_t(u32, flows_cnt, 1);
- fq->perturbation = prandom_u32();
+ get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq->quantum = 300;
fq->limit = 8192;
fq->memory_limit = 16 << 20; /* 16 MBytes */
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index a3812e9c8fee..c2c724abde57 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -76,8 +76,8 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
atomic_t refcnt;
- struct sk_buff *fragments; /* Used in IPv6. */
- struct rb_root rb_fragments; /* Used in IPv4. */
+ struct sk_buff *fragments; /* used in 6LoWPAN IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
struct sk_buff *fragments_tail;
struct sk_buff *last_run_head;
ktime_t stamp;
@@ -152,4 +152,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
extern const u8 ip_frag_ecn_table[16];
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK 0
+#define IPFRAG_DUP 1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
#endif
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 0574493e3899..fc445e7ccadf 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -98,12 +98,18 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use different 'nulls' end-of-chain values for all hash buckets:
+ * A socket might transition from ESTABLISHED to LISTEN state without
+ * RCU grace period. A lookup in ehash table needs to handle this case.
*/
+#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ struct hlist_nulls_head nulls_head;
+ };
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
diff --git a/include/net/ip.h b/include/net/ip.h
index f06cd30bb44c..d577fb5647c5 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -580,7 +580,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
/*
* Functions provided by ip_sockglue.c
@@ -620,4 +620,9 @@ extern int sysctl_icmp_msgs_burst;
int ip_misc_proc_init(void);
#endif
+static inline bool inetdev_valid_mtu(unsigned int mtu)
+{
+ return likely(mtu >= IPV4_MIN_MTU);
+}
+
#endif /* _IP_H */
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2c43993e079c..e45b6286983c 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -195,6 +195,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen != 128 &&
+ !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 1b1cf33cbfb0..2b6abd046087 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -149,9 +149,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
- if (unlikely(net_xmit_eval(err)))
- pkt_len = -1;
- iptunnel_xmit_stats(dev, pkt_len);
+
+ if (dev) {
+ if (unlikely(net_xmit_eval(err)))
+ pkt_len = -1;
+ iptunnel_xmit_stats(dev, pkt_len);
+ }
}
#endif
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index cd6018a9ee24..a26165744d98 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -887,6 +887,7 @@ struct netns_ipvs {
struct delayed_work defense_work; /* Work handler */
int drop_rate;
int drop_counter;
+ int old_secure_tcp;
atomic_t dropentry;
/* locks in ctl.c */
spinlock_t dropentry_lock; /* drop entry handling */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7cb100d25bb5..168009eef5e4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -511,35 +511,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif
-struct inet_frag_queue;
-
-enum ip6_defrag_users {
- IP6_DEFRAG_LOCAL_DELIVER,
- IP6_DEFRAG_CONNTRACK_IN,
- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_OUT,
- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
-};
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-extern const struct rhashtable_params ip6_rhash_params;
-
-/*
- * Equivalent of ipv4 struct ip
- */
-struct frag_queue {
- struct inet_frag_queue q;
-
- int iif;
- unsigned int csum;
- __u16 nhoffset;
- u8 ecn;
-};
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
-
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000000..1f77fb4dc79d
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ * Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+ const struct frag_v6_compare_key *key = a;
+
+ q->key.v6 = *key;
+ fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+ struct net_device *dev = NULL;
+ struct sk_buff *head;
+
+ rcu_read_lock();
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q);
+
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
+
+ /* sk_buff::dev and sk_buff::rbnode share a union. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ head = inet_frag_pull_head(&fq->q);
+ if (!head)
+ goto out;
+
+ head->dev = dev;
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
+out:
+ spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+}
+#endif
+#endif
diff --git a/include/net/llc.h b/include/net/llc.h
index 82d989995d18..95e5ced4c133 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -66,6 +66,7 @@ struct llc_sap {
int sk_count;
struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
+ struct rcu_head rcu;
};
static inline
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index df528a623548..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f6017ddc4ded..a68a460fa4f3 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -425,8 +425,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
unsigned long now = jiffies;
- if (neigh->used != now)
- neigh->used = now;
+ if (READ_ONCE(neigh->used) != now)
+ WRITE_ONCE(neigh->used, now);
if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
return __neigh_event_send(neigh, skb);
return 0;
@@ -454,7 +454,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c05db6ff2515..0cdafe3935a6 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -53,6 +53,7 @@ struct net {
*/
spinlock_t rules_mod_lock;
+ u32 hash_mix;
atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 0b0c35c37125..238d1b83a45a 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -48,7 +48,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
}
struct net_device *setup_pre_routing(struct sk_buff *skb);
-void br_netfilter_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
int br_validate_ipv6(struct net *net, struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 9ae819e27940..b57a9f37c297 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -336,6 +336,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 66f6b84df287..7ba9a624090f 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -705,7 +705,8 @@ struct nft_expr_ops {
*/
struct nft_expr {
const struct nft_expr_ops *ops;
- unsigned char data[];
+ unsigned char data[]
+ __attribute__((aligned(__alignof__(u64))));
};
static inline void *nft_expr_priv(const struct nft_expr *expr)
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 69a6715d9f3f..a347b2f9e748 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -1,21 +1,10 @@
#ifndef __NET_NS_HASH_H__
#define __NET_NS_HASH_H__
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
static inline u32 net_hash_mix(const struct net *net)
{
-#ifdef CONFIG_NET_NS
- /*
- * shift this right to eliminate bits, that are
- * always zeroed
- */
-
- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
-#else
- return 0;
-#endif
+ return net->hash_mix;
}
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 7adf4386ac8f..af1c5e7c7e94 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -8,6 +8,7 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
#include <linux/rcupdate.h>
+#include <linux/siphash.h>
struct tcpm_hash_bucket;
struct ctl_table_header;
@@ -94,6 +95,7 @@ struct netns_ipv4 {
#endif
int sysctl_tcp_mtu_probing;
int sysctl_tcp_base_mss;
+ int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;
@@ -136,5 +138,6 @@ struct netns_ipv4 {
int sysctl_fib_multipath_use_neigh;
#endif
atomic_t rt_genid;
+ siphash_key_t ip_id_key;
};
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 538f3c4458b0..5d5a137b9067 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -276,6 +276,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
return q;
}
+static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
+{
+ return rcu_dereference_bh(qdisc->dev_queue->qdisc);
+}
+
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a306c69..803fc26ef0ba 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
- struct sctphdr *sh = sctp_hdr(skb);
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
__le32 ret, old = sh->checksum;
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 579ded2c6ef1..32db5208854d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
*/
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 11c3bf262a85..b46133a41f55 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1202,6 +1202,9 @@ struct sctp_ep_common {
/* What socket does this endpoint belong to? */
struct sock *sk;
+ /* Cache the netns; it won't change once set */
+ struct net *net;
+
/* This is where we receive inbound chunks. */
struct sctp_inq inqueue;
diff --git a/include/net/sock.h b/include/net/sock.h
index 15bb04dec40e..d6bce19ca261 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -650,11 +650,22 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
@@ -1195,7 +1206,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
percpu_counter_inc(sk->sk_prot->sockets_allocated);
}
-static inline int
+static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
@@ -2039,12 +2050,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_allow_blocking(sk->sk_allocation))
+ if (gfpflags_normal_context(sk->sk_allocation))
return &current->task_frag;
return &sk->sk_frag;
@@ -2131,7 +2147,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
return kt;
#else
- return sk->sk_stamp;
+ return READ_ONCE(sk->sk_stamp);
#endif
}
@@ -2142,7 +2158,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
sk->sk_stamp = kt;
write_sequnlock(&sk->sk_stamp_seq);
#else
- sk->sk_stamp = kt;
+ WRITE_ONCE(sk->sk_stamp, kt);
#endif
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index fed2a78fb8cb..0e3a88f808c6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -53,6 +53,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
+#define TCP_MIN_SND_MSS 48
+#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
/*
* Never offer a window over 32767 without using window scaling. Some
@@ -492,19 +494,27 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
*/
static inline void tcp_synq_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned long last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
unsigned long now = jiffies;
- if (time_after(now, last_overflow + HZ))
- tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+ if (!time_between32(now, last_overflow, last_overflow + HZ))
+ WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned long last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
- return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+ /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+ * then we're under synflood. However, we have to use
+ * 'last_overflow - HZ' as lower bound. That's because a concurrent
+ * tcp_synq_overflow() could update .ts_recent_stamp after we read
+ * jiffies but before we store .ts_recent_stamp into last_overflow,
+ * which could lead to rejecting a valid syncookie.
+ */
+ return !time_between32(jiffies, last_overflow - HZ,
+ last_overflow + TCP_SYNCOOKIE_VALID);
}
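
The comment above explains why the lower bound is last_overflow - HZ rather than last_overflow. A standalone sketch of the check (illustrative HZ and TCP_SYNCOOKIE_VALID values, reusing the wrap-safe time_between32 added earlier in this diff):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))

#define HZ                   100u        /* illustrative tick rate */
#define TCP_SYNCOOKIE_VALID  (60u * HZ)  /* illustrative validity window */

/* Mimics tcp_synq_no_recent_overflow(): true when jiffies is outside
 * [last_overflow - HZ, last_overflow + TCP_SYNCOOKIE_VALID]. */
static int no_recent_overflow(u32 jiffies, u32 last_overflow)
{
	return !time_between32(jiffies, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

int main(void)
{
	u32 last = 0xffffff00u;             /* overflow recorded near the wrap */

	/* Still inside the validity window, even though jiffies wrapped. */
	printf("%d\n", no_recent_overflow(last + 10u * HZ, last));  /* 0 */
	/* Far in the future: cookies are no longer expected. */
	printf("%d\n", no_recent_overflow(last + 100u * HZ, last)); /* 1 */
	return 0;
}
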
static inline u32 tcp_cookie_time(void)
@@ -1510,6 +1520,11 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
void tcp_fastopen_init_key_once(bool publish);
#define TCP_FASTOPEN_KEY_LENGTH 16
+static inline void tcp_init_send_head(struct sock *sk)
+{
+ sk->sk_send_head = NULL;
+}
+
/* Fastopen key context */
struct tcp_fastopen_context {
struct crypto_cipher *tfm;
@@ -1526,6 +1541,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
sk_wmem_free_skb(sk, skb);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
+ tcp_init_send_head(sk);
inet_csk(sk)->icsk_backoff = 0;
}
@@ -1587,9 +1603,25 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
tcp_sk(sk)->highest_sack = NULL;
}
-static inline void tcp_init_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
- sk->sk_send_head = NULL;
+ struct sk_buff *skb = tcp_write_queue_head(sk);
+
+ if (skb == tcp_send_head(sk))
+ skb = NULL;
+
+ return skb;
+}
+
+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ struct sk_buff *skb = tcp_send_head(sk);
+
+ /* empty retransmit queue, for example due to zero window */
+ if (skb == tcp_write_queue_head(sk))
+ return NULL;
+
+ return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
}
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 835c30e491c8..9e2f260cbb51 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1297,6 +1297,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
return atomic_read(&x->tunnel_users);
}
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
return (!userproto || proto == userproto ||
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a42535f252b5..1d9c701b1c7b 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
union ib_gid {
u8 raw[16];
@@ -1025,7 +1026,7 @@ struct ib_qp_init_attr {
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
- enum ib_qp_create_flags create_flags;
+ u32 create_flags;
/*
* Only needed for special QP types, or when using the RW API.
@@ -1415,9 +1416,10 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
- IB_POLL_SOFTIRQ, /* poll from softirq context */
- IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_DIRECT, /* caller context, no hw completions */
+ IB_POLL_SOFTIRQ, /* poll from softirq context */
+ IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
struct ib_cq {
@@ -1434,6 +1436,7 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ struct workqueue_struct *comp_wq;
};
struct ib_srq {
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 722d3264d3bf..6be92eede5c0 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -241,6 +241,7 @@ struct fcoe_fcf {
* @vn_mac: VN_Node assigned MAC address for data
*/
struct fcoe_rport {
+ struct fc_rport_priv rdata;
unsigned long time;
u16 fcoe_len;
u16 flags;
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 56710e03101c..1fcf14aee28a 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -5,8 +5,6 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
-#define SCSI_LOG_BUFSIZE 128
-
extern void scsi_print_command(struct scsi_cmnd *);
extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 96bc5acdade3..49482080311a 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -185,10 +185,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index af1fb37c6b26..b1d3cce26ce2 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -100,7 +100,7 @@ struct snd_pcm_ops {
#endif
#define SNDRV_PCM_IOCTL1_RESET 0
-#define SNDRV_PCM_IOCTL1_INFO 1
+/* 1 is an absent slot. */
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
#define SNDRV_PCM_IOCTL1_GSTATE 3
#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index f730b91e472f..5432111c8761 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
struct list_head list; /* list of all substream for given stream */
int stream; /* direction */
int number; /* substream number */
- unsigned int opened: 1, /* open flag */
- append: 1, /* append flag (merge more streams) */
- active_sensing: 1; /* send active sensing when close */
+ bool opened; /* open flag */
+ bool append; /* append flag (merge more streams) */
+ bool active_sensing; /* send active sensing when close */
int use_count; /* use counter (for output) */
size_t bytes;
struct snd_rawmidi *rmidi;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index f60d755f7ac6..ab9fa975f880 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -339,6 +339,8 @@ struct device;
#define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
#define SND_SOC_DAPM_PRE_POST_PMD \
(SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
+#define SND_SOC_DAPM_PRE_POST_PMU \
+ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
/* convenience event type detection */
#define SND_SOC_DAPM_EVENT_ON(e) \
diff --git a/include/sound/timer.h b/include/sound/timer.h
index c4d76ff056c6..7ae226ab6990 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -90,6 +90,8 @@ struct snd_timer {
struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */
struct tasklet_struct task_queue;
+ int max_instances; /* upper limit of timer instances */
+ int num_instances; /* current number of timer instances */
};
struct snd_timer_instance {
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 6021c3acb6c5..b1814d2762bd 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -671,7 +671,7 @@ struct iscsi_session {
atomic_t session_logout;
atomic_t session_reinstatement;
atomic_t session_stop_active;
- atomic_t sleep_on_sess_wait_comp;
+ atomic_t session_close;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index d6be935caa50..ecd1a0f7bd3e 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -63,7 +63,11 @@ TRACE_EVENT(xen_mc_callback,
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
TP_ARGS(fn, data),
TP_STRUCT__entry(
- __field(xen_mc_callback_fn_t, fn)
+ /*
+ * Use field_struct to avoid is_signed_type()
+ * comparison of a function pointer.
+ */
+ __field_struct(xen_mc_callback_fn_t, fn)
__field(void *, data)
),
TP_fast_assign(
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 03725fe89859..02137b508666 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -756,7 +756,7 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
index 79d05981fc4b..e2c44d2f7d5b 100644
--- a/include/uapi/linux/coda_psdev.h
+++ b/include/uapi/linux/coda_psdev.h
@@ -6,19 +6,6 @@
#define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
- struct list_head uc_chain;
- caddr_t uc_data;
- u_short uc_flags;
- u_short uc_inSize; /* Size is at most 5000 bytes */
- u_short uc_outSize;
- u_short uc_opcode; /* copied from data to save lookup */
- int uc_unique;
- wait_queue_head_t uc_sleep; /* process' wait queue */
-};
-
#define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index 7e4272cf1fb2..741309cedd2c 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -1,8 +1,10 @@
#ifndef __UAPI_CORESIGHT_STM_H_
#define __UAPI_CORESIGHT_STM_H_
-#define STM_FLAG_TIMESTAMPED BIT(3)
-#define STM_FLAG_GUARANTEED BIT(7)
+#include <linux/const.h>
+
+#define STM_FLAG_TIMESTAMPED _BITUL(3)
+#define STM_FLAG_GUARANTEED _BITUL(7)
/*
* The CoreSight STM supports guaranteed and invariant timing
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8c5335bde212..5dd3332ebc66 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1687,6 +1687,8 @@ enum ethtool_reset_flags {
* %ethtool_link_mode_bit_indices for the link modes, and other
* link features that the link partner advertised through
* autonegotiation; 0 if unknown or not applicable. Read-only.
+ * @transceiver: Used to distinguish different possible PHY types,
+ * reported consistently by PHYLIB. Read-only.
*
* If autonegotiation is disabled, the speed and @duplex represent the
* fixed link mode and are writable if the driver supports multiple
@@ -1738,7 +1740,9 @@ struct ethtool_link_settings {
__u8 eth_tp_mdix;
__u8 eth_tp_mdix_ctrl;
__s8 link_mode_masks_nwords;
- __u32 reserved[8];
+ __u8 transceiver;
+ __u8 reserved1[3];
+ __u32 reserved[7];
__u32 link_mode_masks[0];
/* layout of link_mode_masks fields:
* __u32 map_supported[link_mode_masks_nwords];
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 42fa977e3b14..8ca749a109d5 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -215,10 +215,12 @@ struct fuse_file_lock {
* FOPEN_DIRECT_IO: bypass page cache for this open file
* FOPEN_KEEP_CACHE: don't invalidate the data cache on open
* FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_STREAM: the file is stream-like (no file position at all)
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
+#define FOPEN_STREAM (1 << 4)
/**
* INIT request/reply flags
diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
index b58635f722da..ae1e1fba2e13 100644
--- a/include/uapi/linux/isdn/capicmd.h
+++ b/include/uapi/linux/isdn/capicmd.h
@@ -15,6 +15,7 @@
#define CAPI_MSG_BASELEN 8
#define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
#define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
/*----- CAPI commands -----*/
#define CAPI_ALERT 0x01
diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h
index 58ffcfb7978e..c2b0886c7c25 100644
--- a/include/uapi/linux/netfilter/xt_sctp.h
+++ b/include/uapi/linux/netfilter/xt_sctp.h
@@ -40,19 +40,19 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] |= \
- 1 << (type % bytes(__u32)); \
+ 1u << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] &= \
- ~(1 << (type % bytes(__u32))); \
+ ~(1u << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
((chunkmap)[type / bytes (__u32)] & \
- (1 << (type % bytes (__u32)))) ? 1: 0; \
+ (1u << (type % bytes (__u32)))) ? 1: 0; \
})
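
Switching these chunk-map macros from 1 << n to 1u << n matters once the bit index reaches 31: left-shifting a signed 1 into the sign bit is undefined behaviour in C. A minimal userspace illustration, mirroring the bytes() helper these macros use, with chunk type 255 (bit index 31):

#include <stdio.h>

#define bytes(type) (sizeof(type) * 8)   /* same helper shape as xt_sctp.h */

int main(void)
{
	unsigned int map[8] = { 0 };
	unsigned int type = 255;             /* 255 % 32 == 31: the sign bit */

	/* 1u << 31 is well defined; 1 << 31 would overflow a signed int. */
	map[type / bytes(unsigned int)] |= 1u << (type % bytes(unsigned int));

	printf("word 7 = 0x%08x\n", map[7]); /* 0x80000000 */
	return 0;
}
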
#define SCTP_CHUNKMAP_RESET(chunkmap) \
diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
index 2a8a3addb675..f9b6b5be7ddf 100644
--- a/include/uapi/linux/nilfs2_ondisk.h
+++ b/include/uapi/linux/nilfs2_ondisk.h
@@ -28,7 +28,7 @@
#include <linux/types.h>
#include <linux/magic.h>
-
+#include <asm/byteorder.h>
#define NILFS_INODE_BMAP_SIZE 7
@@ -532,19 +532,19 @@ enum {
static inline void \
nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
- (1UL << NILFS_CHECKPOINT_##flag)); \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline void \
nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
{ \
- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
+ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
~(1UL << NILFS_CHECKPOINT_##flag)); \
} \
static inline int \
nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
{ \
- return !!(le32_to_cpu(cp->cp_flags) & \
+ return !!(__le32_to_cpu(cp->cp_flags) & \
(1UL << NILFS_CHECKPOINT_##flag)); \
}
@@ -594,20 +594,20 @@ enum {
static inline void \
nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
{ \
- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
+ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
(1UL << NILFS_SEGMENT_USAGE_##flag));\
} \
static inline void \
nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
{ \
su->su_flags = \
- cpu_to_le32(le32_to_cpu(su->su_flags) & \
+ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
} \
static inline int \
nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
{ \
- return !!(le32_to_cpu(su->su_flags) & \
+ return !!(__le32_to_cpu(su->su_flags) & \
(1UL << NILFS_SEGMENT_USAGE_##flag)); \
}
@@ -618,15 +618,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
static inline void
nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
{
- su->su_lastmod = cpu_to_le64(0);
- su->su_nblocks = cpu_to_le32(0);
- su->su_flags = cpu_to_le32(0);
+ su->su_lastmod = __cpu_to_le64(0);
+ su->su_nblocks = __cpu_to_le32(0);
+ su->su_flags = __cpu_to_le32(0);
}
static inline int
nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
{
- return !le32_to_cpu(su->su_flags);
+ return !__le32_to_cpu(su->su_flags);
}
/**
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 64776b72e1eb..64ec0d62e5f5 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -202,6 +202,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 3442a26d36d9..56e3460d1f9f 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -282,6 +282,7 @@ enum
LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
+ LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 087b0ef82c07..bbebd258cf07 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -301,8 +301,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_ptr = (struct tlv_desc *)tlv;
tlv_ptr->tlv_type = htons(type);
tlv_ptr->tlv_len = htons(tlv_len);
- if (len && data)
- memcpy(TLV_DATA(tlv_ptr), data, tlv_len);
+ if (len && data) {
+ memcpy(TLV_DATA(tlv_ptr), data, len);
+ memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
+ }
return TLV_SPACE(len);
}
@@ -399,8 +401,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
tcm_hdr->tcm_len = htonl(msg_len);
tcm_hdr->tcm_type = htons(cmd);
tcm_hdr->tcm_flags = htons(flags);
- if (data_len && data)
+ if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
+ memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
+ }
return TCM_SPACE(data_len);
}
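
Both TLV_SET() and TCM_SET() above now copy only the caller-supplied length and zero the padding up to the aligned TLV/TCM space, so stale heap or stack bytes can no longer leak into the message. A standalone sketch of the same copy-then-zero-pad pattern, using a hypothetical host-byte-order TLV layout rather than the real TIPC one:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x)   (((x) + 3u) & ~3u)
#define TLV_HDR_LEN 4u                       /* type + length, 2 bytes each */
#define TLV_SPACE(datalen) ALIGN4(TLV_HDR_LEN + (datalen))

/* Fill a TLV: copy only 'len' payload bytes, then zero the alignment pad
 * so the buffer never carries stale stack/heap contents. */
static size_t tlv_set(uint8_t *buf, uint16_t type, const void *data, uint16_t len)
{
	uint16_t tlv_len = TLV_HDR_LEN + len;

	memcpy(buf, &type, 2);
	memcpy(buf + 2, &tlv_len, 2);
	if (len && data) {
		memcpy(buf + TLV_HDR_LEN, data, len);
		memset(buf + TLV_HDR_LEN + len, 0, TLV_SPACE(len) - tlv_len);
	}
	return TLV_SPACE(len);
}

int main(void)
{
	uint8_t buf[16];

	memset(buf, 0xaa, sizeof(buf));          /* simulate stale memory */
	size_t used = tlv_set(buf, 1, "abc", 3); /* 3 payload bytes, 1 pad byte */

	printf("space=%zu pad=0x%02x\n", used, (unsigned)buf[TLV_HDR_LEN + 3]); /* 8, 0x00 */
	return 0;
}
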
diff --git a/init/main.c b/init/main.c
index 3c7f71d8e704..148843e627a0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -516,6 +516,8 @@ asmlinkage __visible void __init start_kernel(void)
page_alloc_init();
pr_notice("Kernel command line: %s\n", boot_command_line);
+ /* parameters may set static keys */
+ jump_label_init();
parse_early_param();
after_dashes = parse_args("Booting kernel",
static_command_line, __start___param,
@@ -525,8 +527,6 @@ asmlinkage __visible void __init start_kernel(void)
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);
- jump_label_init();
-
/*
* These use large bootmem allocations and must precede
* kmem_cache_init()
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 28a142f1be36..3f7dc5f341f7 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -369,9 +369,9 @@ static void mqueue_evict_inode(struct inode *inode)
{
struct mqueue_inode_info *info;
struct user_struct *user;
- unsigned long mq_bytes, mq_treesize;
struct ipc_namespace *ipc_ns;
- struct msg_msg *msg;
+ struct msg_msg *msg, *nmsg;
+ LIST_HEAD(tmp_msg);
clear_inode(inode);
@@ -382,20 +382,27 @@ static void mqueue_evict_inode(struct inode *inode)
info = MQUEUE_I(inode);
spin_lock(&info->lock);
while ((msg = msg_get(info)) != NULL)
- free_msg(msg);
+ list_add_tail(&msg->m_list, &tmp_msg);
kfree(info->node_cache);
spin_unlock(&info->lock);
- /* Total amount of bytes accounted for the mqueue */
- mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
- min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
- sizeof(struct posix_msg_tree_node);
-
- mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
- info->attr.mq_msgsize);
+ list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
+ list_del(&msg->m_list);
+ free_msg(msg);
+ }
user = info->user;
if (user) {
+ unsigned long mq_bytes, mq_treesize;
+
+ /* Total amount of bytes accounted for the mqueue */
+ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+ min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+ sizeof(struct posix_msg_tree_node);
+
+ mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+ info->attr.mq_msgsize);
+
spin_lock(&mq_lock);
user->mq_bytes -= mq_bytes;
/*
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index bf74eaa5c39f..6d90b191c638 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -18,6 +18,7 @@
#include <linux/utsname.h>
#include <linux/proc_ns.h>
#include <linux/uaccess.h>
+#include <linux/sched.h>
#include "util.h"
@@ -64,6 +65,9 @@ static struct msg_msg *alloc_msg(size_t len)
pseg = &msg->next;
while (len > 0) {
struct msg_msgseg *seg;
+
+ cond_resched();
+
alen = min(len, DATALEN_SEG);
seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
if (seg == NULL)
@@ -176,6 +180,8 @@ void free_msg(struct msg_msg *msg)
kfree(msg);
while (seg != NULL) {
struct msg_msgseg *tmp = seg->next;
+
+ cond_resched();
kfree(seg);
seg = tmp;
}
diff --git a/ipc/sem.c b/ipc/sem.c
index 10b94bc59d4a..5cd9d802592f 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2159,11 +2159,9 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- /* we are the last process using this ulp, acquiring ulp->lock
- * isn't required. Besides that, we are also protected against
- * IPC_RMID as we hold sma->sem_perm lock now
- */
+ spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
+ spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/kernel/Makefile b/kernel/Makefile
index 314e7d62f5f0..184fa9aa5802 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -28,6 +28,7 @@ KCOV_INSTRUMENT_extable.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/audit.c b/kernel/audit.c
index 3461a3d874fe..53dcaa3b67bc 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -751,13 +751,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
audit_log_end(ab);
}
-static int audit_set_feature(struct sk_buff *skb)
+static int audit_set_feature(struct audit_features *uaf)
{
- struct audit_features *uaf;
int i;
BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
- uaf = nlmsg_data(nlmsg_hdr(skb));
/* if there is ever a version 2 we should handle that here */
@@ -823,6 +821,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
u32 seq;
void *data;
+ int data_len;
int err;
struct audit_buffer *ab;
u16 msg_type = nlh->nlmsg_type;
@@ -846,6 +845,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
seq = nlh->nlmsg_seq;
data = nlmsg_data(nlh);
+ data_len = nlmsg_len(nlh);
switch (msg_type) {
case AUDIT_GET: {
@@ -867,7 +867,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct audit_status s;
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
+ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
if (s.mask & AUDIT_STATUS_ENABLED) {
err = audit_set_enabled(s.enabled);
if (err < 0)
@@ -930,7 +930,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
break;
case AUDIT_SET_FEATURE:
- err = audit_set_feature(skb);
+ if (data_len < sizeof(struct audit_features))
+ return -EINVAL;
+ err = audit_set_feature(data);
if (err)
return err;
break;
@@ -942,6 +944,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
err = audit_filter(msg_type, AUDIT_FILTER_USER);
if (err == 1) { /* match or error */
+ char *str = data;
+
err = 0;
if (msg_type == AUDIT_USER_TTY) {
err = tty_audit_push();
@@ -950,19 +954,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
mutex_unlock(&audit_cmd_mutex);
audit_log_common_recv_msg(&ab, msg_type);
- if (msg_type != AUDIT_USER_TTY)
+ if (msg_type != AUDIT_USER_TTY) {
+ /* ensure NULL termination */
+ str[data_len - 1] = '\0';
audit_log_format(ab, " msg='%.*s'",
AUDIT_MESSAGE_TEXT_MAX,
- (char *)data);
- else {
- int size;
-
+ str);
+ } else {
audit_log_format(ab, " data=");
- size = nlmsg_len(nlh);
- if (size > 0 &&
- ((unsigned char *)data)[size - 1] == '\0')
- size--;
- audit_log_n_untrustedstring(ab, data, size);
+ if (data_len > 0 && str[data_len - 1] == '\0')
+ data_len--;
+ audit_log_n_untrustedstring(ab, str, data_len);
}
audit_set_portid(ab, NETLINK_CB(skb).portid);
audit_log_end(ab);
@@ -971,7 +973,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
break;
case AUDIT_ADD_RULE:
case AUDIT_DEL_RULE:
- if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
+ if (data_len < sizeof(struct audit_rule_data))
return -EINVAL;
if (audit_enabled == AUDIT_LOCKED) {
audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
@@ -980,7 +982,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EPERM;
}
err = audit_rule_change(msg_type, NETLINK_CB(skb).portid,
- seq, data, nlmsg_len(nlh));
+ seq, data, data_len);
break;
case AUDIT_LIST_RULES:
err = audit_list_rules_send(skb, seq);
@@ -994,7 +996,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case AUDIT_MAKE_EQUIV: {
void *bufp = data;
u32 sizes[2];
- size_t msglen = nlmsg_len(nlh);
+ size_t msglen = data_len;
char *old, *new;
err = -EINVAL;
@@ -1070,7 +1072,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
+ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
/* check if new data is valid */
if ((s.enabled != 0 && s.enabled != 1) ||
(s.log_passwd != 0 && s.log_passwd != 1))
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index f036b6ada6ef..712469a3103a 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -365,12 +365,12 @@ static int audit_get_nd(struct audit_watch *watch, struct path *parent)
struct dentry *d = kern_path_locked(watch->path, parent);
if (IS_ERR(d))
return PTR_ERR(d);
- inode_unlock(d_backing_inode(parent->dentry));
if (d_is_positive(d)) {
/* update watch filter fields */
watch->dev = d->d_sb->s_dev;
watch->ino = d_backing_inode(d)->i_ino;
}
+ inode_unlock(d_backing_inode(parent->dentry));
dput(d);
return 0;
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index cd4f41397c7e..a71ff9965cba 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -434,6 +434,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
bufp = data->buf;
for (i = 0; i < data->field_count; i++) {
struct audit_field *f = &entry->rule.fields[i];
+ u32 f_val;
err = -EINVAL;
@@ -442,12 +443,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
goto exit_free;
f->type = data->fields[i];
- f->val = data->values[i];
+ f_val = data->values[i];
/* Support legacy tests for a valid loginuid */
- if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
+ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
- f->val = 0;
+ f_val = 0;
entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
@@ -463,7 +464,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_SUID:
case AUDIT_FSUID:
case AUDIT_OBJ_UID:
- f->uid = make_kuid(current_user_ns(), f->val);
+ f->uid = make_kuid(current_user_ns(), f_val);
if (!uid_valid(f->uid))
goto exit_free;
break;
@@ -472,11 +473,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_SGID:
case AUDIT_FSGID:
case AUDIT_OBJ_GID:
- f->gid = make_kgid(current_user_ns(), f->val);
+ f->gid = make_kgid(current_user_ns(), f_val);
if (!gid_valid(f->gid))
goto exit_free;
break;
case AUDIT_ARCH:
+ f->val = f_val;
entry->rule.arch_f = f;
break;
case AUDIT_SUBJ_USER:
@@ -489,11 +491,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_OBJ_TYPE:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
+ }
+ entry->rule.buflen += f_val;
+ f->lsm_str = str;
err = security_audit_rule_init(f->type, f->op, str,
(void **)&f->lsm_rule);
/* Keep currently invalid fields around in case they
@@ -502,68 +506,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
pr_warn("audit rule for LSM \'%s\' is invalid\n",
str);
err = 0;
- }
- if (err) {
- kfree(str);
+ } else if (err)
goto exit_free;
- } else
- f->lsm_str = str;
break;
case AUDIT_WATCH:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
- err = audit_to_watch(&entry->rule, str, f->val, f->op);
+ }
+ err = audit_to_watch(&entry->rule, str, f_val, f->op);
if (err) {
kfree(str);
goto exit_free;
}
+ entry->rule.buflen += f_val;
break;
case AUDIT_DIR:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
+ }
err = audit_make_tree(&entry->rule, str, f->op);
kfree(str);
if (err)
goto exit_free;
+ entry->rule.buflen += f_val;
break;
case AUDIT_INODE:
+ f->val = f_val;
err = audit_to_inode(&entry->rule, f);
if (err)
goto exit_free;
break;
case AUDIT_FILTERKEY:
- if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
+ if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
goto exit_free;
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
+ }
+ entry->rule.buflen += f_val;
entry->rule.filterkey = str;
break;
case AUDIT_EXE:
- if (entry->rule.exe || f->val > PATH_MAX)
+ if (entry->rule.exe || f_val > PATH_MAX)
goto exit_free;
- str = audit_unpack_string(&bufp, &remain, f->val);
+ str = audit_unpack_string(&bufp, &remain, f_val);
if (IS_ERR(str)) {
err = PTR_ERR(str);
goto exit_free;
}
- entry->rule.buflen += f->val;
-
- audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
+ audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
if (IS_ERR(audit_mark)) {
kfree(str);
err = PTR_ERR(audit_mark);
goto exit_free;
}
+ entry->rule.buflen += f_val;
entry->rule.exe = audit_mark;
break;
+ default:
+ f->val = f_val;
+ break;
}
}
@@ -1095,22 +1102,24 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
int err = 0;
struct audit_entry *entry;
- entry = audit_data_to_entry(data, datasz);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
-
switch (type) {
case AUDIT_ADD_RULE:
+ entry = audit_data_to_entry(data, datasz);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
err = audit_add_rule(entry);
audit_log_rule_change("add_rule", &entry->rule, !err);
break;
case AUDIT_DEL_RULE:
+ entry = audit_data_to_entry(data, datasz);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
err = audit_del_rule(entry);
audit_log_rule_change("remove_rule", &entry->rule, !err);
break;
default:
- err = -EINVAL;
WARN_ON(1);
+ return -EINVAL;
}
if (err || type == AUDIT_DEL_RULE) {
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c2aaf539728f..854e90be1a02 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1096,7 +1096,7 @@ static void audit_log_execve_info(struct audit_context *context,
}
/* write as much as we can to the audit log */
- if (len_buf > 0) {
+ if (len_buf >= 0) {
/* NOTE: some magic numbers here - basically if we
* can't fit a reasonable amount of data into the
* existing audit buffer, flush it and start with
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d091da..5a590f22b4d4 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,4 +1,5 @@
obj-y := core.o
+CFLAGS_core.o += $(call cc-disable-warning, override-init)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 879ca844ba1d..df2ebce927ec 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -208,27 +208,80 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}
#ifdef CONFIG_BPF_JIT
+/* All BPF JIT sysctl knobs here. */
+int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
+int bpf_jit_harden __read_mostly;
+long bpf_jit_limit __read_mostly;
+
+static atomic_long_t bpf_jit_current;
+
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
+#if defined(MODULES_VADDR)
+ return MODULES_END - MODULES_VADDR;
+#else
+ return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
+static int __init bpf_jit_charge_init(void)
+{
+ /* Only used as heuristic here to derive limit. */
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ PAGE_SIZE), LONG_MAX);
+ return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+ if (atomic_long_add_return(pages, &bpf_jit_current) >
+ (bpf_jit_limit >> PAGE_SHIFT)) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ atomic_long_sub(pages, &bpf_jit_current);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+ atomic_long_sub(pages, &bpf_jit_current);
+}
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_binary_header *hdr;
- unsigned int size, hole, start;
+ u32 size, hole, start, pages;
/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
*/
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+ pages = size / PAGE_SIZE;
+
+ if (bpf_jit_charge_modmem(pages))
+ return NULL;
hdr = module_alloc(size);
- if (hdr == NULL)
+ if (!hdr) {
+ bpf_jit_uncharge_modmem(pages);
return NULL;
+ }
/* Fill space with illegal/arch-dep instructions. */
bpf_fill_ill_insns(hdr, size);
- hdr->pages = size / PAGE_SIZE;
+ hdr->pages = pages;
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
start = (get_random_int() % hole) & ~(alignment - 1);
@@ -241,11 +294,12 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
+ u32 pages = hdr->pages;
+
module_memfree(hdr);
+ bpf_jit_uncharge_modmem(pages);
}
-int bpf_jit_harden __read_mostly;
-
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
@@ -925,8 +979,13 @@ load_byte:
STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
#else
-static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
+static unsigned int __bpf_prog_ret0_warn(void *ctx,
+ const struct bpf_insn *insn)
{
+ /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
+ * is not working properly, so warn about it!
+ */
+ WARN_ON_ONCE(1);
return 0;
}
#endif
@@ -981,7 +1040,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
fp->bpf_func = (void *) __bpf_prog_run;
#else
- fp->bpf_func = (void *) __bpf_prog_ret0;
+ fp->bpf_func = (void *) __bpf_prog_ret0_warn;
#endif
/* eBPF JITs can rewrite the program in case constant
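The bpf_jit_charge_modmem()/bpf_jit_uncharge_modmem() pair added above charges JIT pages against a global limit before module_alloc() and rolls the charge back when the allocation is refused or fails. A rough user-space sketch of that accounting pattern using C11 atomics; charge_pages(), uncharge_pages() and the 1024-page limit are made up for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical globals standing in for bpf_jit_current and bpf_jit_limit. */
static atomic_long jit_pages_used;
static const long jit_pages_limit = 1024;

static int charge_pages(long pages, bool privileged)
{
    /* Optimistically charge, then check the limit (same order as the
     * kernel helper): unprivileged callers over the limit are reverted. */
    if (atomic_fetch_add(&jit_pages_used, pages) + pages > jit_pages_limit) {
        if (!privileged) {
            atomic_fetch_sub(&jit_pages_used, pages);
            return -1;              /* -EPERM in the kernel */
        }
    }
    return 0;
}

static void uncharge_pages(long pages)
{
    atomic_fetch_sub(&jit_pages_used, pages);
}

int main(void)
{
    if (charge_pages(512, false) == 0)
        printf("first allocation charged\n");
    if (charge_pages(600, false) != 0)
        printf("second allocation rejected and rolled back\n");
    uncharge_pages(512);            /* the bpf_jit_binary_free() path */
    printf("pages still accounted: %ld\n", atomic_load(&jit_pages_used));
    return 0;
}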
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a36a532c056d..8648d7d29708 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,10 +13,11 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
+#include <linux/rculist_nulls.h>
#include "percpu_freelist.h"
struct bucket {
- struct hlist_head head;
+ struct hlist_nulls_head head;
raw_spinlock_t lock;
};
@@ -40,9 +41,14 @@ enum extra_elem_state {
/* each htab element is struct htab_elem + key + value */
struct htab_elem {
union {
- struct hlist_node hash_node;
- struct bpf_htab *htab;
- struct pcpu_freelist_node fnode;
+ struct hlist_nulls_node hash_node;
+ struct {
+ void *padding;
+ union {
+ struct bpf_htab *htab;
+ struct pcpu_freelist_node fnode;
+ };
+ };
};
union {
struct rcu_head rcu;
@@ -114,8 +120,10 @@ skip_percpu_elems:
if (err)
goto free_elems;
- pcpu_freelist_populate(&htab->freelist, htab->elems, htab->elem_size,
- htab->map.max_entries);
+ pcpu_freelist_populate(&htab->freelist,
+ htab->elems + offsetof(struct htab_elem, fnode),
+ htab->elem_size, htab->map.max_entries);
+
return 0;
free_elems:
@@ -148,6 +156,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
int err, i;
u64 cost;
+ BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+ BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
+ offsetof(struct htab_elem, hash_node.pprev));
+
if (attr->map_flags & ~BPF_F_NO_PREALLOC)
/* reserved bits should not be used */
return ERR_PTR(-EINVAL);
@@ -233,7 +246,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
- INIT_HLIST_HEAD(&htab->buckets[i].head);
+ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
}
@@ -270,20 +283,44 @@ static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
return &htab->buckets[hash & (htab->n_buckets - 1)];
}
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
return &__select_bucket(htab, hash)->head;
}
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
+/* this lookup function can only be called with bucket lock taken */
+static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
void *key, u32 key_size)
{
+ struct hlist_nulls_node *n;
+ struct htab_elem *l;
+
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
+ if (l->hash == hash && !memcmp(&l->key, key, key_size))
+ return l;
+
+ return NULL;
+}
+
+/* Can be called without the bucket lock. It will repeat the loop in
+ * the unlikely event that elements moved from one bucket into another
+ * while the linked list is being walked.
+ */
+static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
+ u32 hash, void *key,
+ u32 key_size, u32 n_buckets)
+{
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_rcu(l, head, hash_node)
+again:
+ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
if (l->hash == hash && !memcmp(&l->key, key, key_size))
return l;
+ if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
+ goto again;
+
return NULL;
}
@@ -291,7 +328,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l;
u32 hash, key_size;
@@ -304,7 +341,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
head = select_bucket(htab, hash);
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
return l;
}
@@ -323,7 +360,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
int i = 0;
@@ -340,13 +377,13 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
head = select_bucket(htab, hash);
/* lookup the key */
- l = lookup_elem_raw(head, hash, key, key_size);
+ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
if (!l)
goto find_first_elem;
/* key was found, get next key in the same bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
struct htab_elem, hash_node);
if (next_l) {
@@ -365,7 +402,7 @@ find_first_elem:
head = select_bucket(htab, i);
/* pick first element in the bucket */
- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
struct htab_elem, hash_node);
if (next_l) {
/* if it's not empty, just return it */
@@ -429,9 +466,13 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
int err = 0;
if (prealloc) {
- l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
- if (!l_new)
+ struct pcpu_freelist_node *l;
+
+ l = pcpu_freelist_pop(&htab->freelist);
+ if (!l)
err = -E2BIG;
+ else
+ l_new = container_of(l, struct htab_elem, fnode);
} else {
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
atomic_dec(&htab->count);
@@ -518,7 +559,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -557,9 +598,9 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
*/
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
if (l_old) {
- hlist_del_rcu(&l_old->hash_node);
+ hlist_nulls_del_rcu(&l_old->hash_node);
free_htab_elem(htab, l_old);
}
ret = 0;
@@ -574,7 +615,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l_new = NULL, *l_old;
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
unsigned long flags;
struct bucket *b;
u32 key_size, hash;
@@ -626,7 +667,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
ret = PTR_ERR(l_new);
goto err;
}
- hlist_add_head_rcu(&l_new->hash_node, head);
+ hlist_nulls_add_head_rcu(&l_new->hash_node, head);
}
ret = 0;
err:
@@ -644,7 +685,7 @@ static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- struct hlist_head *head;
+ struct hlist_nulls_head *head;
struct bucket *b;
struct htab_elem *l;
unsigned long flags;
@@ -664,7 +705,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_del_rcu(&l->hash_node);
free_htab_elem(htab, l);
ret = 0;
}
@@ -678,12 +719,12 @@ static void delete_all_elements(struct bpf_htab *htab)
int i;
for (i = 0; i < htab->n_buckets; i++) {
- struct hlist_head *head = select_bucket(htab, i);
- struct hlist_node *n;
+ struct hlist_nulls_head *head = select_bucket(htab, i);
+ struct hlist_nulls_node *n;
struct htab_elem *l;
- hlist_for_each_entry_safe(l, n, head, hash_node) {
- hlist_del_rcu(&l->hash_node);
+ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
+ hlist_nulls_del_rcu(&l->hash_node);
if (l->state != HTAB_EXTRA_ELEM_USED)
htab_elem_free(htab, l);
}
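The BUILD_BUG_ON() checks added to htab_map_alloc() above pin the struct htab_elem layout so that fnode.next (and htab) overlay hash_node.pprev once the padding pointer is in place. A stand-alone sketch of the same compile-time layout check using C11 static_assert(); struct elem and its members are simplified stand-ins, not the kernel types.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins: the real struct htab_elem relies on fnode.next
 * overlapping hash_node.pprev, which is why the union gains a padding
 * pointer in the hunk above. */
struct list_node { struct list_node *next, *pprev; };
struct free_node { struct free_node *next; };

struct elem {
    union {
        struct list_node hash_node;
        struct {
            void *padding;          /* keeps fnode aligned with pprev */
            struct free_node fnode;
        };
    };
    int key;
};

/* Compile-time layout check, the user-space analogue of BUILD_BUG_ON(). */
static_assert(offsetof(struct elem, fnode.next) ==
              offsetof(struct elem, hash_node.pprev),
              "fnode.next must overlay hash_node.pprev");

int main(void)
{
    printf("layout ok: fnode.next at offset %zu\n",
           offsetof(struct elem, fnode.next));
    return 0;
}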
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ca7e277e8b5f..e10314223cbf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -802,7 +802,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
- union bpf_attr attr = {};
+ union bpf_attr attr;
int err;
if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
@@ -838,6 +838,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
}
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
+ memset(&attr, 0, sizeof(attr));
if (copy_from_user(&attr, uattr, size) != 0)
return -EFAULT;
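The syscall.c change zeroes the full union bpf_attr before copying a possibly shorter, older-format attribute from user space, so the tail the caller did not supply is well defined. A minimal sketch of that zero-then-copy-min(size, sizeof) pattern; struct attr and read_attr() are invented for the example.

#include <stdio.h>
#include <string.h>

/* A hypothetical extensible attribute struct: older callers may only know
 * about the first field, so the copy may be shorter than sizeof(attr). */
struct attr {
    unsigned int map_type;
    unsigned int max_entries;       /* added in a "newer" version */
};

static void read_attr(struct attr *dst, const void *src, size_t src_size)
{
    size_t n = src_size < sizeof(*dst) ? src_size : sizeof(*dst);

    /* Zero first so fields the caller did not provide are well defined,
     * mirroring the memset(&attr, 0, sizeof(attr)) added above. */
    memset(dst, 0, sizeof(*dst));
    memcpy(dst, src, n);
}

int main(void)
{
    unsigned int old_style = 7;     /* caller only supplies the first field */
    struct attr a;

    read_attr(&a, &old_style, sizeof(old_style));
    printf("map_type=%u max_entries=%u\n", a.map_type, a.max_entries);
    return 0;
}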
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb0cf1caf1cd..2d7a4fc42a88 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6335,6 +6335,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
return;
}
+ /* Don't associate the sock with unrelated interrupted task's cgroup. */
+ if (in_interrupt())
+ return;
+
rcu_read_lock();
while (true) {
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index a57242e0d5a6..b8b898e21c19 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -48,7 +48,7 @@ struct pids_cgroup {
* %PIDS_MAX = (%PID_MAX_LIMIT + 1).
*/
atomic64_t counter;
- int64_t limit;
+ atomic64_t limit;
/* Handle for "pids.events" */
struct cgroup_file events_file;
@@ -76,8 +76,8 @@ pids_css_alloc(struct cgroup_subsys_state *parent)
if (!pids)
return ERR_PTR(-ENOMEM);
- pids->limit = PIDS_MAX;
atomic64_set(&pids->counter, 0);
+ atomic64_set(&pids->limit, PIDS_MAX);
atomic64_set(&pids->events_limit, 0);
return &pids->css;
}
@@ -149,13 +149,14 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
for (p = pids; parent_pids(p); p = parent_pids(p)) {
int64_t new = atomic64_add_return(num, &p->counter);
+ int64_t limit = atomic64_read(&p->limit);
/*
* Since new is capped to the maximum number of pid_t, if
* p->limit is %PIDS_MAX then we know that this test will never
* fail.
*/
- if (new > p->limit)
+ if (new > limit)
goto revert;
}
@@ -280,7 +281,7 @@ set_limit:
* Limit updates don't need to be mutex'd, since it isn't
* critical that any racing fork()s follow the new limit.
*/
- pids->limit = limit;
+ atomic64_set(&pids->limit, limit);
return nbytes;
}
@@ -288,7 +289,7 @@ static int pids_max_show(struct seq_file *sf, void *v)
{
struct cgroup_subsys_state *css = seq_css(sf);
struct pids_cgroup *pids = css_pids(css);
- int64_t limit = pids->limit;
+ int64_t limit = atomic64_read(&pids->limit);
if (limit >= PIDS_MAX)
seq_printf(sf, "%s\n", PIDS_MAX_STR);
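Making pids->limit an atomic64_t lets the lockless charge path and the limit writer race without torn 64-bit accesses on 32-bit builds. A user-space sketch of the same charge/limit check with C11 atomics; try_charge() and the numbers are illustrative only.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-ins for pids->counter and pids->limit, both read without a lock. */
static atomic_llong counter;
static atomic_llong limit;

static int try_charge(long long num)
{
    long long new = atomic_fetch_add(&counter, num) + num;
    long long lim = atomic_load(&limit);    /* one atomic read, never torn */

    if (new > lim) {
        atomic_fetch_sub(&counter, num);    /* revert, as pids_try_charge() does */
        return -1;
    }
    return 0;
}

int main(void)
{
    atomic_store(&limit, 2);                /* the pids_max_write() equivalent */
    printf("charge 1 -> %d\n", try_charge(1));
    printf("charge 2 -> %d\n", try_charge(2));  /* exceeds the limit */
    return 0;
}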
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bf875e94ac9e..d46f48e94549 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
@@ -356,6 +357,12 @@ void cpu_hotplug_enable(void)
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -372,6 +379,7 @@ void __init cpu_smt_disable(bool force)
pr_info("SMT: Force disabled\n");
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
} else {
+ pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED;
}
}
@@ -507,8 +515,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
if (WARN_ON_ONCE((!cpu_online(cpu))))
return -ECANCELED;
- /* Unpark the stopper thread and the hotplug thread of the target cpu */
- stop_machine_unpark(cpu);
+ /* Unpark the hotplug thread of the target cpu */
kthread_unpark(st->thread);
/*
@@ -591,6 +598,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
}
}
+static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
+{
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
+ return true;
+ /*
+ * When CPU hotplug is disabled, then taking the CPU down is not
+ * possible because takedown_cpu() and the architecture and
+ * subsystem specific mechanisms are not available. So the CPU
+ * which would be completely unplugged again needs to stay around
+ * in the current state.
+ */
+ return st->state <= CPUHP_BRINGUP_CPU;
+}
+
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
@@ -601,8 +622,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
if (ret) {
- st->target = prev_state;
- undo_cpu_up(cpu, st);
+ if (can_rollback_cpu(st)) {
+ st->target = prev_state;
+ undo_cpu_up(cpu, st);
+ }
break;
}
}
@@ -1042,6 +1065,7 @@ out:
/* This post dead nonsense must die */
if (!ret && hasdied)
cpu_notify_nofail(CPU_POST_DEAD, cpu);
+ arch_smt_update();
return ret;
}
@@ -1090,8 +1114,8 @@ void notify_cpu_starting(unsigned int cpu)
/*
* Called from the idle task. Wake up the controlling task which brings the
- * stopper and the hotplug thread of the upcoming CPU up and then delegates
- * the rest of the online bringup to the hotplug thread.
+ * hotplug thread of the upcoming CPU up and then delegates the rest of the
+ * online bringup to the hotplug thread.
*/
void cpuhp_online_idle(enum cpuhp_state state)
{
@@ -1101,6 +1125,12 @@ void cpuhp_online_idle(enum cpuhp_state state)
if (state != CPUHP_AP_ONLINE_IDLE)
return;
+ /*
+ * Unpark the stopper thread before we start the idle loop (and start
+ * scheduling); this ensures the stopper task is always available.
+ */
+ stop_machine_unpark(smp_processor_id());
+
st->state = CPUHP_AP_ONLINE_IDLE;
complete(&st->done);
}
@@ -1161,6 +1191,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpu_hotplug_done();
+ arch_smt_update();
return ret;
}
@@ -1970,7 +2001,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
@@ -2002,7 +2033,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
return ret;
}
-static int cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
{
int cpu, ret = 0;
@@ -2226,3 +2257,46 @@ void idle_notifier_call_chain(unsigned long val)
atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
+
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF,
+ CPU_MITIGATIONS_AUTO,
+ CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+static enum cpu_mitigations cpu_mitigations __ro_after_init =
+ CPU_MITIGATIONS_AUTO;
+
+static int __init mitigations_parse_cmdline(char *arg)
+{
+ if (!strcmp(arg, "off"))
+ cpu_mitigations = CPU_MITIGATIONS_OFF;
+ else if (!strcmp(arg, "auto"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO;
+ else if (!strcmp(arg, "auto,nosmt"))
+ cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+ else
+ pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+ arg);
+
+ return 0;
+}
+early_param("mitigations", mitigations_parse_cmdline);
+
+/* mitigations=off */
+bool cpu_mitigations_off(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_off);
+
+/* mitigations=auto,nosmt */
+bool cpu_mitigations_auto_nosmt(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
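mitigations_parse_cmdline() above maps the new mitigations= values onto a small enum and warns about anything it does not recognise. A plain user-space sketch of the same parser; here the unrecognised case falls back to the "auto" default rather than leaving a previous setting untouched.

#include <stdio.h>
#include <string.h>

enum mitigations { MITIGATIONS_OFF, MITIGATIONS_AUTO, MITIGATIONS_AUTO_NOSMT };

static enum mitigations parse_mitigations(const char *arg)
{
    if (!strcmp(arg, "off"))
        return MITIGATIONS_OFF;
    if (!strcmp(arg, "auto"))
        return MITIGATIONS_AUTO;
    if (!strcmp(arg, "auto,nosmt"))
        return MITIGATIONS_AUTO_NOSMT;

    /* Unknown value: warn and keep the default, like the pr_crit() above. */
    fprintf(stderr, "Unsupported mitigations=%s, keeping the default\n", arg);
    return MITIGATIONS_AUTO;
}

int main(void)
{
    printf("auto,nosmt -> %d\n", parse_mitigations("auto,nosmt"));
    printf("bogus      -> %d\n", parse_mitigations("bogus"));
    return 0;
}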
diff --git a/kernel/cred.c b/kernel/cred.c
index 5f264fb5737d..d63a2d861ac2 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -146,7 +146,10 @@ void __put_cred(struct cred *cred)
BUG_ON(cred == current->cred);
BUG_ON(cred == current->real_cred);
- call_rcu(&cred->rcu, put_cred_rcu);
+ if (cred->non_rcu)
+ put_cred_rcu(&cred->rcu);
+ else
+ call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
@@ -216,7 +219,7 @@ struct cred *cred_alloc_blank(void)
new->magic = CRED_MAGIC;
#endif
- if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
+ if (security_cred_alloc_blank(new, GFP_KERNEL_ACCOUNT) < 0)
goto error;
return new;
@@ -257,6 +260,7 @@ struct cred *prepare_creds(void)
old = task->cred;
memcpy(new, old, sizeof(struct cred));
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
@@ -274,7 +278,7 @@ struct cred *prepare_creds(void)
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
validate_creds(new);
return new;
@@ -447,6 +451,15 @@ int commit_creds(struct cred *new)
if (task->mm)
set_dumpable(task->mm, suid_dumpable);
task->pdeath_signal = 0;
+ /*
+ * If a task drops privileges and becomes nondumpable,
+ * the dumpability change must become visible before
+ * the credential change; otherwise, a __ptrace_may_access()
+ * racing with this change may be able to attach to a task it
+ * shouldn't be able to attach to (as if the task had dropped
+ * privileges without becoming nondumpable).
+ * Pairs with a read barrier in __ptrace_may_access().
+ */
smp_wmb();
}
@@ -527,7 +540,19 @@ const struct cred *override_creds(const struct cred *new)
validate_creds(old);
validate_creds(new);
- get_cred(new);
+
+ /*
+ * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+ *
+ * That means that we do not clear the 'non_rcu' flag, since
+ * we are only installing the cred into the thread-synchronous
+ * '->cred' pointer, not the '->real_cred' pointer that is
+ * visible to other threads under RCU.
+ *
+ * Also note that we did validate_creds() manually, not depending
+ * on the validation in 'get_cred()'.
+ */
+ get_new_cred((struct cred *)new);
alter_cred_subscribers(new, 1);
rcu_assign_pointer(current->cred, new);
alter_cred_subscribers(old, -1);
@@ -610,6 +635,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
validate_creds(old);
*new = *old;
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
@@ -627,7 +653,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
#ifdef CONFIG_SECURITY
new->security = NULL;
#endif
- if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
+ if (security_prepare_creds(new, old, GFP_KERNEL_ACCOUNT) < 0)
goto error;
put_cred(old);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 5a58421d7e2d..a52a6da8c3d0 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2632,7 +2632,7 @@ static int kdb_per_cpu(int argc, const char **argv)
diag = kdbgetularg(argv[3], &whichcpu);
if (diag)
return diag;
- if (!cpu_online(whichcpu)) {
+ if (whichcpu >= nr_cpu_ids || !cpu_online(whichcpu)) {
kdb_printf("cpu %ld is not online\n", whichcpu);
return KDB_BADCPUNUM;
}
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index e556751d15d9..a2b29b9bdfcb 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -2,6 +2,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/binfmts.h>
+#include <linux/elfcore.h>
Elf_Half __weak elf_core_extra_phdrs(void)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5cbb2eda80b5..97b90faceb97 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4624,6 +4624,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (perf_event_check_period(event, value))
return -EINVAL;
+ if (!event->attr.freq && (value & (1ULL << 63)))
+ return -EINVAL;
+
event_function_call(event, __perf_event_period, &value);
return 0;
@@ -5300,7 +5303,15 @@ accounting:
*/
user_lock_limit *= num_online_cpus();
- user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+ user_locked = atomic_long_read(&user->locked_vm);
+
+ /*
+ * sysctl_perf_event_mlock may have changed, so that
+ * user->locked_vm > user_lock_limit
+ */
+ if (user_locked > user_lock_limit)
+ user_locked = user_lock_limit;
+ user_locked += user_extra;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
@@ -5492,7 +5503,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
if (user_mode(regs)) {
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
- } else if (current->mm) {
+ } else if (!(current->flags & PF_KTHREAD)) {
perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
@@ -6616,6 +6627,7 @@ static void perf_event_mmap_output(struct perf_event *event,
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
+ u32 type = mmap_event->event_id.header.type;
int ret;
if (!perf_event_mmap_match(event, data))
@@ -6659,6 +6671,7 @@ static void perf_event_mmap_output(struct perf_event *event,
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
+ mmap_event->event_id.header.type = type;
}
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -10128,7 +10141,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err_unlock;
}
- perf_install_in_context(ctx, event, cpu);
+ perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
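The perf mmap accounting fix clamps user->locked_vm to the current limit before adding the new request, so a sysctl_perf_event_mlock that was lowered after pages were accounted no longer inflates the extra charge. A tiny worked example of that clamp; extra_to_charge() and the page counts are hypothetical.

#include <stdio.h>

static unsigned long extra_to_charge(unsigned long already_locked,
                                     unsigned long limit,
                                     unsigned long request)
{
    unsigned long locked;

    if (already_locked > limit)
        already_locked = limit;     /* the clamp the patch introduces */

    locked = already_locked + request;
    return locked > limit ? locked - limit : 0;
}

int main(void)
{
    /* Limit dropped to 100 pages while 150 are still accounted: only the
     * 10 newly requested pages count as "extra", not the stale overshoot. */
    printf("extra = %lu\n", extra_to_charge(150, 100, 10));   /* 10, not 60 */
    return 0;
}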
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 99becab2c1ce..8e8b903b7613 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
unsigned long head;
again:
+ /*
+ * In order to avoid publishing a head value that goes backwards,
+ * we must ensure the load of @rb->head happens after we've
+ * incremented @rb->nest.
+ *
+ * Otherwise we can observe a @rb->head value before one published
+ * by an IRQ/NMI happening between the load and the increment.
+ */
+ barrier();
head = local_read(&rb->head);
/*
- * IRQ/NMI can happen here, which means we can miss a head update.
+ * IRQ/NMI can happen here and advance @rb->head, causing our
+ * load above to be stale.
*/
- if (!local_dec_and_test(&rb->nest))
+ /*
+ * If this isn't the outermost nesting, we don't have to update
+ * @rb->user_page->data_head.
+ */
+ if (local_read(&rb->nest) > 1) {
+ local_dec(&rb->nest);
goto out;
+ }
/*
* Since the mmap() consumer (userspace) can run on a different CPU:
@@ -88,9 +104,18 @@ again:
rb->user_page->data_head = head;
/*
- * Now check if we missed an update -- rely on previous implied
- * compiler barriers to force a re-read.
+ * We must publish the head before decrementing the nest count,
+ * otherwise an IRQ/NMI can publish a more recent head value and our
+ * write will (temporarily) publish a stale value.
+ */
+ barrier();
+ local_set(&rb->nest, 0);
+
+ /*
+ * Ensure we decrement @rb->nest before we validate the @rb->head.
+ * Otherwise we cannot be sure we caught the 'last' nested update.
*/
+ barrier();
if (unlikely(head != local_read(&rb->head))) {
local_inc(&rb->nest);
goto again;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index fbfab5722254..8ddd29476c0d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1846,7 +1846,7 @@ static void handle_trampoline(struct pt_regs *regs)
sigill:
uprobe_warn(current, "handle uretprobe, sending SIGILL.");
- force_sig_info(SIGILL, SEND_SIG_FORCED, current);
+ force_sig(SIGILL, current);
}
@@ -1962,7 +1962,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
if (unlikely(err)) {
uprobe_warn(current, "execute the probed insn, sending SIGILL.");
- force_sig_info(SIGILL, SEND_SIG_FORCED, current);
+ force_sig(SIGILL, current);
}
}
diff --git a/kernel/fork.c b/kernel/fork.c
index e92b06351dec..288504431a53 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -389,7 +389,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(tsk == current);
cgroup_free(tsk);
- task_numa_free(tsk);
+ task_numa_free(tsk, true);
security_task_free(tsk);
exit_creds(tsk);
delayacct_tsk_free(tsk);
@@ -2356,7 +2356,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
struct ctl_table t;
int ret;
int threads = max_threads;
- int min = MIN_THREADS;
+ int min = 1;
int max = MAX_THREADS;
t = *table;
@@ -2368,7 +2368,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
if (ret || !write)
return ret;
- set_max_threads(threads);
+ max_threads = threads;
return 0;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 2e766ffff2cb..7123d9cab456 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -390,9 +390,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
*/
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
- u32 hash = jhash2((u32*)&key->both.word,
- (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+ u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
key->both.offset);
+
return &futex_queues[hash & (futex_hashsize - 1)];
}
@@ -434,7 +434,7 @@ static void get_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
- ihold(key->shared.inode); /* implies smp_mb(); (B) */
+ smp_mb(); /* explicit smp_mb(); (B) */
break;
case FUT_OFF_MMSHARED:
futex_get_mm(key); /* implies smp_mb(); (B) */
@@ -468,7 +468,6 @@ static void drop_futex_key_refs(union futex_key *key)
switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
case FUT_OFF_INODE:
- iput(key->shared.inode);
break;
case FUT_OFF_MMSHARED:
mmdrop(key->private.mm);
@@ -476,6 +475,46 @@ static void drop_futex_key_refs(union futex_key *key)
}
}
+/*
+ * Generate a machine wide unique identifier for this inode.
+ *
+ * This relies on u64 not wrapping in the lifetime of the machine, which with
+ * 1ns resolution means almost 585 years.
+ *
+ * This further relies on the fact that a well formed program will not unmap
+ * the file while it has a (shared) futex waiting on it. This mapping will have
+ * a file reference which pins the mount and inode.
+ *
+ * If for some reason an inode gets evicted and read back in again, it will get
+ * a new sequence number and will _NOT_ match, even though it is the exact same
+ * file.
+ *
+ * It is important that match_futex() will never have a false-positive, esp.
+ * for PI futexes that can mess up the state. The above argues that false-negatives
+ * are only possible for malformed programs.
+ */
+static u64 get_inode_sequence_number(struct inode *inode)
+{
+ static atomic64_t i_seq;
+ u64 old;
+
+ /* Does the inode already have a sequence number? */
+ old = atomic64_read(&inode->i_sequence);
+ if (likely(old))
+ return old;
+
+ for (;;) {
+ u64 new = atomic64_add_return(1, &i_seq);
+ if (WARN_ON_ONCE(!new))
+ continue;
+
+ old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
+ if (old)
+ return old;
+ return new;
+ }
+}
+
/**
* get_futex_key() - Get parameters which are the keys for a futex
* @uaddr: virtual address of the futex
@@ -488,9 +527,15 @@ static void drop_futex_key_refs(union futex_key *key)
*
* The key words are stored in *key on success.
*
- * For shared mappings, it's (page->index, file_inode(vma->vm_file),
- * offset_within_page). For private mappings, it's (uaddr, current->mm).
- * We can usually work out the index without swapping in the page.
+ * For shared mappings (when @fshared), the key is:
+ * ( inode->i_sequence, page->index, offset_within_page )
+ * [ also see get_inode_sequence_number() ]
+ *
+ * For private mappings (or when !@fshared), the key is:
+ * ( current->mm, address, 0 )
+ *
+ * This allows (cross process, where applicable) identification of the futex
+ * without keeping the page pinned for the duration of the FUTEX_WAIT.
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
@@ -630,8 +675,6 @@ again:
key->private.mm = mm;
key->private.address = address;
- get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
} else {
struct inode *inode;
@@ -663,40 +706,14 @@ again:
goto again;
}
- /*
- * Take a reference unless it is about to be freed. Previously
- * this reference was taken by ihold under the page lock
- * pinning the inode in place so i_lock was unnecessary. The
- * only way for this check to fail is if the inode was
- * truncated in parallel which is almost certainly an
- * application bug. In such a case, just retry.
- *
- * We are not calling into get_futex_key_refs() in file-backed
- * cases, therefore a successful atomic_inc return below will
- * guarantee that get_futex_key() will still imply smp_mb(); (B).
- */
- if (!atomic_inc_not_zero(&inode->i_count)) {
- rcu_read_unlock();
- put_page(page);
-
- goto again;
- }
-
- /* Should be impossible but lets be paranoid for now */
- if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
- err = -EFAULT;
- rcu_read_unlock();
- iput(inode);
-
- goto out;
- }
-
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
- key->shared.inode = inode;
+ key->shared.i_seq = get_inode_sequence_number(inode);
key->shared.pgoff = basepage_index(tail);
rcu_read_unlock();
}
+ get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
out:
put_page(page);
return err;
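get_inode_sequence_number() above lazily assigns each inode a machine-wide, never-zero sequence number from a global counter, letting the first cmpxchg writer win and every later caller reuse that value. A user-space sketch of the same allocation pattern with C11 atomics; struct object and object_sequence_number() are stand-ins.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t global_seq;             /* like the static i_seq */

struct object {
    _Atomic uint64_t seq;                       /* 0 means "not assigned yet" */
};

static uint64_t object_sequence_number(struct object *obj)
{
    uint64_t old = atomic_load(&obj->seq);

    if (old)                                    /* already assigned */
        return old;

    for (;;) {
        uint64_t zero = 0;
        uint64_t new = atomic_fetch_add(&global_seq, 1) + 1;

        if (new == 0)                           /* skip the reserved value */
            continue;
        if (atomic_compare_exchange_strong(&obj->seq, &zero, new))
            return new;                         /* we won the race */
        return zero;                            /* somebody else won; use theirs */
    }
}

int main(void)
{
    struct object a = {0}, b = {0};

    printf("a: %llu\n", (unsigned long long)object_sequence_number(&a));
    printf("a again: %llu\n", (unsigned long long)object_sequence_number(&a));
    printf("b: %llu\n", (unsigned long long)object_sequence_number(&b));
    return 0;
}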
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index fd781a468f32..fb00cf30abd1 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
+#include <linux/suspend.h>
#include <linux/utsname.h>
#include <trace/events/sched.h>
@@ -221,6 +222,28 @@ void reset_hung_task_detector(void)
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
+static bool hung_detector_suspended;
+
+static int hungtask_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ hung_detector_suspended = true;
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ hung_detector_suspended = false;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
/*
* kthread which checks for tasks stuck in D state
*/
@@ -235,7 +258,8 @@ static int watchdog(void *dummy)
long t = hung_timeout_jiffies(hung_last_checked, timeout);
if (t <= 0) {
- if (!atomic_xchg(&reset_hung_task, 0))
+ if (!atomic_xchg(&reset_hung_task, 0) &&
+ !hung_detector_suspended)
check_hung_uninterruptible_tasks(timeout);
hung_last_checked = jiffies;
continue;
@@ -249,6 +273,10 @@ static int watchdog(void *dummy)
static int __init hung_task_init(void)
{
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
+ /* Disable hung task detector on suspend */
+ pm_notifier(hungtask_pm_notify, 0);
+
watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
return 0;
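The PM notifier added above only flips hung_detector_suspended across suspend/resume so the watchdog skips its periodic check while the system is transitioning. A compact sketch of that gate; the event names and functions are simplified stand-ins for the PM notifier API.

#include <stdbool.h>
#include <stdio.h>

enum pm_event { SUSPEND_PREPARE, POST_SUSPEND };

static bool detector_suspended;

static void pm_notify(enum pm_event ev)
{
    switch (ev) {
    case SUSPEND_PREPARE:
        detector_suspended = true;      /* stop checking across suspend */
        break;
    case POST_SUSPEND:
        detector_suspended = false;     /* resume checking */
        break;
    }
}

static void watchdog_tick(void)
{
    if (!detector_suspended)
        printf("checking for hung tasks\n");
    else
        printf("skipping check while suspended\n");
}

int main(void)
{
    watchdog_tick();
    pm_notify(SUSPEND_PREPARE);
    watchdog_tick();
    pm_notify(POST_SUSPEND);
    watchdog_tick();
    return 0;
}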
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f30110e1b8c9..9f13667ccb9c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -729,7 +729,11 @@ void handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- kstat_incr_irqs_this_cpu(desc);
+ /*
+ * PER CPU interrupts are not serialized. Do not touch
+ * desc->tot_count.
+ */
+ __kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
@@ -758,7 +762,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
unsigned int irq = irq_desc_get_irq(desc);
irqreturn_t res;
- kstat_incr_irqs_this_cpu(desc);
+ /*
+ * PER CPU interrupts are not serialized. Do not touch
+ * desc->tot_count.
+ */
+ __kstat_incr_irqs_this_cpu(desc);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
@@ -1134,6 +1142,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
data = data->parent_data;
+
+ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
if (data->chip->irq_set_wake)
return data->chip->irq_set_wake(data, on);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index bc226e783bd2..22e3f29a30d8 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -199,12 +199,18 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
#undef __irqd_to_state
-static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
{
__this_cpu_inc(*desc->kstat_irqs);
__this_cpu_inc(kstat.irqs_sum);
}
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+{
+ __kstat_incr_irqs_this_cpu(desc);
+ desc->tot_count++;
+}
+
static inline int irq_desc_get_node(struct irq_desc *desc)
{
return irq_common_data_get_node(&desc->irq_common_data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 77977f55dff7..8847f277a14f 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -109,6 +109,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
desc->depth = 1;
desc->irq_count = 0;
desc->irqs_unhandled = 0;
+ desc->tot_count = 0;
desc->name = NULL;
desc->owner = owner;
for_each_possible_cpu(cpu)
@@ -266,6 +267,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
}
}
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+ /*
+ * If irq_sysfs_init() has not yet been invoked (early boot), then
+ * irq_kobj_base is NULL and the descriptor was never added.
+ * kobject_del() complains about an object with no parent, so make
+ * it conditional.
+ */
+ if (irq_kobj_base)
+ kobject_del(&desc->kobj);
+}
+
static int __init irq_sysfs_init(void)
{
struct irq_desc *desc;
@@ -296,6 +309,7 @@ static struct kobj_type irq_kobj_type = {
};
static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
#endif /* CONFIG_SYSFS */
@@ -405,7 +419,7 @@ static void free_desc(unsigned int irq)
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init() as well.
*/
- kobject_del(&desc->kobj);
+ irq_sysfs_del(desc);
delete_irq_desc(irq);
/*
@@ -880,11 +894,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
unsigned int kstat_irqs(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- int cpu;
unsigned int sum = 0;
+ int cpu;
if (!desc || !desc->kstat_irqs)
return 0;
+ if (!irq_settings_is_per_cpu_devid(desc) &&
+ !irq_settings_is_per_cpu(desc))
+ return desc->tot_count;
+
for_each_possible_cpu(cpu)
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
return sum;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cf94460504bb..b2fc2a581b86 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -233,7 +233,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
- schedule_work(&desc->affinity_notify->work);
+ if (!schedule_work(&desc->affinity_notify->work)) {
+ /* Work was already scheduled, drop our extra ref */
+ kref_put(&desc->affinity_notify->kref,
+ desc->affinity_notify->release);
+ }
}
irqd_set(data, IRQD_AFFINITY_SET);
@@ -332,8 +336,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
- if (old_notify)
+ if (old_notify) {
+ if (cancel_work_sync(&old_notify->work)) {
+ /* Pending work had a ref, put that one too */
+ kref_put(&old_notify->kref, old_notify->release);
+ }
kref_put(&old_notify->kref, old_notify->release);
+ }
return 0;
}
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index b86886beee4f..867fb0ed4aa6 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -37,6 +37,8 @@ static void resend_irqs(unsigned long arg)
irq = find_first_bit(irqs_resend, nr_irqs);
clear_bit(irq, irqs_resend);
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
local_irq_disable();
desc->handle_irq(desc);
local_irq_enable();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 0277d1216f80..e4e5e98002fe 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -119,7 +119,7 @@ out:
* invoke it.
*
* If module auto-loading support is disabled then this function
- * becomes a no-operation.
+ * simply returns -ENOENT.
*/
int __request_module(bool wait, const char *fmt, ...)
{
@@ -140,7 +140,7 @@ int __request_module(bool wait, const char *fmt, ...)
WARN_ON_ONCE(wait && current_is_async());
if (!modprobe_path[0])
- return 0;
+ return -ENOENT;
va_start(args, fmt);
ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f580352cc6e5..1b75fb8c7735 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -514,8 +514,14 @@ static void do_free_cleaned_kprobes(void)
struct optimized_kprobe *op, *tmp;
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
- BUG_ON(!kprobe_unused(&op->kp));
list_del_init(&op->list);
+ if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
+ /*
+ * This must not happen, but if there is a kprobe
+ * still in use, keep it on kprobes hash list.
+ */
+ continue;
+ }
free_aggr_kprobe(&op->kp);
}
}
@@ -668,7 +674,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
static int reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
- int ret;
BUG_ON(!kprobe_unused(ap));
/*
@@ -682,9 +687,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
- ret = kprobe_optready(ap);
- if (ret)
- return ret;
+ if (!kprobe_optready(ap))
+ return -EINVAL;
optimize_kprobe(ap);
return 0;
@@ -1456,7 +1460,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
/* Ensure it is not in reserved area nor out of text */
if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
- jump_label_text_reserved(p->addr, p->addr)) {
+ jump_label_text_reserved(p->addr, p->addr) ||
+ find_bug((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d5b779d7e79f..9f56e3fac795 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1241,9 +1241,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;
raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
return ret;
@@ -1268,9 +1270,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;
raw_local_irq_save(flags);
+ current->lockdep_recursion = 1;
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
+ current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
return ret;
@@ -3260,17 +3264,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (depth) {
hlock = curr->held_locks + depth - 1;
if (hlock->class_idx == class_idx && nest_lock) {
- if (hlock->references) {
- /*
- * Check: unsigned int references:12, overflow.
- */
- if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
- return 0;
+ if (!references)
+ references++;
+ if (!hlock->references)
hlock->references++;
- } else {
- hlock->references = 2;
- }
+
+ hlock->references += references;
+
+ /* Overflow */
+ if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
+ return 0;
return 1;
}
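The __lock_acquire() change accumulates nested references into the 12-bit bitfield and detects wraparound by checking whether the stored value ended up smaller than the amount just added. A reduced stand-alone sketch of that overflow check; struct held_lock and add_references() here are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

struct held_lock { unsigned int references : 12; };

static bool add_references(struct held_lock *hlock, unsigned int extra)
{
    if (!extra)
        extra++;                    /* count the lock itself at least once */
    if (!hlock->references)
        hlock->references++;

    hlock->references += extra;

    /* If the bitfield wrapped, the stored value is now smaller than the
     * amount we just added (the DEBUG_LOCKS_WARN_ON condition above). */
    return hlock->references >= extra;
}

int main(void)
{
    struct held_lock h = { .references = 0 };
    bool ok;

    ok = add_references(&h, 1);
    printf("add 1:    ok=%d refs=%u\n", ok, h.references);

    ok = add_references(&h, 4095);  /* overflows the 12-bit field */
    printf("add 4095: ok=%d refs=%u\n", ok, h.references);
    return 0;
}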
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index a0f61effad25..75d80809c48c 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -219,7 +219,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
static int lockdep_stats_show(struct seq_file *m, void *v)
{
- struct lock_class *class;
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -229,6 +228,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
sum_forward_deps = 0;
+#ifdef CONFIG_PROVE_LOCKING
+ struct lock_class *class;
+
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (class->usage_mask == 0)
@@ -260,13 +262,13 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
-#ifdef CONFIG_PROVE_LOCKING
sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
}
#ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
+
+#endif
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index babc67cfed69..b0e41c312c15 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -649,10 +649,10 @@ static void __torture_print_stats(char *page,
if (statp[i].n_lock_fail)
fail = true;
sum += statp[i].n_lock_acquired;
- if (max < statp[i].n_lock_fail)
- max = statp[i].n_lock_fail;
- if (min > statp[i].n_lock_fail)
- min = statp[i].n_lock_fail;
+ if (max < statp[i].n_lock_acquired)
+ max = statp[i].n_lock_acquired;
+ if (min > statp[i].n_lock_acquired)
+ min = statp[i].n_lock_acquired;
}
page += sprintf(page,
"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index be06c45cbe4f..0cdbb636e316 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -127,6 +127,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
{
struct rwsem_waiter *waiter, *tmp;
long oldcount, woken = 0, adjustment = 0;
+ struct list_head wlist;
/*
* Take a peek at the queue head waiter such that we can determine
@@ -185,18 +186,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
* of the queue. We know that woken will be at least 1 as we accounted
* for above. Note we increment the 'active part' of the count by the
* number of readers before waking any processes up.
+ *
+ * We have to do wakeup in 2 passes to prevent the possibility that
+ * the reader count is decremented before it is incremented. This is
+ * because the to-be-woken waiter may not have slept yet, so it may
+ * see waiter->task cleared, finish its critical section and do an
+ * unlock before the reader count is incremented.
+ *
+ * 1) Collect the read-waiters in a separate list, count them and
+ * fully increment the reader count in rwsem.
+ * 2) For each waiter in the new list, clear waiter->task and
+ *    put it into wake_q to be woken up later.
*/
- list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
- struct task_struct *tsk;
-
+ list_for_each_entry(waiter, &sem->wait_list, list) {
if (waiter->type == RWSEM_WAITING_FOR_WRITE)
break;
woken++;
- tsk = waiter->task;
+ }
+ list_cut_before(&wlist, &sem->wait_list, &waiter->list);
+
+ adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+ if (list_empty(&sem->wait_list)) {
+ /* hit end of list above */
+ adjustment -= RWSEM_WAITING_BIAS;
+ }
+
+ if (adjustment)
+ atomic_long_add(adjustment, &sem->count);
+
+ /* 2nd pass */
+ list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+ struct task_struct *tsk;
+ tsk = waiter->task;
get_task_struct(tsk);
- list_del(&waiter->list);
+
/*
* Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
@@ -212,15 +237,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
/* wake_q_add() already take the task ref */
put_task_struct(tsk);
}
-
- adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
- if (list_empty(&sem->wait_list)) {
- /* hit end of list above */
- adjustment -= RWSEM_WAITING_BIAS;
- }
-
- if (adjustment)
- atomic_long_add(adjustment, &sem->count);
}
/*
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 9aa0fccd5d43..03595c29c566 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -51,19 +51,19 @@ EXPORT_SYMBOL(__rwlock_init);
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
- struct task_struct *owner = NULL;
+ struct task_struct *owner = READ_ONCE(lock->owner);
- if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
- owner = lock->owner;
+ if (owner == SPINLOCK_OWNER_INIT)
+ owner = NULL;
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
- lock, lock->magic,
+ lock, READ_ONCE(lock->magic),
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
- lock->owner_cpu);
+ READ_ONCE(lock->owner_cpu));
dump_stack();
}
@@ -80,16 +80,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
- SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(lock->owner == current, lock, "recursion");
- SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
+ SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
+ SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
+ SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
lock, "cpu recursion");
}
static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
- lock->owner_cpu = raw_smp_processor_id();
- lock->owner = current;
+ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+ WRITE_ONCE(lock->owner, current);
}
static inline void debug_spin_unlock(raw_spinlock_t *lock)
@@ -99,8 +99,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
- lock->owner = SPINLOCK_OWNER_INIT;
- lock->owner_cpu = -1;
+ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+ WRITE_ONCE(lock->owner_cpu, -1);
}
/*
@@ -183,8 +183,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)
static inline void debug_write_lock_after(rwlock_t *lock)
{
- lock->owner_cpu = raw_smp_processor_id();
- lock->owner = current;
+ WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
+ WRITE_ONCE(lock->owner, current);
}
static inline void debug_write_unlock(rwlock_t *lock)
@@ -193,8 +193,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
- lock->owner = SPINLOCK_OWNER_INIT;
- lock->owner_cpu = -1;
+ WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
+ WRITE_ONCE(lock->owner_cpu, -1);
}
void do_raw_write_lock(rwlock_t *lock)
diff --git a/kernel/module.c b/kernel/module.c
index 2325c9821f2a..9cb1437151ae 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -995,6 +995,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
free_module(mod);
+ /* someone could wait for the module in add_unformed_module() */
+ wake_up_all(&module_wq);
return 0;
out:
mutex_unlock(&module_mutex);
@@ -3351,8 +3353,7 @@ static bool finished_loading(const char *name)
sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
- ret = !mod || mod->state == MODULE_STATE_LIVE
- || mod->state == MODULE_STATE_GOING;
+ ret = !mod || mod->state == MODULE_STATE_LIVE;
mutex_unlock(&module_mutex);
return ret;
@@ -3515,8 +3516,7 @@ again:
mutex_lock(&module_mutex);
old = find_module_all(mod->name, strlen(mod->name), true);
if (old != NULL) {
- if (old->state == MODULE_STATE_COMING
- || old->state == MODULE_STATE_UNFORMED) {
+ if (old->state != MODULE_STATE_LIVE) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
diff --git a/kernel/notifier.c b/kernel/notifier.c
index fd2c9acbcc19..0f70f1b6fdaa 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -552,7 +552,7 @@ NOKPROBE_SYMBOL(notify_die);
int register_die_notifier(struct notifier_block *nb)
{
- vmalloc_sync_all();
+ vmalloc_sync_mappings();
return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
diff --git a/kernel/padata.c b/kernel/padata.c
index e4a8f8d9b31a..6939111b3cbe 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -34,6 +34,8 @@
#define MAX_OBJ_NUM 1000
+static void padata_free_pd(struct parallel_data *pd);
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -274,7 +276,12 @@ static void padata_reorder(struct parallel_data *pd)
* The next object that needs serialization might have arrived to
* the reorder queues in the meantime, we will be called again
* from the timer function if no one else cares for it.
+ *
+ * Ensure reorder_objects is read after pd->lock is dropped so we see
+ * an increment from another task in padata_do_serial. Pairs with
+ * smp_mb__after_atomic in padata_do_serial.
*/
+ smp_mb();
if (atomic_read(&pd->reorder_objects)
&& !(pinst->flags & PADATA_RESET))
mod_timer(&pd->timer, jiffies + HZ);
@@ -296,6 +303,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
struct padata_serial_queue *squeue;
struct parallel_data *pd;
LIST_HEAD(local_list);
+ int cnt;
local_bh_disable();
squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -305,6 +313,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_replace_init(&squeue->serial.list, &local_list);
spin_unlock(&squeue->serial.lock);
+ cnt = 0;
+
while (!list_empty(&local_list)) {
struct padata_priv *padata;
@@ -314,9 +324,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_del_init(&padata->list);
padata->serial(padata);
- atomic_dec(&pd->refcnt);
+ cnt++;
}
local_bh_enable();
+
+ if (atomic_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
}
/**
@@ -343,6 +356,13 @@ void padata_do_serial(struct padata_priv *padata)
list_add_tail(&padata->list, &pqueue->reorder.list);
spin_unlock(&pqueue->reorder.lock);
+ /*
+ * Ensure the atomic_inc of reorder_objects above is ordered correctly
+ * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
+ * in padata_reorder.
+ */
+ smp_mb__after_atomic();
+
put_cpu();
padata_reorder(pd);
@@ -432,7 +452,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
- atomic_set(&pd->refcnt, 0);
+ atomic_set(&pd->refcnt, 1);
pd->pinst = pinst;
spin_lock_init(&pd->lock);
@@ -457,31 +477,6 @@ static void padata_free_pd(struct parallel_data *pd)
kfree(pd);
}
-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct padata_serial_queue *squeue;
-
- for_each_cpu(cpu, pd->cpumask.pcpu) {
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
- flush_work(&pqueue->work);
- }
-
- del_timer_sync(&pd->timer);
-
- if (atomic_read(&pd->reorder_objects))
- padata_reorder(pd);
-
- for_each_cpu(cpu, pd->cpumask.cbcpu) {
- squeue = per_cpu_ptr(pd->squeue, cpu);
- flush_work(&squeue->work);
- }
-
- BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
static void __padata_start(struct padata_instance *pinst)
{
pinst->flags |= PADATA_INIT;
@@ -495,10 +490,6 @@ static void __padata_stop(struct padata_instance *pinst)
pinst->flags &= ~PADATA_INIT;
synchronize_rcu();
-
- get_online_cpus();
- padata_flush_queues(pinst->pd);
- put_online_cpus();
}
/* Replace the internal control structure with a new one. */
@@ -519,8 +510,8 @@ static void padata_replace(struct padata_instance *pinst,
if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
notification_mask |= PADATA_CPU_SERIAL;
- padata_flush_queues(pd_old);
- padata_free_pd(pd_old);
+ if (atomic_dec_and_test(&pd_old->refcnt))
+ padata_free_pd(pd_old);
if (notification_mask)
blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
@@ -623,8 +614,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
struct cpumask *serial_mask, *parallel_mask;
int err = -EINVAL;
- mutex_lock(&pinst->lock);
get_online_cpus();
+ mutex_lock(&pinst->lock);
switch (cpumask_type) {
case PADATA_CPU_PARALLEL:
@@ -642,8 +633,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
out:
- put_online_cpus();
mutex_unlock(&pinst->lock);
+ put_online_cpus();
return err;
}
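A user-space sketch (C11 atomics, single file; not the padata code itself) of the reference-counting scheme the padata changes move to: pd->refcnt starts at 1 for the instance, serial completion drops one reference per finished object, and whichever path drops the reference that reaches zero frees the structure, replacing the old flush-then-free teardown.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pdata {
	atomic_int refcnt;
};

static struct pdata *pdata_alloc(void)
{
	struct pdata *pd = malloc(sizeof(*pd));

	atomic_init(&pd->refcnt, 1);	/* base reference held by the instance */
	return pd;
}

static void pdata_put(struct pdata *pd, int n)
{
	/* atomic_fetch_sub() returns the previous value */
	if (atomic_fetch_sub(&pd->refcnt, n) == n) {
		printf("last reference dropped, freeing\n");
		free(pd);
	}
}

int main(void)
{
	struct pdata *pd = pdata_alloc();

	atomic_fetch_add(&pd->refcnt, 2);	/* two objects queued for serialization */
	pdata_put(pd, 2);			/* serial worker finished both */
	pdata_put(pd, 1);			/* replace path drops the base reference */
	return 0;
}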
diff --git a/kernel/panic.c b/kernel/panic.c
index eb7bc6d60927..89198dca0180 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -144,6 +144,7 @@ void panic(const char *fmt, ...)
* after setting panic_cpu) from invoking panic() again.
*/
local_irq_disable();
+ preempt_disable_notrace();
/*
* It's possible to come here directly from a panic-assertion and
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 3976dd57db78..0eab538841fd 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -344,7 +344,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
}
read_lock(&tasklist_lock);
- force_sig(SIGKILL, pid_ns->child_reaper);
+ send_sig(SIGKILL, pid_ns->child_reaper, 1);
read_unlock(&tasklist_lock);
do_exit(0);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b26dbc48c75b..81695a492ebe 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -256,6 +256,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
kps / 1000, (kps % 1000) / 10);
}
+__weak int arch_resume_nosmt(void)
+{
+ return 0;
+}
+
/**
* create_image - Create a hibernation image.
* @platform_mode: Whether or not to use the platform driver.
@@ -322,6 +327,10 @@ static int create_image(int platform_mode)
Enable_cpus:
enable_nonboot_cpus();
+ /* Allow architectures to do nosmt-specific post-resume dances */
+ if (!in_suspend)
+ error = arch_resume_nosmt();
+
Platform_finish:
platform_finish(platform_mode);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4f0f0604f1c4..5dfac92521fa 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -732,8 +732,15 @@ zone_found:
* We have found the zone. Now walk the radix tree to find the leaf node
* for our PFN.
*/
+
+ /*
+ * If the zone we wish to scan is the current zone and the
+ * pfn falls into the current node then we do not need to walk
+ * the tree.
+ */
node = bm->cur.node;
- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+ if (zone == bm->cur.zone &&
+ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;
node = zone->rtree;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 09f9842f9f42..362f177d2c84 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -356,7 +356,6 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
-static enum log_flags syslog_prev;
static size_t syslog_partial;
/* index and sequence number of the first record stored in the buffer */
@@ -370,7 +369,6 @@ static u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
-static enum log_flags console_prev;
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
@@ -385,6 +383,7 @@ static u32 clear_idx;
/* record buffer */
#define LOG_ALIGN __alignof__(struct printk_log)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
@@ -639,27 +638,15 @@ static void append_char(char **pp, char *e, char c)
}
static ssize_t msg_print_ext_header(char *buf, size_t size,
- struct printk_log *msg, u64 seq,
- enum log_flags prev_flags)
+ struct printk_log *msg, u64 seq)
{
u64 ts_usec = msg->ts_nsec;
- char cont = '-';
do_div(ts_usec, 1000);
- /*
- * If we couldn't merge continuation line fragments during the print,
- * export the stored flags to allow an optional external merge of the
- * records. Merging the records isn't always neccessarily correct, like
- * when we hit a race during printing. In most cases though, it produces
- * better readable output. 'c' in the record flags mark the first
- * fragment of a line, '+' the following.
- */
- if (msg->flags & LOG_CONT)
- cont = (prev_flags & LOG_CONT) ? '+' : 'c';
-
return scnprintf(buf, size, "%u,%llu,%llu,%c;",
- (msg->facility << 3) | msg->level, seq, ts_usec, cont);
+ (msg->facility << 3) | msg->level, seq, ts_usec,
+ msg->flags & LOG_CONT ? 'c' : '-');
}
static ssize_t msg_print_ext_body(char *buf, size_t size,
@@ -714,7 +701,6 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
struct devkmsg_user {
u64 seq;
u32 idx;
- enum log_flags prev;
struct ratelimit_state rs;
struct mutex lock;
char buf[CONSOLE_EXT_LOG_MAX];
@@ -824,12 +810,11 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
msg = log_from_idx(user->idx);
len = msg_print_ext_header(user->buf, sizeof(user->buf),
- msg, user->seq, user->prev);
+ msg, user->seq);
len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
log_dict(msg), msg->dict_len,
log_text(msg), msg->text_len);
- user->prev = msg->flags;
user->idx = log_next(user->idx);
user->seq++;
raw_spin_unlock_irq(&logbuf_lock);
@@ -999,18 +984,23 @@ void log_buf_kexec_setup(void)
static unsigned long __initdata new_log_buf_len;
/* we practice scaling the ring buffer by powers of 2 */
-static void __init log_buf_len_update(unsigned size)
+static void __init log_buf_len_update(u64 size)
{
+ if (size > (u64)LOG_BUF_LEN_MAX) {
+ size = (u64)LOG_BUF_LEN_MAX;
+ pr_err("log_buf over 2G is not supported.\n");
+ }
+
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
- new_log_buf_len = size;
+ new_log_buf_len = (unsigned long)size;
}
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
- unsigned int size;
+ u64 size;
if (!str)
return -EINVAL;
@@ -1060,7 +1050,7 @@ void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
- int free;
+ unsigned int free;
if (log_buf != __log_buf)
return;
@@ -1080,7 +1070,7 @@ void __init setup_log_buf(int early)
}
if (unlikely(!new_log_buf)) {
- pr_err("log_buf_len: %ld bytes not available\n",
+ pr_err("log_buf_len: %lu bytes not available\n",
new_log_buf_len);
return;
}
@@ -1093,8 +1083,8 @@ void __init setup_log_buf(int early)
memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
- pr_info("log_buf_len: %d bytes\n", log_buf_len);
- pr_info("early log buf free: %d(%d%%)\n",
+ pr_info("log_buf_len: %u bytes\n", log_buf_len);
+ pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
}
@@ -1215,26 +1205,12 @@ static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
return len;
}
-static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
- bool syslog, char *buf, size_t size)
+static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
size_t text_size = msg->text_len;
- bool prefix = true;
- bool newline = true;
size_t len = 0;
- if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
- prefix = false;
-
- if (msg->flags & LOG_CONT) {
- if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
- prefix = false;
-
- if (!(msg->flags & LOG_NEWLINE))
- newline = false;
- }
-
do {
const char *next = memchr(text, '\n', text_size);
size_t text_len;
@@ -1252,22 +1228,17 @@ static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
text_len + 1 >= size - len)
break;
- if (prefix)
- len += print_prefix(msg, syslog, buf + len);
+ len += print_prefix(msg, syslog, buf + len);
memcpy(buf + len, text, text_len);
len += text_len;
- if (next || newline)
- buf[len++] = '\n';
+ buf[len++] = '\n';
} else {
/* SYSLOG_ACTION_* buffer size only calculation */
- if (prefix)
- len += print_prefix(msg, syslog, NULL);
+ len += print_prefix(msg, syslog, NULL);
len += text_len;
- if (next || newline)
- len++;
+ len++;
}
- prefix = true;
text = next;
} while (text);
@@ -1293,7 +1264,6 @@ static int syslog_print(char __user *buf, int size)
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
syslog_idx = log_first_idx;
- syslog_prev = 0;
syslog_partial = 0;
}
if (syslog_seq == log_next_seq) {
@@ -1303,13 +1273,11 @@ static int syslog_print(char __user *buf, int size)
skip = syslog_partial;
msg = log_from_idx(syslog_idx);
- n = msg_print_text(msg, syslog_prev, true, text,
- LOG_LINE_MAX + PREFIX_MAX);
+ n = msg_print_text(msg, true, text, LOG_LINE_MAX + PREFIX_MAX);
if (n - syslog_partial <= size) {
/* message fits into buffer, move forward */
syslog_idx = log_next(syslog_idx);
syslog_seq++;
- syslog_prev = msg->flags;
n -= syslog_partial;
syslog_partial = 0;
} else if (!len){
@@ -1352,7 +1320,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 next_seq;
u64 seq;
u32 idx;
- enum log_flags prev;
/*
* Find first record that fits, including all following records,
@@ -1360,12 +1327,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
*/
seq = clear_seq;
idx = clear_idx;
- prev = 0;
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- len += msg_print_text(msg, prev, true, NULL, 0);
- prev = msg->flags;
+ len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -1373,12 +1338,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
/* move first record forward until length fits into the buffer */
seq = clear_seq;
idx = clear_idx;
- prev = 0;
while (len > size && seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- len -= msg_print_text(msg, prev, true, NULL, 0);
- prev = msg->flags;
+ len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -1391,7 +1354,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct printk_log *msg = log_from_idx(idx);
int textlen;
- textlen = msg_print_text(msg, prev, true, text,
+ textlen = msg_print_text(msg, true, text,
LOG_LINE_MAX + PREFIX_MAX);
if (textlen < 0) {
len = textlen;
@@ -1399,7 +1362,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
}
idx = log_next(idx);
seq++;
- prev = msg->flags;
raw_spin_unlock_irq(&logbuf_lock);
if (copy_to_user(buf + len, text, textlen))
@@ -1412,7 +1374,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
/* messages are gone, move to next one */
seq = log_first_seq;
idx = log_first_idx;
- prev = 0;
}
}
}
@@ -1513,7 +1474,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
syslog_idx = log_first_idx;
- syslog_prev = 0;
syslog_partial = 0;
}
if (source == SYSLOG_FROM_PROC) {
@@ -1526,16 +1486,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
} else {
u64 seq = syslog_seq;
u32 idx = syslog_idx;
- enum log_flags prev = syslog_prev;
error = 0;
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- error += msg_print_text(msg, prev, true, NULL, 0);
+ error += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
- prev = msg->flags;
}
error -= syslog_partial;
}
@@ -1717,7 +1675,7 @@ static size_t cont_print_text(char *text, size_t size)
size_t textlen = 0;
size_t len;
- if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
+ if (cont.cons == 0) {
textlen += print_time(cont.ts_nsec, text);
size -= textlen;
}
@@ -1985,11 +1943,9 @@ static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
-static enum log_flags syslog_prev;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
-static enum log_flags console_prev;
static struct cont {
size_t len;
size_t cons;
@@ -2001,15 +1957,15 @@ static char *log_dict(const struct printk_log *msg) { return NULL; }
static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static ssize_t msg_print_ext_header(char *buf, size_t size,
- struct printk_log *msg, u64 seq,
- enum log_flags prev_flags) { return 0; }
+ struct printk_log *msg,
+ u64 seq) { return 0; }
static ssize_t msg_print_ext_body(char *buf, size_t size,
char *dict, size_t dict_len,
char *text, size_t text_len) { return 0; }
static void call_console_drivers(int level,
const char *ext_text, size_t ext_len,
const char *text, size_t len) {}
-static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg,
bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }
@@ -2398,7 +2354,6 @@ again:
/* messages are gone, move to first one */
console_seq = log_first_seq;
console_idx = log_first_idx;
- console_prev = 0;
} else {
len = 0;
}
@@ -2423,16 +2378,14 @@ skip:
* will properly dump everything later.
*/
msg->flags &= ~LOG_NOCONS;
- console_prev = msg->flags;
goto skip;
}
- len += msg_print_text(msg, console_prev, false,
- text + len, sizeof(text) - len);
+ len += msg_print_text(msg, false, text + len, sizeof(text) - len);
if (nr_ext_console_drivers) {
ext_len = msg_print_ext_header(ext_text,
sizeof(ext_text),
- msg, console_seq, console_prev);
+ msg, console_seq);
ext_len += msg_print_ext_body(ext_text + ext_len,
sizeof(ext_text) - ext_len,
log_dict(msg), msg->dict_len,
@@ -2440,7 +2393,6 @@ skip:
}
console_idx = log_next(console_idx);
console_seq++;
- console_prev = msg->flags;
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
@@ -2735,7 +2687,6 @@ void register_console(struct console *newcon)
raw_spin_lock_irqsave(&logbuf_lock, flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
- console_prev = syslog_prev;
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
/*
* We're about to replay the log buffer. Only do this to the
@@ -3085,7 +3036,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
goto out;
msg = log_from_idx(dumper->cur_idx);
- l = msg_print_text(msg, 0, syslog, line, size);
+ l = msg_print_text(msg, syslog, line, size);
dumper->cur_idx = log_next(dumper->cur_idx);
dumper->cur_seq++;
@@ -3154,7 +3105,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
u32 idx;
u64 next_seq;
u32 next_idx;
- enum log_flags prev;
size_t l = 0;
bool ret = false;
@@ -3177,27 +3127,23 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
/* calculate length of entire buffer */
seq = dumper->cur_seq;
idx = dumper->cur_idx;
- prev = 0;
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l += msg_print_text(msg, prev, true, NULL, 0);
+ l += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
- prev = msg->flags;
}
/* move first record forward until length fits into the buffer */
seq = dumper->cur_seq;
idx = dumper->cur_idx;
- prev = 0;
- while (l > size && seq < dumper->next_seq) {
+ while (l >= size && seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l -= msg_print_text(msg, prev, true, NULL, 0);
+ l -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
- prev = msg->flags;
}
/* last message in next iteration */
@@ -3208,10 +3154,9 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l += msg_print_text(msg, prev, syslog, buf + l, size - l);
+ l += msg_print_text(msg, syslog, buf + l, size - l);
idx = log_next(idx);
seq++;
- prev = msg->flags;
}
dumper->next_seq = next_seq;
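A stand-alone sketch of the log_buf_len handling above: clamp the requested size to 2 GiB, then round up to a power of two. The round-up helper is a hand-rolled stand-in for the kernel's roundup_pow_of_two(); only the clamp-then-round ordering mirrors the patch.

#include <stdint.h>
#include <stdio.h>

#define LOG_BUF_LEN_MAX ((uint32_t)1 << 31)

static uint64_t roundup_pow_of_two64(uint64_t v)
{
	uint64_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static uint64_t log_buf_len_update(uint64_t size)
{
	if (size > (uint64_t)LOG_BUF_LEN_MAX) {
		size = LOG_BUF_LEN_MAX;
		fprintf(stderr, "log_buf over 2G is not supported\n");
	}
	return size ? roundup_pow_of_two64(size) : 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)log_buf_len_update(3000000));	/* -> 4194304 */
	printf("%llu\n", (unsigned long long)log_buf_len_update(5ULL << 30));	/* clamped to 2G */
	return 0;
}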
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f39a7be98fc1..ea3370e205fb 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -74,9 +74,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
*/
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
- rcu_read_lock();
- __ptrace_link(child, new_parent, __task_cred(new_parent));
- rcu_read_unlock();
+ __ptrace_link(child, new_parent, current_cred());
}
/**
@@ -258,6 +256,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
+ if (mode & PTRACE_MODE_SCHED)
+ return false;
+
if (mode & PTRACE_MODE_NOAUDIT)
return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
else
@@ -319,15 +320,32 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
+ /*
+ * If a task drops privileges and becomes nondumpable (through a syscall
+ * like setresuid()) while we are trying to access it, we must ensure
+ * that the dumpability is read after the credentials; otherwise,
+ * we may be able to attach to a task that we shouldn't be able to
+ * attach to (as if the task had dropped privileges without becoming
+ * nondumpable).
+ * Pairs with a write barrier in commit_creds().
+ */
+ smp_rmb();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
+ if (mode & PTRACE_MODE_SCHED)
+ return 0;
return security_ptrace_access_check(task, mode);
}
+bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+{
+ return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+}
+
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
@@ -700,6 +718,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
if (arg.nr < 0)
return -EINVAL;
+ /* Ensure arg.off fits in an unsigned long */
+ if (arg.off > ULONG_MAX)
+ return 0;
+
if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
pending = &child->signal->shared_pending;
else
@@ -707,18 +729,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
for (i = 0; i < arg.nr; ) {
siginfo_t info;
- s32 off = arg.off + i;
+ unsigned long off = arg.off + i;
+ bool found = false;
spin_lock_irq(&child->sighand->siglock);
list_for_each_entry(q, &pending->list, list) {
if (!off--) {
+ found = true;
copy_siginfo(&info, &q->info);
break;
}
}
spin_unlock_irq(&child->sighand->siglock);
- if (off >= 0) /* beyond the end of the list */
+ if (!found) /* beyond the end of the list */
break;
#ifdef CONFIG_COMPAT
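A sketch of the ptrace_peek_siginfo() fix: once the offset is unsigned, the old "off >= 0 means we walked past the end" test cannot work, so an explicit 'found' flag records whether the walk reached the requested entry. The array below is a made-up stand-in for the pending-signal list.

#include <stdbool.h>
#include <stdio.h>

static bool peek_at(const int *queue, unsigned long qlen, unsigned long off, int *out)
{
	bool found = false;

	for (unsigned long i = 0; i < qlen; i++) {
		if (!off--) {			/* same walk shape as the kernel loop */
			*out = queue[i];
			found = true;
			break;
		}
	}
	return found;
}

int main(void)
{
	int q[] = { 10, 11, 12 };
	int v;

	printf("off=1: %s\n", peek_at(q, 3, 1, &v) ? "found" : "past end");
	printf("off=9: %s\n", peek_at(q, 3, 9, &v) ? "found" : "past end");
	return 0;
}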
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 123ccbd22449..2b8579d5a544 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -453,6 +453,10 @@ rcu_perf_cleanup(void)
if (torture_cleanup_begin())
return;
+ if (!cur_ops) {
+ torture_cleanup_end();
+ return;
+ }
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
@@ -574,6 +578,7 @@ rcu_perf_init(void)
pr_alert(" %s", perf_ops[i]->name);
pr_alert("\n");
firsterr = -EINVAL;
+ cur_ops = NULL;
goto unwind;
}
if (cur_ops->init)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index bf08fee53dc7..5393bbcf3c1a 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1595,6 +1595,10 @@ rcu_torture_cleanup(void)
cur_ops->cb_barrier();
return;
}
+ if (!cur_ops) {
+ torture_cleanup_end();
+ return;
+ }
rcu_torture_barrier_cleanup();
torture_stop_kthread(rcu_torture_stall, stall_task);
@@ -1730,6 +1734,7 @@ rcu_torture_init(void)
pr_alert(" %s", torture_ops[i]->name);
pr_alert("\n");
firsterr = -EINVAL;
+ cur_ops = NULL;
goto unwind;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6b3fff6a6437..82cec9a666e7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1162,7 +1162,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;
- if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
}
@@ -1183,7 +1184,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
@@ -7355,11 +7355,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
int sched_cpu_activate(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going up, increment the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ atomic_inc(&sched_smt_present);
+#endif
set_cpu_active(cpu, true);
if (sched_smp_initialized) {
@@ -7408,6 +7419,14 @@ int sched_cpu_deactivate(unsigned int cpu)
else
synchronize_rcu();
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ atomic_dec(&sched_smt_present);
+#endif
+
if (!sched_smp_initialized)
return 0;
@@ -8455,10 +8474,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
-#else
- /* We don't support RT-tasks being in separate groups */
- if (task->sched_class != &fair_sched_class)
- return -EINVAL;
#endif
/*
* Serialize against wake_up_new_task() such that if its
@@ -8493,6 +8508,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
{
+ if (shareval > scale_load_down(ULONG_MAX))
+ shareval = MAX_SHARES;
return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
@@ -8592,8 +8609,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
period = ktime_to_ns(tg->cfs_bandwidth.period);
if (cfs_quota_us < 0)
quota = RUNTIME_INF;
- else
+ else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
quota = (u64)cfs_quota_us * NSEC_PER_USEC;
+ else
+ return -EINVAL;
return tg_set_cfs_bandwidth(tg, period, quota);
}
@@ -8615,6 +8634,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
{
u64 quota, period;
+ if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
+ return -EINVAL;
+
period = (u64)cfs_period_us * NSEC_PER_USEC;
quota = tg->cfs_bandwidth.quota;
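A small sketch of the overflow guard added in tg_set_cfs_quota()/tg_set_cfs_period(): only multiply the microsecond value by NSEC_PER_USEC when the product provably fits in a u64. The helper name is invented; the check itself mirrors the patch.

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define NSEC_PER_USEC 1000ULL

static int us_to_ns_checked(uint64_t us, uint64_t *ns)
{
	if (us > UINT64_MAX / NSEC_PER_USEC)
		return -EINVAL;			/* would overflow u64 */
	*ns = us * NSEC_PER_USEC;
	return 0;
}

int main(void)
{
	uint64_t ns;

	if (us_to_ns_checked(100000, &ns) == 0)
		printf("100000us = %llu ns\n", (unsigned long long)ns);
	if (us_to_ns_checked(UINT64_MAX, &ns) != 0)
		printf("rejected: would overflow\n");
	return 0;
}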
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0c91d72f3e8f..5e65c7eea872 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1925,6 +1925,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
+
+ /* Avoid time going backwards, prevent potential divide error: */
+ if (unlikely((s64)*period < 0))
+ *period = 0;
} else {
delta = p->se.avg.load_sum / p->se.load.weight;
*period = LOAD_AVG_MAX;
@@ -2253,13 +2257,23 @@ no_join:
return;
}
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA statistics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
{
struct numa_group *grp = p->numa_group;
- void *numa_faults = p->numa_faults;
+ unsigned long *numa_faults = p->numa_faults;
unsigned long flags;
int i;
+ if (!numa_faults)
+ return;
+
if (grp) {
spin_lock_irqsave(&grp->lock, flags);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2272,8 +2286,14 @@ void task_numa_free(struct task_struct *p)
put_numa_group(grp);
}
- p->numa_faults = NULL;
- kfree(numa_faults);
+ if (final) {
+ p->numa_faults = NULL;
+ kfree(numa_faults);
+ } else {
+ p->total_numa_faults = 0;
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+ numa_faults[i] = 0;
+ }
}
/*
@@ -3862,6 +3882,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
if (likely(cfs_rq->runtime_remaining > 0))
return;
+ if (cfs_rq->throttled)
+ return;
/*
* if we're unable to extend our runtime we resched so that the active
* hierarchy can be throttled
@@ -4057,6 +4079,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
if (!cfs_rq_throttled(cfs_rq))
goto next;
+ /* By the above check, this should never be true */
+ SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
runtime = -cfs_rq->runtime_remaining + 1;
if (runtime > remaining)
runtime = remaining;
@@ -4347,12 +4372,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+extern const u64 max_cfs_quota_period;
+
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
struct cfs_bandwidth *cfs_b =
container_of(timer, struct cfs_bandwidth, period_timer);
int overrun;
int idle = 0;
+ int count = 0;
raw_spin_lock(&cfs_b->lock);
for (;;) {
@@ -4360,6 +4388,36 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
if (!overrun)
break;
+ if (++count > 3) {
+ u64 new, old = ktime_to_ns(cfs_b->period);
+
+ /*
+ * Grow period by a factor of 2 to avoid losing precision.
+ * Precision loss in the quota/period ratio can cause __cfs_schedulable
+ * to fail.
+ */
+ new = old * 2;
+ if (new < max_cfs_quota_period) {
+ cfs_b->period = ns_to_ktime(new);
+ cfs_b->quota *= 2;
+
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(new, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+ } else {
+ pr_warn_ratelimited(
+ "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+ smp_processor_id(),
+ div_u64(old, NSEC_PER_USEC),
+ div_u64(cfs_b->quota, NSEC_PER_USEC));
+ }
+
+ /* reset count so we don't come right back in here */
+ count = 0;
+ }
+
idle = do_sched_cfs_period_timer(cfs_b, overrun);
}
if (idle)
@@ -6634,10 +6692,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
if (cfs_rq->last_h_load_update == now)
return;
- cfs_rq->h_load_next = NULL;
+ WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- cfs_rq->h_load_next = se;
+ WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
@@ -6647,7 +6705,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
cfs_rq->last_h_load_update = now;
}
- while ((se = cfs_rq->h_load_next) != NULL) {
+ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
@@ -7879,9 +7937,10 @@ more_balance:
out_balanced:
/*
* We reach balance although we may have faced some affinity
- * constraints. Clear the imbalance flag if it was set.
+ * constraints. Clear the imbalance flag only if other tasks got
+ * a chance to move and fix the imbalance.
*/
- if (sd_parent) {
+ if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if (*group_imbalance)
@@ -7899,13 +7958,22 @@ out_all_pinned:
sd->nr_balance_failed = 0;
out_one_pinned:
+ ld_moved = 0;
+
+ /*
+ * idle_balance() disregards balance intervals, so we could repeatedly
+ * reach this code, which would lead to balance_interval skyrocketing
+ * in a short amount of time. Skip the balance_interval increase logic
+ * to avoid that.
+ */
+ if (env.idle == CPU_NEWLY_IDLE)
+ goto out;
+
/* tune up the balancing interval */
if (((env.flags & LBF_ALL_PINNED) &&
sd->balance_interval < MAX_PINNED_INTERVAL) ||
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
-
- ld_moved = 0;
out:
return ld_moved;
}
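An arithmetic sketch of the sched_cfs_period_timer() scaling above: when the timer keeps firing back to back, period and quota are both doubled so their ratio (the permitted bandwidth) is unchanged while the timer rate halves. The 1 s ceiling mirrors max_cfs_quota_period; the starting values are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define MAX_CFS_QUOTA_PERIOD 1000000000ULL	/* 1 s in ns */

int main(void)
{
	uint64_t period = 100000ULL * 1000;	/* 100 ms in ns */
	uint64_t quota  = 50000ULL * 1000;	/*  50 ms in ns */

	while (period * 2 < MAX_CFS_QUOTA_PERIOD) {
		period *= 2;
		quota  *= 2;
		printf("period=%lluus quota=%lluus ratio=%.2f\n",
		       (unsigned long long)(period / 1000),
		       (unsigned long long)(quota / 1000),
		       (double)quota / period);
	}
	return 0;
}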
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec6e838e991a..819bd5fb0264 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
#include <linux/kernel_stat.h>
@@ -72,7 +73,13 @@ static inline void update_idle_core(struct rq *rq) { }
#ifdef CONFIG_64BIT
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
-# define scale_load_down(w) ((w) >> SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w) \
+({ \
+ unsigned long __w = (w); \
+ if (__w) \
+ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
+ __w; \
+})
#else
# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w) (w)
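A sketch of the new scale_load_down() behaviour: shifting a small but non-zero weight down must not collapse it to 0 (or 1), so the result is clamped to a minimum of 2. SCHED_FIXEDPOINT_SHIFT is 10 upstream and is treated as a given here; the function form replaces the kernel macro for readability only.

#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT 10

static unsigned long scale_load_down(unsigned long w)
{
	unsigned long s = w >> SCHED_FIXEDPOINT_SHIFT;

	if (!w)
		return 0;
	return s > 2UL ? s : 2UL;	/* max(2, w >> SHIFT) */
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       scale_load_down(0),		/* zero stays zero      */
	       scale_load_down(15),		/* would shift to 0 -> 2 */
	       scale_load_down(1024 * 88));	/* normal case -> 88    */
	return 0;
}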
diff --git a/kernel/signal.c b/kernel/signal.c
index c091dcc9f19b..bedca1629f26 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -71,10 +71,19 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
handler = sig_handler(t, sig);
+ /* SIGKILL and SIGSTOP may not be sent to the global init */
+ if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
+ return true;
+
if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
handler == SIG_DFL && !(force && sig_kernel_only(sig)))
return 1;
+ /* Only allow kernel generated signals to this kthread */
+ if (unlikely((t->flags & PF_KTHREAD) &&
+ (handler == SIG_KTHREAD_KERNEL) && !force))
+ return true;
+
return sig_handler_ignored(handler, sig);
}
@@ -364,27 +373,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
{
struct sigqueue *q = NULL;
struct user_struct *user;
+ int sigpending;
/*
* Protect access to @t credentials. This can go away when all
* callers hold rcu read lock.
+ *
+ * NOTE! A pending signal will hold on to the user refcount,
+ * and we get/put the refcount only when the sigpending count
+ * changes from/to zero.
*/
rcu_read_lock();
- user = get_uid(__task_cred(t)->user);
- atomic_inc(&user->sigpending);
+ user = __task_cred(t)->user;
+ sigpending = atomic_inc_return(&user->sigpending);
+ if (sigpending == 1)
+ get_uid(user);
rcu_read_unlock();
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
+ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
if (unlikely(q == NULL)) {
- atomic_dec(&user->sigpending);
- free_uid(user);
+ if (atomic_dec_and_test(&user->sigpending))
+ free_uid(user);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = 0;
@@ -398,8 +412,8 @@ static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
- atomic_dec(&q->user->sigpending);
- free_uid(q->user);
+ if (atomic_dec_and_test(&q->user->sigpending))
+ free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
}
@@ -1646,7 +1660,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
- if (tsk->parent_exec_id != tsk->parent->self_exec_id)
+ if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
sig = SIGCHLD;
}
@@ -2244,6 +2258,8 @@ relock:
if (signal_group_exit(signal)) {
ksig->info.si_signo = signr = SIGKILL;
sigdelset(&current->pending.signal, SIGKILL);
+ trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+ &sighand->action[SIGKILL - 1]);
recalc_sigpending();
goto fatal;
}
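A user-space sketch of the sigpending accounting change: the user_struct reference is taken only on the 0 -> 1 transition of the per-user pending count and dropped only when it returns to 0, instead of once per queued signal. get_uid()/free_uid() are modelled as a plain counter here.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sigpending;	/* pending signals charged to this user     */
static atomic_int uid_refs;	/* stand-in for the user_struct refcount    */

static void queue_signal(void)
{
	if (atomic_fetch_add(&sigpending, 1) == 0)
		atomic_fetch_add(&uid_refs, 1);		/* get_uid() on 0 -> 1 */
}

static void free_signal(void)
{
	if (atomic_fetch_sub(&sigpending, 1) == 1)
		atomic_fetch_sub(&uid_refs, 1);		/* free_uid() on 1 -> 0 */
}

int main(void)
{
	queue_signal();
	queue_signal();
	free_signal();
	free_signal();
	printf("pending=%d uid_refs=%d\n",
	       atomic_load(&sigpending), atomic_load(&uid_refs));
	return 0;
}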
diff --git a/kernel/sys.c b/kernel/sys.c
index 6c4e9b533258..157277cbf83a 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1762,7 +1762,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
((unsigned long)prctl_map->__m1 __op \
(unsigned long)prctl_map->__m2) ? 0 : -EINVAL
error = __prctl_check_order(start_code, <, end_code);
- error |= __prctl_check_order(start_data, <, end_data);
+ error |= __prctl_check_order(start_data,<=, end_data);
error |= __prctl_check_order(start_brk, <=, brk);
error |= __prctl_check_order(arg_start, <=, arg_end);
error |= __prctl_check_order(env_start, <=, env_end);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index efd340a510a9..34449ec0689d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -124,7 +124,9 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
+static unsigned long zero_ul;
static unsigned long one_ul = 1;
+static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
static int one_thousand = 1000;
#ifdef CONFIG_PRINTK
@@ -1396,7 +1398,7 @@ static struct ctl_table vm_table[] = {
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = &one,
.extra2 = &four,
@@ -1682,6 +1684,8 @@ static struct ctl_table fs_table[] = {
.maxlen = sizeof(files_stat.max_files),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
+ .extra1 = &zero_ul,
+ .extra2 = &long_max,
},
{
.procname = "nr_open",
@@ -2523,8 +2527,10 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
if (neg)
continue;
val = convmul * val / convdiv;
- if ((min && val < *min) || (max && val > *max))
- continue;
+ if ((min && val < *min) || (max && val > *max)) {
+ err = -EINVAL;
+ break;
+ }
*i = val;
} else {
val = convdiv * (*i) / convmul;
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index cbb387a265db..23df1fbad4b4 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -559,25 +559,33 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
- struct taskstats *stats;
+ struct taskstats *stats_new, *stats;
- if (sig->stats || thread_group_empty(tsk))
- goto ret;
+ /* Pairs with smp_store_release() below. */
+ stats = smp_load_acquire(&sig->stats);
+ if (stats || thread_group_empty(tsk))
+ return stats;
/* No problem if kmem_cache_zalloc() fails */
- stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
spin_lock_irq(&tsk->sighand->siglock);
- if (!sig->stats) {
- sig->stats = stats;
- stats = NULL;
+ stats = sig->stats;
+ if (!stats) {
+ /*
+ * Pairs with smp_store_release() above and order the
+ * kmem_cache_zalloc().
+ */
+ smp_store_release(&sig->stats, stats_new);
+ stats = stats_new;
+ stats_new = NULL;
}
spin_unlock_irq(&tsk->sighand->siglock);
- if (stats)
- kmem_cache_free(taskstats_cache, stats);
-ret:
- return sig->stats;
+ if (stats_new)
+ kmem_cache_free(taskstats_cache, stats_new);
+
+ return stats;
}
/* Send pid data out on exit */
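A sketch of the acquire/release publish pattern used for sig->stats above: a reader that sees a non-NULL pointer must also see the fully initialised object behind it. C11 atomics stand in for smp_load_acquire()/smp_store_release(); the siglock that serialises writers in the kernel is omitted, so this sketch assumes a single writer. struct stats is invented.

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct stats { long value; };

static _Atomic(struct stats *) shared_stats;

static struct stats *get_stats(void)
{
	/* Pairs with the release store below. */
	struct stats *s = atomic_load_explicit(&shared_stats, memory_order_acquire);

	if (s)
		return s;

	s = calloc(1, sizeof(*s));
	s->value = 42;					/* initialise before publishing */
	atomic_store_explicit(&shared_stats, s, memory_order_release);
	return s;
}

int main(void)
{
	printf("%ld\n", get_stats()->value);
	return 0;
}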
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index a0ee81f49a87..6aef4a0bed29 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -544,7 +544,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
enum alarmtimer_type type;
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -586,7 +586,7 @@ static void alarm_timer_get(struct k_itimer *timr,
static int alarm_timer_del(struct k_itimer *timr)
{
if (!rtcdev)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
return TIMER_RETRY;
@@ -610,7 +610,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
ktime_t exp;
if (!rtcdev)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (flags & ~TIMER_ABSTIME)
return -EINVAL;
@@ -772,7 +772,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
struct restart_block *restart;
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (flags & ~TIMER_ABSTIME)
return -EINVAL;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7e4fad75acaa..2924ff544c9e 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -272,8 +272,15 @@ static void clocksource_watchdog(unsigned long data)
next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(cpu_online_mask);
- watchdog_timer.expires += WATCHDOG_INTERVAL;
- add_timer_on(&watchdog_timer, next_cpu);
+
+ /*
+ * Arm timer if not already pending: could race with concurrent
+ * pair clocksource_stop_watchdog() clocksource_start_watchdog().
+ */
+ if (!timer_pending(&watchdog_timer)) {
+ watchdog_timer.expires += WATCHDOG_INTERVAL;
+ add_timer_on(&watchdog_timer, next_cpu);
+ }
out:
spin_unlock(&watchdog_lock);
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index c93729661be0..f31637ced7fa 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -870,7 +870,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
- timer->state = HRTIMER_STATE_ENQUEUED;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
@@ -892,7 +893,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
- timer->state = newstate;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, newstate);
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -919,8 +921,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
- if (hrtimer_is_queued(timer)) {
- u8 state = timer->state;
+ u8 state = timer->state;
+
+ if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;
/*
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6df8927c58a5..4bdb59604526 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -42,6 +42,7 @@ static u64 tick_length_base;
#define MAX_TICKADJ 500LL /* usecs */
#define MAX_TICKADJ_SCALED \
(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+#define MAX_TAI_OFFSET 100000
/*
* phase-lock loop variables
@@ -639,7 +640,8 @@ static inline void process_adjtimex_modes(struct timex *txc,
time_constant = max(time_constant, 0l);
}
- if (txc->modes & ADJ_TAI && txc->constant > 0)
+ if (txc->modes & ADJ_TAI &&
+ txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
*time_tai = txc->constant;
if (txc->modes & ADJ_OFFSET)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index b625cc7fcc1c..b5603248d841 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1586,21 +1586,23 @@ void timer_clear_idle(void)
static int collect_expired_timers(struct timer_base *base,
struct hlist_head *heads)
{
+ unsigned long now = READ_ONCE(jiffies);
+
/*
* NOHZ optimization. After a long idle sleep we need to forward the
* base to current jiffies. Avoid a loop by searching the bitfield for
* the next expiring timer.
*/
- if ((long)(jiffies - base->clk) > 2) {
+ if ((long)(now - base->clk) > 2) {
unsigned long next = __next_timer_interrupt(base);
/*
* If the next timer is ahead of time forward to current
* jiffies, otherwise forward to the next expiry time:
*/
- if (time_after(next, jiffies)) {
+ if (time_after(next, now)) {
/* The call site will increment clock! */
- base->clk = jiffies - 1;
+ base->clk = now - 1;
return 0;
}
base->clk = next;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1407ed20ea93..b7c5d230b4b2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -299,23 +299,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
SEQ_printf(m, "\n");
}
-static int timer_list_show(struct seq_file *m, void *v)
-{
- struct timer_list_iter *iter = v;
-
- if (iter->cpu == -1 && !iter->second_pass)
- timer_list_header(m, iter->now);
- else if (!iter->second_pass)
- print_cpu(m, iter->cpu, iter->now);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
- else if (iter->cpu == -1 && iter->second_pass)
- timer_list_show_tickdevices_header(m);
- else
- print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
-#endif
- return 0;
-}
-
void sysrq_timer_list_show(void)
{
u64 now = ktime_to_ns(ktime_get());
@@ -334,6 +317,24 @@ void sysrq_timer_list_show(void)
return;
}
+#ifdef CONFIG_PROC_FS
+static int timer_list_show(struct seq_file *m, void *v)
+{
+ struct timer_list_iter *iter = v;
+
+ if (iter->cpu == -1 && !iter->second_pass)
+ timer_list_header(m, iter->now);
+ else if (!iter->second_pass)
+ print_cpu(m, iter->cpu, iter->now);
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ else if (iter->cpu == -1 && iter->second_pass)
+ timer_list_show_tickdevices_header(m);
+ else
+ print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
+#endif
+ return 0;
+}
+
static void *move_iter(struct timer_list_iter *iter, loff_t offset)
{
for (; offset; offset--) {
@@ -405,3 +406,4 @@ static int __init init_timer_list_procfs(void)
return 0;
}
__initcall(init_timer_list_procfs);
+#endif
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 087204c733eb..c74920f318c5 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -417,7 +417,7 @@ static int __init init_tstats_procfs(void)
{
struct proc_dir_entry *pe;
- pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
if (!pe)
return -ENOMEM;
return 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8f4227d4cd39..2ae98f8bce81 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -32,6 +32,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
+#include <linux/kprobes.h>
#include <trace/events/sched.h>
@@ -608,8 +609,7 @@ static int function_stat_show(struct seq_file *m, void *v)
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- avg = rec->time;
- do_div(avg, rec->counter);
+ avg = div64_ul(rec->time, rec->counter);
if (tracing_thresh && (avg < tracing_thresh))
goto out;
#endif
@@ -635,7 +635,8 @@ static int function_stat_show(struct seq_file *m, void *v)
* Divide only 1000 for ns^2 -> us^2 conversion.
* trace_print_graph_duration will divide 1000 again.
*/
- do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+ stddev = div64_ul(stddev,
+ rec->counter * (rec->counter - 1) * 1000);
}
trace_seq_init(&s);
@@ -1630,6 +1631,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
return keep_regs;
}
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash,
bool inc)
@@ -1758,15 +1764,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
}
/*
- * If the rec had TRAMP enabled, then it needs to
- * be cleared. As TRAMP can only be enabled iff
- * there is only a single ops attached to it.
- * In otherwords, always disable it on decrementing.
- * In the future, we may set it if rec count is
- * decremented to one, and the ops that is left
- * has a trampoline.
+ * The TRAMP needs to be set only if rec count
+ * is decremented to one, and the ops that is
+ * left has a trampoline. As TRAMP can only be
+ * enabled if there is only a single ops attached
+ * to it.
*/
- rec->flags &= ~FTRACE_FL_TRAMP;
+ if (ftrace_rec_count(rec) == 1 &&
+ ftrace_find_tramp_ops_any(rec))
+ rec->flags |= FTRACE_FL_TRAMP;
+ else
+ rec->flags &= ~FTRACE_FL_TRAMP;
/*
* flags will be cleared in ftrace_check_record()
@@ -1959,11 +1967,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;
@@ -5246,7 +5249,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
tr->ops->func = ftrace_stub;
}
-static inline void
+static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{
@@ -5309,11 +5312,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
{
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
#endif
/*
@@ -5343,6 +5348,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
preempt_enable_notrace();
trace_clear_recursion(bit);
}
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
/**
* ftrace_ops_get_func - get the function a trampoline should call
@@ -5449,9 +5455,10 @@ static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
struct trace_array *tr = m->private;
struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
- if (v == FTRACE_NO_PIDS)
+ if (v == FTRACE_NO_PIDS) {
+ (*pos)++;
return NULL;
-
+ }
return trace_pid_next(pid_list, v, pos);
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f316e90ad538..2cfe11e1190b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -701,7 +701,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
preempt_disable_notrace();
time = rb_time_stamp(buffer);
- preempt_enable_no_resched_notrace();
+ preempt_enable_notrace();
return time;
}
@@ -4037,6 +4037,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
* @buffer: The ring buffer to read from
* @cpu: The cpu buffer to iterate over
+ * @flags: gfp flags to use for memory allocation
*
* This performs the initial preparations necessary to iterate
* through the buffer. Memory is allocated, buffer recording
@@ -4054,7 +4055,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
* This overall must be paired with ring_buffer_read_finish.
*/
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
@@ -4062,7 +4063,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ iter = kmalloc(sizeof(*iter), flags);
if (!iter)
return NULL;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f18dedf9195e..6a170a78b453 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -500,8 +500,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
* not modified.
*/
pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
- if (!pid_list)
+ if (!pid_list) {
+ trace_parser_put(&parser);
return -ENOMEM;
+ }
pid_list->pid_max = READ_ONCE(pid_max);
@@ -511,6 +513,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
if (!pid_list->pids) {
+ trace_parser_put(&parser);
kfree(pid_list);
return -ENOMEM;
}
@@ -3449,7 +3452,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ cpu, GFP_KERNEL);
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
@@ -3459,7 +3463,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter->trace_buffer->buffer,
+ cpu, GFP_KERNEL);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
@@ -3695,9 +3700,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
if (tracing_disabled)
return -ENODEV;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
ret = seq_open(file, &show_traces_seq_ops);
- if (ret)
+ if (ret) {
+ trace_array_put(tr);
return ret;
+ }
m = file->private_data;
m->private = tr;
@@ -3705,6 +3715,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
return 0;
}
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+ return seq_release(inode, file);
+}
+
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
@@ -3735,8 +3753,8 @@ static const struct file_operations tracing_fops = {
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
- .release = seq_release,
.llseek = seq_lseek,
+ .release = show_traces_release,
};
static ssize_t
@@ -5199,6 +5217,7 @@ waitagain:
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
cpumask_clear(iter->started);
+ trace_seq_init(&iter->seq);
iter->pos = -1;
trace_event_read_lock();
@@ -5815,11 +5834,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
break;
}
#endif
- if (!tr->allocated_snapshot) {
+ if (!tr->allocated_snapshot)
+ ret = resize_buffer_duplicate_size(&tr->max_buffer,
+ &tr->trace_buffer, iter->cpu_file);
+ else
ret = alloc_snapshot(tr);
- if (ret < 0)
- break;
- }
+
+ if (ret < 0)
+ break;
+
local_irq_disable();
/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
@@ -6140,12 +6163,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
buf->private = 0;
}
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+ if (ref->ref > INT_MAX/2)
+ return false;
+
ref->ref++;
+ return true;
}
/* Pipe buffer operations for a buffer. */
@@ -7604,12 +7631,8 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
cnt++;
- /* reset all but tr, trace, and overruns */
- memset(&iter.seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ trace_iterator_reset(&iter);
iter.iter_flags |= TRACE_FILE_LAT_FMT;
- iter.pos = -1;
if (trace_find_next_entry_inc(&iter) != NULL) {
int ret;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index b0d8576c27ae..476c6c4204da 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1673,4 +1673,22 @@ static inline void trace_event_enum_update(struct trace_enum_map **map, int len)
extern struct trace_iterator *tracepoint_print_iter;
+/*
+ * Reset the state of the trace_iterator so that it can read consumed data.
+ * Normally, the trace_iterator is used for reading the data when it is not
+ * consumed, and must retain state.
+ */
+static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
+{
+ const size_t offset = offsetof(struct trace_iterator, seq);
+
+ /*
+ * Keep gcc from complaining about overwriting more than just one
+ * member in the structure.
+ */
+ memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
+
+ iter->pos = -1;
+}
+
#endif /* _LINUX_KERNEL_TRACE_H */
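A sketch of the trace_iterator_reset() idea: clear everything in a structure from a given member onward with a single memset() computed via offsetof(), leaving the earlier members intact. struct iter below is a made-up stand-in, not struct trace_iterator.

#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct iter {
	void *tr;		/* preserved across resets         */
	void *trace;		/* preserved across resets         */
	char  seq[16];		/* first member that gets cleared  */
	long  pos;
};

static void iter_reset(struct iter *it)
{
	const size_t off = offsetof(struct iter, seq);

	memset((char *)it + off, 0, sizeof(*it) - off);
	it->pos = -1;
}

int main(void)
{
	struct iter it = { .tr = &it, .pos = 123 };

	snprintf(it.seq, sizeof(it.seq), "old");
	iter_reset(&it);
	printf("tr kept: %d, seq[0]=%d, pos=%ld\n", it.tr == &it, it.seq[0], it.pos);
	return 0;
}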
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 9549ed120556..af969f753e5e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1310,9 +1310,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
char buf[32];
int len;
- if (*ppos)
- return 0;
-
if (unlikely(!id))
return -ENODEV;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 7e6971ba9541..c9ca2ed50c0e 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -126,9 +126,10 @@ static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
struct trace_event_file *event_file = event_file_data(m->private);
- if (t == SHOW_AVAILABLE_TRIGGERS)
+ if (t == SHOW_AVAILABLE_TRIGGERS) {
+ (*pos)++;
return NULL;
-
+ }
return seq_list_next(t, &event_file->triggers, pos);
}
@@ -1067,14 +1068,10 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
struct event_trigger_data *data,
struct trace_event_file *file)
{
- int ret = register_trigger(glob, ops, data, file);
-
- if (ret > 0 && tracing_alloc_snapshot() != 0) {
- unregister_trigger(glob, ops, data, file);
- ret = 0;
- }
+ if (tracing_alloc_snapshot() != 0)
+ return 0;
- return ret;
+ return register_trigger(glob, ops, data, file);
}
static int
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index f00b0131c8f9..5fe23f0ee7db 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -151,7 +151,7 @@ void trace_hwlat_callback(bool enter)
if (enter)
nmi_ts_start = time_get();
else
- nmi_total_ts = time_get() - nmi_ts_start;
+ nmi_total_ts += time_get() - nmi_ts_start;
}
if (enter)
@@ -257,6 +257,8 @@ static int get_sample(void)
/* Keep a running maximum ever recorded hardware latency */
if (sample > tr->max_latency)
tr->max_latency = sample;
+ if (outer_sample > tr->max_latency)
+ tr->max_latency = outer_sample;
}
out:
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 57149bce6aad..cf6337dc41f4 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -40,24 +40,22 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
kdb_printf("Dumping ftrace buffer:\n");
- /* reset all but tr, trace, and overruns */
- memset(&iter.seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ trace_iterator_reset(&iter);
iter.iter_flags |= TRACE_FILE_LAT_FMT;
- iter.pos = -1;
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu) {
iter.buffer_iter[cpu] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ cpu, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu]);
tracing_iter_reset(&iter, cpu);
}
} else {
iter.cpu_file = cpu_file;
iter.buffer_iter[cpu_file] =
- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
+ ring_buffer_read_prepare(iter.trace_buffer->buffer,
+ cpu_file, GFP_ATOMIC);
ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file);
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b553a3..6403f45da9d5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -625,7 +625,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n");
- return;
+ goto fail_deprobe_sched_switch;
}
wakeup_reset(tr);
@@ -643,6 +643,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
printk(KERN_ERR "failed to start wakeup tracer\n");
return;
+fail_deprobe_sched_switch:
+ unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 2a1abbaca10e..5d2c08efd12f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -201,6 +201,11 @@ check_stack(unsigned long ip, unsigned long *stack)
local_irq_restore(flags);
}
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 413ff108fbd0..d19f2191960e 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -277,18 +277,22 @@ static int tracing_stat_init(void)
d_tracing = tracing_init_dentry();
if (IS_ERR(d_tracing))
- return 0;
+ return -ENODEV;
stat_dir = tracefs_create_dir("trace_stat", d_tracing);
- if (!stat_dir)
+ if (!stat_dir) {
pr_warn("Could not create tracefs 'trace_stat' entry\n");
+ return -ENOMEM;
+ }
return 0;
}
static int init_stat_file(struct stat_session *session)
{
- if (!stat_dir && tracing_stat_init())
- return -ENODEV;
+ int ret;
+
+ if (!stat_dir && (ret = tracing_stat_init()))
+ return ret;
session->file = tracefs_create_file(session->ts->name, 0644,
stat_dir,
@@ -301,7 +305,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace)
{
struct stat_session *session, *node;
- int ret;
+ int ret = -EINVAL;
if (!trace)
return -EINVAL;
@@ -312,17 +316,15 @@ int register_stat_tracer(struct tracer_stat *trace)
/* Already registered? */
mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) {
- if (node->ts == trace) {
- mutex_unlock(&all_stat_sessions_mutex);
- return -EINVAL;
- }
+ if (node->ts == trace)
+ goto out;
}
- mutex_unlock(&all_stat_sessions_mutex);
+ ret = -ENOMEM;
/* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
- return -ENOMEM;
+ goto out;
session->ts = trace;
INIT_LIST_HEAD(&session->session_list);
@@ -331,15 +333,16 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session);
if (ret) {
destroy_session(session);
- return ret;
+ goto out;
}
+ ret = 0;
/* Register */
- mutex_lock(&all_stat_sessions_mutex);
list_add_tail(&session->session_list, &all_stat_sessions);
+ out:
mutex_unlock(&all_stat_sessions_mutex);
- return 0;
+ return ret;
}
void unregister_stat_tracer(struct tracer_stat *trace)
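
register_stat_tracer() now holds all_stat_sessions_mutex across the duplicate check, the allocation and the final list insertion, funnelling every failure through one unlock path. A hedged userspace sketch of that shape (pthread mutex and a fixed array stand in for the kernel mutex and list):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *registered[16];
static int nr_registered;

/* Duplicate check, allocation and insertion under one critical section. */
static int register_name(const char *name)
{
	int i, ret = -EINVAL;

	pthread_mutex_lock(&reg_lock);
	for (i = 0; i < nr_registered; i++)
		if (!strcmp(registered[i], name))
			goto out;		/* already registered */

	ret = -ENOMEM;
	if (nr_registered == 16)
		goto out;
	registered[nr_registered] = strdup(name);
	if (!registered[nr_registered])
		goto out;

	nr_registered++;
	ret = 0;
out:
	pthread_mutex_unlock(&reg_lock);
	return ret;
}
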
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 305039b122fa..35b2ba07f3c6 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -90,8 +90,8 @@ static int tracing_map_cmp_atomic64(void *val_a, void *val_b)
#define DEFINE_TRACING_MAP_CMP_FN(type) \
static int tracing_map_cmp_##type(void *val_a, void *val_b) \
{ \
- type a = *(type *)val_a; \
- type b = *(type *)val_b; \
+ type a = (type)(*(u64 *)val_a); \
+ type b = (type)(*(u64 *)val_b); \
\
return (a > b) ? 1 : ((a < b) ? -1 : 0); \
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1961dd408bc5..00c295d3104b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1384,14 +1384,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
WARN_ON_ONCE(!is_chained_work(wq)))
return;
retry:
- if (req_cpu == WORK_CPU_UNBOUND)
- cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
/* pwq which will be used unless @work is executing elsewhere */
- if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
- else
+ if (wq->flags & WQ_UNBOUND) {
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+ } else {
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = raw_smp_processor_id();
+ pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+ }
/*
* If @work was previously on a different pool, it might still be
@@ -2344,8 +2346,14 @@ repeat:
*/
if (need_to_create_worker(pool)) {
spin_lock(&wq_mayday_lock);
- get_pwq(pwq);
- list_move_tail(&pwq->mayday_node, &wq->maydays);
+ /*
+ * Queue iff we aren't racing destruction
+ * and somebody else hasn't queued it already.
+ */
+ if (wq->rescuer && list_empty(&pwq->mayday_node)) {
+ get_pwq(pwq);
+ list_add_tail(&pwq->mayday_node, &wq->maydays);
+ }
spin_unlock(&wq_mayday_lock);
}
}
@@ -4031,9 +4039,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct pool_workqueue *pwq;
int node;
+ /*
+ * Remove it from sysfs first so that sanity check failure doesn't
+ * lead to sysfs name conflicts.
+ */
+ workqueue_sysfs_unregister(wq);
+
/* drain it before proceeding with destruction */
drain_workqueue(wq);
+ /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
+ if (wq->rescuer) {
+ struct worker *rescuer = wq->rescuer;
+
+ /* this prevents new queueing */
+ spin_lock_irq(&wq_mayday_lock);
+ wq->rescuer = NULL;
+ spin_unlock_irq(&wq_mayday_lock);
+
+ /* rescuer will empty maydays list before exiting */
+ kthread_stop(rescuer->task);
+ kfree(rescuer);
+ }
+
/* sanity checks */
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
@@ -4063,11 +4091,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del_rcu(&wq->list);
mutex_unlock(&wq_pool_mutex);
- workqueue_sysfs_unregister(wq);
-
- if (wq->rescuer)
- kthread_stop(wq->rescuer->task);
-
if (!(wq->flags & WQ_UNBOUND)) {
/*
* The base ref is never dropped on per-cpu pwqs. Directly
@@ -4344,7 +4367,8 @@ static void show_pwq(struct pool_workqueue *pwq)
pr_info(" pwq %d:", pool->id);
pr_cont_pool_info(pool);
- pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
+ pr_cont(" active=%d/%d refcnt=%d%s\n",
+ pwq->nr_active, pwq->max_active, pwq->refcnt,
!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
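
After the __queue_work() restructure, a caller-supplied WORK_CPU_UNBOUND resolves differently per workqueue type: unbound workqueues may redirect via wq_select_unbound_cpu(), per-CPU workqueues simply stay on the local CPU. A self-contained sketch of just that decision (the constant and both CPU inputs are stand-ins supplied by the caller, not the kernel helpers):

#include <stdbool.h>

#define DEMO_WORK_CPU_UNBOUND	(-1)	/* stand-in for WORK_CPU_UNBOUND */

static int pick_queue_cpu(bool wq_is_unbound, int req_cpu,
			  int local_cpu, int unbound_pick)
{
	if (req_cpu != DEMO_WORK_CPU_UNBOUND)
		return req_cpu;		/* an explicit CPU always wins */
	if (wq_is_unbound)
		return unbound_pick;	/* wq_select_unbound_cpu() result */
	return local_cpu;		/* per-CPU wq: never redirect */
}
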
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 58a22ca10f33..bc5ff3a53d4a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -535,7 +535,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
int "Maximum kmemleak early log entries"
depends on DEBUG_KMEMLEAK
range 200 40000
- default 400
+ default 16000
help
Kmemleak must track all the memory allocations to avoid
reporting false positives. Since memory may be allocated or
@@ -1822,9 +1822,9 @@ config TEST_HASH
tristate "Perform selftest on hash functions"
default n
help
- Enable this option to test the kernel's integer (<linux/hash,h>)
- and string (<linux/stringhash.h>) hash functions on boot
- (or module load).
+ Enable this option to test the kernel's integer (<linux/hash.h>),
+ string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+ hash functions on boot (or module load).
This is intended to help people writing architecture-specific
optimized versions. If unsure, say N.
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3aeebd..452d2956a5a2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,7 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
+ earlycpio.o seq_buf.o siphash.o \
+ nmi_backtrace.o nodemask.o win_minmax.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -44,8 +45,9 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+CFLAGS_test_kasan.o += -fno-builtin
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..a44b20a829dd 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@@ -1212,3 +1213,22 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
}
EXPORT_SYMBOL(bitmap_copy_le);
#endif
+
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+{
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
+}
+EXPORT_SYMBOL(bitmap_alloc);
+
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+{
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(bitmap_zalloc);
+
+void bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+EXPORT_SYMBOL(bitmap_free);
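
The new bitmap_alloc()/bitmap_zalloc()/bitmap_free() helpers hide the BITS_TO_LONGS() arithmetic from callers. A minimal in-kernel usage sketch (the bit count and variable names are illustrative):

#include <linux/bitmap.h>
#include <linux/slab.h>

static int demo_track_ids(void)
{
	unsigned long *ids;

	ids = bitmap_zalloc(128, GFP_KERNEL);	/* 128 bits, all clear */
	if (!ids)
		return -ENOMEM;

	set_bit(5, ids);			/* mark id 5 as in use */
	if (test_bit(5, ids))
		clear_bit(5, ids);

	bitmap_free(ids);
	return 0;
}
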
diff --git a/lib/bsearch.c b/lib/bsearch.c
index e33c179089db..d50048446b77 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/bsearch.h>
+#include <linux/kprobes.h>
/*
* bsearch - binary search an array of elements
@@ -51,3 +52,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
return NULL;
}
EXPORT_SYMBOL(bsearch);
+NOKPROBE_SYMBOL(bsearch);
diff --git a/lib/devres.c b/lib/devres.c
index cb1464c411a2..38912892053c 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
* if (IS_ERR(base))
* return PTR_ERR(base);
*/
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
{
resource_size_t size;
const char *name;
diff --git a/lib/div64.c b/lib/div64.c
index 7f345259c32f..c1c1a4c36dd5 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -102,7 +102,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
quot = div_u64_rem(dividend, divisor, &rem32);
*remainder = rem32;
} else {
- int n = 1 + fls(high);
+ int n = fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
@@ -140,7 +140,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
- int n = 1 + fls(high);
+ int n = fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
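
The fls() change matters because shifting the divisor right by fls(high) bits is already enough to clear its upper 32 bits; the old "1 + fls(high)" discarded one extra bit of precision. A standalone demonstration of the normalization step (userspace, with __builtin_clz standing in for fls()):

#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x)		/* position of the highest set bit, 1..32 */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint64_t divisor = 0x100000000ULL;	/* high word = 1, fls = 1 */
	uint32_t high = divisor >> 32;
	int n = fls32(high);

	/* After shifting by n the divisor fits in 32 bits again. */
	printf("n=%d shifted=0x%llx\n", n,
	       (unsigned long long)(divisor >> n));
	return 0;
}
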
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8971370bfb16..4435bec55fb5 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -435,6 +435,7 @@ void debug_dma_dump_mappings(struct device *dev)
}
spin_unlock_irqrestore(&bucket->lock, flags);
+ cond_resched();
}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c30d07e99dba..72de6444934d 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -44,7 +44,12 @@ retry:
was_locked = 1;
} else {
local_irq_restore(flags);
- cpu_relax();
+ /*
+ * Wait for the lock to release before jumping to
+ * atomic_cmpxchg() in order to mitigate the thundering herd
+ * problem.
+ */
+ do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
goto retry;
}
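
The dump_stack() change spins on a plain read until the lock looks free before retrying the atomic compare-and-exchange, the classic test-and-test-and-set mitigation for cacheline ping-pong. A compact C11 sketch of the same pattern (the -1 "unowned" convention follows the code above; sched_yield() stands in for cpu_relax()):

#include <stdatomic.h>
#include <sched.h>

static atomic_int demo_dump_lock = ATOMIC_VAR_INIT(-1);  /* -1: unowned, else owner CPU */

static void lock_for_dump(int cpu)
{
	int expected;

	for (;;) {
		expected = -1;
		if (atomic_compare_exchange_strong(&demo_dump_lock, &expected, cpu))
			return;		/* we own the lock now */
		/* Wait for the holder to drop the lock before hitting the
		 * cacheline with another cmpxchg (thundering herd). */
		while (atomic_load(&demo_dump_lock) != -1)
			sched_yield();
	}
}
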
diff --git a/lib/genalloc.c b/lib/genalloc.c
index ca06adc4f445..7e85d1e37a6e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
+#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
@@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
int nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
- chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+ chunk = vzalloc_node(nbytes, nid);
if (unlikely(chunk == NULL))
return -ENOMEM;
@@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool)
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
- kfree(chunk);
+ vfree(chunk);
}
kfree_const(pool->name);
kfree(pool);
@@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = algo(chunk->bits, end_bit, start_bit,
- nbits, data, pool);
+ nbits, data, pool, chunk->start_addr);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
@@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
*/
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
struct genpool_data_align *alignment;
- unsigned long align_mask;
+ unsigned long align_mask, align_off;
int order;
alignment = data;
order = pool->min_alloc_order;
align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
- return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ align_off = (start_addr & (alignment->align - 1)) >> order;
+
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
@@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
*/
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
struct genpool_data_fixed *fixed_data;
int order;
@@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
- unsigned int nr, void *data, struct gen_pool *pool)
+ unsigned int nr, void *data, struct gen_pool *pool,
+ unsigned long start_addr)
{
unsigned long align_mask = roundup_pow_of_two(nr) - 1;
@@ -624,7 +629,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
*/
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
unsigned long start_bit = size;
unsigned long len = size + 1;
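
The extra start_addr argument lets gen_pool_first_fit_align() honour alignment relative to the chunk's start address instead of bit 0 of the bitmap. A usage sketch for an aligned allocation from an existing pool (the size and alignment values are made up):

#include <linux/genalloc.h>

static unsigned long demo_alloc_aligned(struct gen_pool *pool)
{
	struct genpool_data_align align_data = { .align = 64 };

	/* Ask for 256 bytes aligned to 64; with the fix above the result is
	 * correctly aligned even when the chunk's start address is not. */
	return gen_pool_alloc_algo(pool, 256, gen_pool_first_fit_align,
				   &align_data);
}
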
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 1afb545a37c5..6d35274170bc 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/bitops.h>
/**
* int_sqrt - rough approximation to sqrt
@@ -21,10 +22,7 @@ unsigned long int_sqrt(unsigned long x)
if (x <= 1)
return x;
- m = 1UL << (BITS_PER_LONG - 2);
- while (m > x)
- m >>= 2;
-
+ m = 1UL << (__fls(x) & ~1UL);
while (m != 0) {
b = y + m;
y >>= 1;
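
The int_sqrt() change replaces the "shift m down until it is <= x" loop with a direct computation of the highest even bit position via __fls(), so the main loop starts from a tight estimate. A standalone version of the resulting algorithm for reference (userspace, __builtin_clzl standing in for __fls()):

#include <stdio.h>

static unsigned long int_sqrt_demo(unsigned long x)
{
	unsigned long b, m, y = 0;

	if (x <= 1)
		return x;

	/* highest even bit position at or below the top set bit of x */
	m = 1UL << ((sizeof(long) * 8 - 1 - __builtin_clzl(x)) & ~1UL);
	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	printf("%lu %lu %lu\n", int_sqrt_demo(0), int_sqrt_demo(80),
	       int_sqrt_demo(1UL << 40));
	return 0;
}
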
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 90ba1eb1df06..a94227c55551 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -82,7 +82,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
{
size /= esize;
- size = roundup_pow_of_two(size);
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
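
With the rounding change, a caller-supplied buffer whose size is not a power of two now yields a fifo that uses only the largest power-of-two prefix of the buffer, instead of a capacity silently rounded up past the buffer's end. A usage sketch with the buffer size chosen to show the effect:

#include <linux/kfifo.h>

static int demo_fifo_init(void)
{
	static unsigned char buf[100];	/* not a power of two */
	struct kfifo fifo;
	int ret;

	ret = kfifo_init(&fifo, buf, sizeof(buf));
	if (ret)
		return ret;

	/* kfifo_size() now reports 64, the rounded-down capacity. */
	return kfifo_size(&fifo);
}
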
diff --git a/lib/kobject.c b/lib/kobject.c
index f58c7f2b229c..bbbb067de8ec 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -599,12 +599,15 @@ struct kobject *kobject_get(struct kobject *kobj)
}
EXPORT_SYMBOL(kobject_get);
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
{
+ if (!kobj)
+ return NULL;
if (!kref_get_unless_zero(&kobj->kref))
kobj = NULL;
return kobj;
}
+EXPORT_SYMBOL(kobject_get_unless_zero);
/*
* kobject_cleanup - free kobject resources.
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 468fb7cd1221..edf345b7f06b 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -37,6 +37,7 @@
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
{
mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
+ struct karatsuba_ctx karactx = {};
mpi_ptr_t xp_marker = NULL;
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
@@ -164,13 +165,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
int c;
mpi_limb_t e;
mpi_limb_t carry_limb;
- struct karatsuba_ctx karactx;
xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
if (!xp)
goto enomem;
- memset(&karactx, 0, sizeof karactx);
negative_result = (ep[0] & 1) && base->sign;
i = esize - 1;
@@ -295,8 +294,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
if (mod_shift_cnt)
mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
MPN_NORMALIZE(rp, rsize);
-
- mpihelp_release_karatsuba_ctx(&karactx);
}
if (negative_result && rsize) {
@@ -313,6 +310,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
leave:
rc = 0;
enomem:
+ mpihelp_release_karatsuba_ctx(&karactx);
if (assign_rp)
mpi_assign_limb_space(res, rp, size);
if (mp_marker)
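
The mpi_powm() fix initializes the karatsuba context at declaration and releases it on the common exit path, so every error branch, including the early enomem jumps, frees it exactly once. A generic sketch of that "init at declaration, release at the single exit label" shape:

#include <errno.h>
#include <stdlib.h>

struct scratch { unsigned char *buf; };

static void scratch_release(struct scratch *s)
{
	free(s->buf);			/* free(NULL) is a no-op, so this is
					 * safe even if nothing was allocated */
	s->buf = NULL;
}

static int compute(size_t n)
{
	struct scratch sc = {};		/* zero-init, like karactx = {} */
	int rc = -ENOMEM;

	sc.buf = malloc(n);
	if (!sc.buf)
		goto out;		/* release below is still safe */

	/* ... real work would go here ... */
	rc = 0;
out:
	scratch_release(&sc);		/* single cleanup point */
	return rc;
}
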
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 3057011f5599..10ca69475611 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -24,7 +24,7 @@ endif
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
NEON_FLAGS := -ffreestanding
ifeq ($(ARCH),arm)
-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
endif
ifeq ($(ARCH),arm64)
CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
for (i = 0; i < rep; ++i) {
tmp = $0
gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
+ gsub(/\$#/, n, tmp)
gsub(/\$\*/, "$", tmp)
print tmp
}
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 0ec3f257ffdf..a5d313381539 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -42,8 +42,18 @@
BUG_ON(pad < 0 || pad >= nn);
/* Does the caller provide the syndrome ? */
- if (s != NULL)
- goto decode;
+ if (s != NULL) {
+ for (i = 0; i < nroots; i++) {
+ /* The syndrome is in index form,
+ * so nn represents zero
+ */
+ if (s[i] != nn)
+ goto decode;
+ }
+
+ /* syndrome is zero, no errors to correct */
+ return 0;
+ }
/* form the syndromes; i.e., evaluate data(x) at roots of
* g(x) */
@@ -99,9 +109,9 @@
if (no_eras > 0) {
/* Init lambda to be the erasure locator polynomial */
lambda[1] = alpha_to[rs_modnn(rs,
- prim * (nn - 1 - eras_pos[0]))];
+ prim * (nn - 1 - (eras_pos[0] + pad)))];
for (i = 1; i < no_eras; i++) {
- u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+ u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
for (j = i + 1; j > 0; j--) {
tmp = index_of[lambda[j - 1]];
if (tmp != nn) {
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 004fc70fc56a..ef8c14a56d0a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -317,7 +317,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (prv)
table->nents = ++table->orig_nents;
- return -ENOMEM;
+ return -ENOMEM;
}
sg_init_table(sg, alloc_size);
@@ -496,17 +496,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
if (!miter->__remaining) {
struct scatterlist *sg;
- unsigned long pgoffset;
if (!__sg_page_iter_next(&miter->piter))
return false;
sg = miter->piter.sg;
- pgoffset = miter->piter.sg_pgoffset;
- miter->__offset = pgoffset ? 0 : sg->offset;
+ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+ miter->__offset &= PAGE_SIZE - 1;
miter->__remaining = sg->offset + sg->length -
- (pgoffset << PAGE_SHIFT) - miter->__offset;
+ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+ miter->__offset;
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644
index 000000000000..3ae58b4edad6
--- /dev/null
+++ b/lib/siphash.c
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+ do { \
+ v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+ v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+ v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+ v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+ } while (0)
+
+#define PREAMBLE(len) \
+ u64 v0 = 0x736f6d6570736575ULL; \
+ u64 v1 = 0x646f72616e646f6dULL; \
+ u64 v2 = 0x6c7967656e657261ULL; \
+ u64 v3 = 0x7465646279746573ULL; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+ PREAMBLE(8)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+ const siphash_key_t *key)
+{
+ PREAMBLE(24)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: forth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+ const u64 forth, const siphash_key_t *key)
+{
+ PREAMBLE(32)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= forth;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+ PREAMBLE(4)
+ b |= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+ const siphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ PREAMBLE(12)
+ v3 ^= combined;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= combined;
+ b |= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48;
+ case 6: b |= ((u64)end[5]) << 40;
+ case 5: b |= ((u64)end[4]) << 32;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ b |= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(8)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(12)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ b |= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(16)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ combined = (u64)forth << 32 | third;
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+ do { \
+ v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+ v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+ v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+ v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+ } while (0)
+
+#define HPREAMBLE(len) \
+ u32 v0 = 0; \
+ u32 v1 = 0; \
+ u32 v2 = 0x6c796765U; \
+ u32 v3 = 0x74656462U; \
+ u32 b = ((u32)(len)) << 24; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return v1 ^ v3;
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = le32_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = get_unaligned_le32(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ HPREAMBLE(8)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ HPREAMBLE(12)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: forth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ HPREAMBLE(16)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ HSIPROUND;
+ v0 ^= forth;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
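
A typical in-kernel use of the SipHash code added above is hashing a few fixed-width values with a boot-time random key; a minimal sketch (the key variable, function names and field packing are illustrative):

#include <linux/siphash.h>
#include <linux/random.h>

static siphash_key_t demo_key;

static void demo_key_init(void)
{
	get_random_bytes(&demo_key, sizeof(demo_key));
}

static u64 demo_hash_flow(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
	/* Pack the 16-bit ports into one u32 and use the fixed-width
	 * helper; siphash() on a buffer would work equally well. */
	return siphash_3u32(saddr, daddr,
			    ((u32)sport << 16) | dport, &demo_key);
}
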
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index f87d138e9672..759ff419fe61 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -92,15 +92,19 @@ static bool init_stack_slab(void **prealloc)
return true;
if (stack_slabs[depot_index] == NULL) {
stack_slabs[depot_index] = *prealloc;
+ *prealloc = NULL;
} else {
- stack_slabs[depot_index + 1] = *prealloc;
+ /* If this is the last depot slab, do not touch the next one. */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ stack_slabs[depot_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
/*
* This smp_store_release pairs with smp_load_acquire() from
* |next_slab_inited| above and in depot_save_stack().
*/
smp_store_release(&next_slab_inited, 1);
}
- *prealloc = NULL;
return true;
}
diff --git a/lib/string.c b/lib/string.c
index 1a7d3fd52541..ed72a03da8e9 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -202,7 +202,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
- c = *(unsigned long *)(src+res);
+ c = read_word_at_a_time(src+res);
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
@@ -798,6 +798,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
EXPORT_SYMBOL(memcmp);
#endif
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+ return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
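
bcmp() only promises a zero/non-zero answer, which is what lets compilers lower boolean memcmp() comparisons to it; callers must not order on the result. A short usage sketch (kernel context, assuming the matching declaration in <linux/string.h> that the same patch series adds):

#include <linux/string.h>
#include <linux/types.h>

static bool keys_equal(const u8 *a, const u8 *b, size_t len)
{
	/* Only equality is meaningful; never write bcmp(...) < 0. */
	return bcmp(a, b, len) == 0;
}
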
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 7e35fc450c5b..5a07f19059c3 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -22,10 +22,11 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+ unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long res = 0;
+ unsigned long res = 0;
/*
* Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..9ff4f3bbb1aa 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -27,7 +27,7 @@
static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- long align, res = 0;
+ unsigned long align, res = 0;
unsigned long c;
/*
@@ -41,7 +41,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
- align = (sizeof(long) - 1) & (unsigned long)src;
+ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
src -= align;
max += align;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index fbdf87920093..6e76a448867d 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -124,6 +124,7 @@ static noinline void __init kmalloc_oob_krealloc_more(void)
if (!ptr1 || !ptr2) {
pr_err("Allocation failed\n");
kfree(ptr1);
+ kfree(ptr2);
return;
}
@@ -355,7 +356,7 @@ static noinline void __init kasan_stack_oob(void)
static noinline void __init ksize_unpoisons_memory(void)
{
char *ptr;
- size_t size = 123, real_size = size;
+ size_t size = 123, real_size;
pr_info("ksize() unpoisons the whole allocated chunk\n");
ptr = kmalloc(size, GFP_KERNEL);
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644
index 000000000000..a6d854d933bf
--- /dev/null
+++ b/lib/test_siphash.c
@@ -0,0 +1,223 @@
+/* Test cases for siphash.c
+ *
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ * https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+ 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+ 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+ 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+ 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+ 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+ 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+ 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+ 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+ 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+ 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+ 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+ 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+ 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+ 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+ 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+ 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+ 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+ 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+ 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+ 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+ 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+ 0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+ 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+ 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+ 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+ 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+ 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+ 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+ 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+ 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+ 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+ 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+ 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+ 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+ 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+ 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+ 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+ 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+ 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+ 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+ 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+ 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+ 0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+ 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+ 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+ 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+ 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+ 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+ 0x06712339U, 0x522aca67U, 0x911bb605U,
+ 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+ 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+ 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+ 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+ 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+ 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+ 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+ 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+ 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+ 0x007783ebU, 0x95766243U, 0xab639262U,
+ 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+ 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+ 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+ 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+ 0x87178304U
+};
+#endif
+
+static int __init siphash_test_init(void)
+{
+ u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+ u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+ u8 i;
+ int ret = 0;
+
+ for (i = 0; i < 64; ++i) {
+ in[i] = i;
+ in_unaligned[i + 1] = i;
+ if (siphash(in, i, &test_key_siphash) !=
+ test_vectors_siphash[i]) {
+ pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
+ test_vectors_siphash[i]) {
+ pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (hsiphash(in, i, &test_key_hsiphash) !=
+ test_vectors_hsiphash[i]) {
+ pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
+ test_vectors_hsiphash[i]) {
+ pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
+ ret = -EINVAL;
+ }
+ }
+ if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
+ test_vectors_siphash[8]) {
+ pr_info("siphash self-test 1u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash) != test_vectors_siphash[16]) {
+ pr_info("siphash self-test 2u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash) !=
+ test_vectors_siphash[24]) {
+ pr_info("siphash self-test 3u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+ &test_key_siphash) != test_vectors_siphash[32]) {
+ pr_info("siphash self-test 4u64: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_1u32(0x03020100U, &test_key_siphash) !=
+ test_vectors_siphash[4]) {
+ pr_info("siphash self-test 1u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
+ test_vectors_siphash[8]) {
+ pr_info("siphash self-test 2u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash) !=
+ test_vectors_siphash[12]) {
+ pr_info("siphash self-test 3u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
+ test_vectors_siphash[16]) {
+ pr_info("siphash self-test 4u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[4]) {
+ pr_info("hsiphash self-test 1u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[8]) {
+ pr_info("hsiphash self-test 2u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash) !=
+ test_vectors_hsiphash[12]) {
+ pr_info("hsiphash self-test 3u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
+ test_vectors_hsiphash[16]) {
+ pr_info("hsiphash self-test 4u32: FAIL\n");
+ ret = -EINVAL;
+ }
+ if (!ret)
+ pr_info("self-tests: pass\n");
+ return ret;
+}
+
+static void __exit siphash_test_exit(void)
+{
+}
+
+module_init(siphash_test_init);
+module_exit(siphash_test_exit);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 60e108c5c173..c652b4a820cc 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -86,11 +86,13 @@ static bool is_inline_int(struct type_descriptor *type)
return bits <= inline_bits;
}
-static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
+static s_max get_signed_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type)) {
unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
- return ((s_max)val) << extra_bits >> extra_bits;
+ unsigned long ulong_val = (unsigned long)val;
+
+ return ((s_max)ulong_val) << extra_bits >> extra_bits;
}
if (type_bit_width(type) == 64)
@@ -99,15 +101,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val)
return *(s_max *)val;
}
-static bool val_is_negative(struct type_descriptor *type, unsigned long val)
+static bool val_is_negative(struct type_descriptor *type, void *val)
{
return type_is_signed(type) && get_signed_val(type, val) < 0;
}
-static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
{
if (is_inline_int(type))
- return val;
+ return (unsigned long)val;
if (type_bit_width(type) == 64)
return *(u64 *)val;
@@ -116,7 +118,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val)
}
static void val_to_string(char *str, size_t size, struct type_descriptor *type,
- unsigned long value)
+ void *value)
{
if (type_is_int(type)) {
if (type_bit_width(type) == 128) {
@@ -168,8 +170,8 @@ static void ubsan_epilogue(unsigned long *flags)
current->in_ubsan--;
}
-static void handle_overflow(struct overflow_data *data, unsigned long lhs,
- unsigned long rhs, char op)
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
{
struct type_descriptor *type = data->type;
@@ -196,8 +198,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs,
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '+');
@@ -205,23 +206,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
void __ubsan_handle_sub_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
void __ubsan_handle_mul_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
- unsigned long old_val)
+ void *old_val)
{
unsigned long flags;
char old_val_str[VALUE_LENGTH];
@@ -242,8 +241,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- unsigned long lhs,
- unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
@@ -328,7 +326,7 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
}
void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
.location = &data->location,
@@ -337,12 +335,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- unsigned long ptr)
+ void *ptr)
{
struct type_mismatch_data_common common_data = {
@@ -352,7 +350,7 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
.type_check_kind = data->type_check_kind
};
- ubsan_type_mismatch_common(&common_data, ptr);
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
@@ -376,7 +374,7 @@ void __ubsan_handle_nonnull_return(struct nonnull_return_data *data)
EXPORT_SYMBOL(__ubsan_handle_nonnull_return);
void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
- unsigned long bound)
+ void *bound)
{
unsigned long flags;
char bound_str[VALUE_LENGTH];
@@ -393,8 +391,7 @@ void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive);
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
- unsigned long index)
+void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
unsigned long flags;
char index_str[VALUE_LENGTH];
@@ -412,7 +409,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data,
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- unsigned long lhs, unsigned long rhs)
+ void *lhs, void *rhs)
{
unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
@@ -463,7 +460,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- unsigned long val)
+ void *val)
{
unsigned long flags;
char val_str[VALUE_LENGTH];
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6ff2d7744223..113b7d317079 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -669,6 +669,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
atomic_set(&bdi->usage_cnt, 1);
+ init_rwsem(&bdi->wb_switch_rwsem);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
diff --git a/mm/cma.c b/mm/cma.c
index 397687fc51f9..7cb569a188c4 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -100,8 +100,10 @@ static int __init cma_activate_area(struct cma *cma)
cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!cma->bitmap)
+ if (!cma->bitmap) {
+ cma->count = 0;
return -ENOMEM;
+ }
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
@@ -266,6 +268,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
*/
alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+ if (fixed && base & (alignment - 1)) {
+ ret = -EINVAL;
+ pr_err("Region at %pa must be aligned to %pa bytes\n",
+ &base, &alignment);
+ goto err;
+ }
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
@@ -296,6 +304,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (limit == 0 || limit > memblock_end)
limit = memblock_end;
+ if (base + size > limit) {
+ ret = -EINVAL;
+ pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+ &size, &base, &limit);
+ goto err;
+ }
+
/* Reserve memory */
if (fixed) {
if (memblock_is_region_reserved(base, size) ||
@@ -339,12 +354,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
if (ret)
- goto err;
+ goto free_mem;
pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
return 0;
+free_mem:
+ memblock_free(base, size);
err:
pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
return ret;
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index f8e4b60db167..da50dab56b70 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -57,7 +57,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
mutex_lock(&cma->lock);
for (;;) {
start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
- if (start >= cma->count)
+ if (start >= bitmap_maxno)
break;
end = find_next_bit(cma->bitmap, bitmap_maxno, start);
maxchunk = max(end - start, maxchunk);
diff --git a/mm/filemap.c b/mm/filemap.c
index 6d2f561d517c..b046d8f147e2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -383,7 +383,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
.range_end = end,
};
- if (!mapping_cap_writeback_dirty(mapping))
+ if (!mapping_cap_writeback_dirty(mapping) ||
+ !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
diff --git a/mm/gup.c b/mm/gup.c
index 99c2f10188c0..6bb7a8eb7f82 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -153,7 +153,10 @@ retry:
}
if (flags & FOLL_GET) {
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ page = ERR_PTR(-ENOMEM);
+ goto out;
+ }
/* drop the pgmap reference now that we hold the page */
if (pgmap) {
@@ -292,7 +295,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ spin_unlock(ptl);
+ return ERR_PTR(-ENOMEM);
+ }
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
@@ -348,7 +354,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
goto unmap;
*page = pte_page(*pte);
}
- get_page(*page);
+ if (unlikely(!try_get_page(*page))) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
out:
ret = 0;
unmap:
@@ -1231,6 +1240,20 @@ struct page *get_dump_page(unsigned long addr)
*/
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+ struct page *head = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
+ return NULL;
+ if (unlikely(!page_cache_add_speculative(head, refs)))
+ return NULL;
+ return head;
+}
+
#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
@@ -1263,9 +1286,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ head = try_get_compound_head(page, 1);
+ if (!head)
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1313,17 +1336,16 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return 0;
refs = 0;
- head = pmd_page(orig);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pmd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1348,17 +1370,16 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return 0;
refs = 0;
- head = pud_page(orig);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pud_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1384,17 +1405,16 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
return 0;
refs = 0;
- head = pgd_page(orig);
- page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pgd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7ea8da990b9d..5fbd77d52602 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -30,6 +30,7 @@
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/page_owner.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -1950,6 +1951,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
ClearPageCompound(head);
+
+ split_page_owner(head, HPAGE_PMD_ORDER);
+
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
page_ref_inc(head);
@@ -2091,7 +2095,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
unsigned long flags;
pgoff_t end;
- VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
+ VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
VM_BUG_ON_PAGE(!PageCompound(page), page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8b682da98d95..9914da93069e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1247,12 +1247,23 @@ void free_huge_page(struct page *page)
ClearPagePrivate(page);
/*
- * A return code of zero implies that the subpool will be under its
- * minimum size if the reservation is not restored after page is free.
- * Therefore, force restore_reserve operation.
+ * If PagePrivate() was set on page, page allocation consumed a
+ * reservation. If the page was associated with a subpool, there
+ * would have been a page reserved in the subpool before allocation
+ * via hugepage_subpool_get_pages(). Since we are 'restoring' the
+ * reservation, do not call hugepage_subpool_put_pages() as this will
+ * remove the reserved page from the subpool.
*/
- if (hugepage_subpool_put_pages(spool, 1) == 0)
- restore_reserve = true;
+ if (!restore_reserve) {
+ /*
+ * A return code of zero implies that the subpool will be
+ * under its minimum size if the reservation is not restored
+ * after page is free. Therefore, force restore_reserve
+ * operation.
+ */
+ if (hugepage_subpool_put_pages(spool, 1) == 0)
+ restore_reserve = true;
+ }
spin_lock(&hugetlb_lock);
clear_page_huge_active(page);
@@ -3812,21 +3823,14 @@ backout_unlocked:
}
#ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
pgoff_t idx, unsigned long address)
{
unsigned long key[2];
u32 hash;
- if (vma->vm_flags & VM_SHARED) {
- key[0] = (unsigned long) mapping;
- key[1] = idx;
- } else {
- key[0] = (unsigned long) mm;
- key[1] = address >> huge_page_shift(h);
- }
+ key[0] = (unsigned long) mapping;
+ key[1] = idx;
hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
@@ -3837,9 +3841,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
* For uniprocessor systems we always use a single mutex, so just
* return 0 and avoid the hashing overhead.
*/
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
pgoff_t idx, unsigned long address)
{
return 0;
@@ -3885,7 +3887,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
@@ -3993,6 +3995,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long vaddr = *position;
unsigned long remainder = *nr_pages;
struct hstate *h = hstate_vma(vma);
+ int err = -EFAULT;
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
@@ -4064,6 +4067,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
+
+ /*
+ * Instead of doing 'try_get_page()' below in the same_page
+ * loop, just check the count once here.
+ */
+ if (unlikely(page_count(page) <= 0)) {
+ if (pages) {
+ spin_unlock(ptl);
+ remainder = 0;
+ err = -ENOMEM;
+ break;
+ }
+ }
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
@@ -4090,7 +4106,7 @@ same_page:
*nr_pages = remainder;
*position = vaddr;
- return i ? i : -EFAULT;
+ return i ? i : err;
}
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index eec1150125b9..e430e04997ee 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -196,7 +196,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
again:
rcu_read_lock();
h_cg = hugetlb_cgroup_from_task(current);
- if (!css_tryget_online(&h_cg->css)) {
+ if (!css_tryget(&h_cg->css)) {
rcu_read_unlock();
goto again;
}
diff --git a/mm/internal.h b/mm/internal.h
index 3e2d01694747..f6df7cb8cbc0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -442,6 +442,16 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
#define NODE_RECLAIM_SOME 0
#define NODE_RECLAIM_SUCCESS 1
+#ifdef CONFIG_NUMA
+extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
+#else
+static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
+ unsigned int order)
+{
+ return NODE_RECLAIM_NOSCAN;
+}
+#endif
+
extern int hwpoison_filter(struct page *p);
extern u32 hwpoison_filter_dev_major;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 4ce386c44cf1..1169c1fe941f 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -80,7 +80,14 @@ void kasan_unpoison_task_stack(struct task_struct *task)
/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
- __kasan_unpoison_stack(current, watermark);
+ /*
+ * Calculate the task stack base address. Avoid using 'current'
+ * because this function is called by early resume code which hasn't
+ * yet set up the percpu register (%gs).
+ */
+ void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
+
+ kasan_unpoison_shadow(base, watermark - base);
}
/*
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 3f9a41cf0ac6..31238dad85fb 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -15,6 +15,7 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
+#include <linux/mm.h>
#include <linux/pfn.h>
#include <asm/page.h>
@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
pte_t *pte = pte_offset_kernel(pmd, addr);
pte_t zero_pte;
- zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+ zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
zero_pte = pte_wrprotect(zero_pte);
while (addr + PAGE_SIZE <= end) {
@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
next = pmd_addr_end(addr, end);
if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
- pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue;
}
@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
pmd_t *pmd;
- pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue;
}
@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
* puds, pmds, so pgd_populate() and pud_populate()
* are no-ops.
*/
- pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
pud = pud_offset(pgd, addr);
- pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pmd = pmd_offset(pud, addr);
- pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 8ca412aebcf1..c505ac5b2d46 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -302,6 +302,7 @@ void kasan_report(unsigned long addr, size_t size,
disable_trace_on_warning();
info.access_addr = (void *)addr;
+ info.first_bad_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e0cfc3a54b6a..8217ee5d66ef 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
+ result = SCAN_ANY_PROCESS;
+ if (!mmget_still_valid(mm))
+ goto out;
result = hugepage_vma_revalidate(mm, address, &vma);
if (result)
goto out;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9e66449ed91f..d05133b37b17 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -569,7 +569,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
if (in_irq()) {
object->pid = 0;
strncpy(object->comm, "hardirq", sizeof(object->comm));
- } else if (in_softirq()) {
+ } else if (in_serving_softirq()) {
object->pid = 0;
strncpy(object->comm, "softirq", sizeof(object->comm));
} else {
diff --git a/mm/ksm.c b/mm/ksm.c
index 614b2cce9ad7..d6c81a5076a7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -710,13 +710,13 @@ static int remove_stable_node(struct stable_node *stable_node)
return 0;
}
- if (WARN_ON_ONCE(page_mapped(page))) {
- /*
- * This should not happen: but if it does, just refuse to let
- * merge_across_nodes be switched - there is no need to panic.
- */
- err = -EBUSY;
- } else {
+ /*
+ * The page could still be mapped if this races with __mmput() running in
+ * between ksm_exit() and exit_mmap(). Just refuse to let
+ * merge_across_nodes/max_page_sharing be switched.
+ */
+ err = -EBUSY;
+ if (!page_mapped(page)) {
/*
* The stable node did not yet appear stale to get_ksm_page(),
* since that allows for an unmapped ksm page to be recognized
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7a40fa2be858..16361c989af9 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -42,11 +42,7 @@ static void list_lru_unregister(struct list_lru *lru)
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
- /*
- * This needs node 0 to be always present, even
- * in the systems supporting sparse numa ids.
- */
- return !!lru->node[0].memcg_lrus;
+ return lru->memcg_aware;
}
static inline struct list_lru_one *
@@ -317,7 +313,7 @@ static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
}
return 0;
fail:
- __memcg_destroy_list_lru_node(memcg_lrus, begin, i - 1);
+ __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
return -ENOMEM;
}
@@ -389,6 +385,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
int i;
+ lru->memcg_aware = memcg_aware;
+
if (!memcg_aware)
return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 86a6b331b964..2f7f934bf435 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -741,7 +741,7 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
if (unlikely(!memcg))
memcg = root_mem_cgroup;
}
- } while (!css_tryget_online(&memcg->css));
+ } while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
}
@@ -887,26 +887,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
css_put(&prev->css);
}
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+ struct mem_cgroup *dead_memcg)
{
- struct mem_cgroup *memcg = dead_memcg;
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_node *mz;
int nid;
int i;
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- for_each_node(nid) {
- mz = mem_cgroup_nodeinfo(memcg, nid);
- for (i = 0; i <= DEF_PRIORITY; i++) {
- iter = &mz->iter[i];
- cmpxchg(&iter->position,
- dead_memcg, NULL);
- }
+ for_each_node(nid) {
+ mz = mem_cgroup_nodeinfo(from, nid);
+ for (i = 0; i <= DEF_PRIORITY; i++) {
+ iter = &mz->iter[i];
+ cmpxchg(&iter->position,
+ dead_memcg, NULL);
}
}
}
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+ struct mem_cgroup *memcg = dead_memcg;
+ struct mem_cgroup *last;
+
+ do {
+ __invalidate_reclaim_iterators(memcg, dead_memcg);
+ last = memcg;
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ /*
+ * When cgroup1 non-hierarchy mode is used,
+ * parent_mem_cgroup() does not walk all the way up to the
+ * cgroup root (root_mem_cgroup). So we have to handle
+ * dead_memcg from cgroup root separately.
+ */
+ if (last != root_mem_cgroup)
+ __invalidate_reclaim_iterators(root_mem_cgroup,
+ dead_memcg);
+}
+
/*
* Iteration constructs for visiting all cgroups (under a tree). If
* loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -2306,6 +2325,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+
+ /*
+ * Enforce __GFP_NOFAIL allocation because callers are not
+ * prepared to see failures and likely do not have any failure
+ * handling code.
+ */
+ if (gfp & __GFP_NOFAIL) {
+ page_counter_charge(&memcg->kmem, nr_pages);
+ return 0;
+ }
cancel_charge(memcg, nr_pages);
return -ENOMEM;
}
@@ -3452,7 +3481,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
unsigned long usage;
- int i, j, size;
+ int i, j, size, entries;
mutex_lock(&memcg->thresholds_lock);
@@ -3472,14 +3501,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
__mem_cgroup_threshold(memcg, type == _MEMSWAP);
/* Calculate new number of threshold */
- size = 0;
+ size = entries = 0;
for (i = 0; i < thresholds->primary->size; i++) {
if (thresholds->primary->entries[i].eventfd != eventfd)
size++;
+ else
+ entries++;
}
new = thresholds->spare;
+ /* If no items related to eventfd have been cleared, nothing to do */
+ if (!entries)
+ goto unlock;
+
/* Set thresholds array to NULL if we don't have thresholds */
if (!size) {
kfree(new);
@@ -5697,6 +5732,10 @@ void mem_cgroup_sk_alloc(struct sock *sk)
return;
}
+ /* Do not associate the sock with unrelated interrupted task's memcg. */
+ if (in_interrupt())
+ return;
+
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b4c8d7b9ab82..449999657c0b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1340,7 +1340,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
}
-/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+/*
+ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+ * and online/offline operations (triggered e.g. by sysfs).
+ *
+ * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
+ */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
u64 start, size;
@@ -1418,9 +1423,9 @@ out:
mem_hotplug_done();
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory_resource);
-int __ref add_memory(int nid, u64 start, u64 size)
+/* requires device_hotplug_lock, see add_memory_resource() */
+int __ref __add_memory(int nid, u64 start, u64 size)
{
struct resource *res;
int ret;
@@ -1434,6 +1439,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
release_memory_resource(res);
return ret;
}
+
+int add_memory(int nid, u64 start, u64 size)
+{
+ int rc;
+
+ lock_device_hotplug();
+ rc = __add_memory(nid, start, size);
+ unlock_device_hotplug();
+
+ return rc;
+}
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 593b74bed59b..a2be65bf5d8c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -547,11 +547,16 @@ retry:
goto retry;
}
- migrate_page_add(page, qp->pagelist, flags);
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ if (!vma_migratable(vma))
+ break;
+ migrate_page_add(page, qp->pagelist, flags);
+ } else
+ break;
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
- return 0;
+ return addr != end ? -EIO : 0;
}
static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -623,7 +628,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
- if (!vma_migratable(vma))
+ /*
+ * Need to check MPOL_MF_STRICT to return -EIO if possible
+ * regardless of vma_migratable
+ */
+ if (!vma_migratable(vma) &&
+ !(flags & MPOL_MF_STRICT))
return 1;
if (endvma > end)
@@ -650,7 +660,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
}
/* queue pages from current vma */
- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ if (flags & MPOL_MF_VALID)
return 0;
return 1;
}
@@ -2734,6 +2744,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
char *flags = strchr(str, '=');
int err = 1;
+ if (flags)
+ *flags++ = '\0'; /* terminate mode string */
+
if (nodelist) {
/* NUL-terminate mode or flags string */
*nodelist++ = '\0';
@@ -2744,9 +2757,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
} else
nodes_clear(nodes);
- if (flags)
- *flags++ = '\0'; /* terminate mode string */
-
for (mode = 0; mode < MPOL_MAX; mode++) {
if (!strcmp(str, policy_modes[mode])) {
break;
@@ -2758,7 +2768,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
switch (mode) {
case MPOL_PREFERRED:
/*
- * Insist on a nodelist of one node only
+ * Insist on a nodelist of one node only, although later
+ * we use first_node(nodes) to grab a single node, so here
+ * nodelist (or nodes) cannot be empty.
*/
if (nodelist) {
char *rest = nodelist;
@@ -2766,6 +2778,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
rest++;
if (*rest)
goto out;
+ if (nodes_empty(nodes))
+ goto out;
}
break;
case MPOL_INTERLEAVE:
diff --git a/mm/mincore.c b/mm/mincore.c
index bfb866435478..3b6a883d0926 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -167,6 +167,22 @@ out:
return 0;
}
+static inline bool can_do_mincore(struct vm_area_struct *vma)
+{
+ if (vma_is_anonymous(vma))
+ return true;
+ if (!vma->vm_file)
+ return false;
+ /*
+ * Reveal pagecache information only for non-anonymous mappings that
+ * correspond to the files the calling process could (if it tried) open
+ * for writing; otherwise we'd be including shared non-exclusive
+ * mappings, which opens a side channel.
+ */
+ return inode_owner_or_capable(file_inode(vma->vm_file)) ||
+ inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
+}
+
/*
* Do a chunk of "sys_mincore()". We've already checked
* all the arguments, we hold the mmap semaphore: we should
@@ -187,8 +203,13 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
vma = find_vma(current->mm, addr);
if (!vma || addr < vma->vm_start)
return -ENOMEM;
- mincore_walk.mm = vma->vm_mm;
end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
+ if (!can_do_mincore(vma)) {
+ unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
+ memset(vec, 1, pages);
+ return pages;
+ }
+ mincore_walk.mm = vma->vm_mm;
err = walk_page_range(addr, end, &mincore_walk);
if (err < 0)
return err;
diff --git a/mm/mlock.c b/mm/mlock.c
index f0505692a5f4..3e7fe404bfb8 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -630,11 +630,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
* is also counted.
* Return value: previously mlocked page counts
*/
-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
unsigned long start, size_t len)
{
struct vm_area_struct *vma;
- int count = 0;
+ unsigned long count = 0;
if (mm == NULL)
mm = current->mm;
diff --git a/mm/mmap.c b/mm/mmap.c
index 3f2314ad6acd..d221266d100f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -87,12 +87,6 @@ static void unmap_region(struct mm_struct *mm,
* MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
* w: (no) no w: (no) no w: (copy) copy w: (no) no
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- * r: (no) no
- * w: (no) no
- * x: (yes) yes
*/
pgprot_t protection_map[16] = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
@@ -2448,7 +2442,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr))
return vma;
- if (!prev || expand_stack(prev, addr))
+ /* don't alter vm_end if the coredump is running */
+ if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
return NULL;
if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2474,6 +2469,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
return vma;
if (!(vma->vm_flags & VM_GROWSDOWN))
return NULL;
+ /* don't alter vm_start if the coredump is running */
+ if (!mmget_still_valid(mm))
+ return NULL;
start = vma->vm_start;
if (expand_stack(vma, addr))
return NULL;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f4259e496f83..7a66e37efb4d 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -286,7 +286,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
* thanks to mm_take_all_locks().
*/
spin_lock(&mm->mmu_notifier_mm->lock);
- hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+ hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
mm_drop_all_locks(mm);
diff --git a/mm/nommu.c b/mm/nommu.c
index 44265e00b701..b40ec74f364c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -445,10 +445,14 @@ void vm_unmap_aliases(void)
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture
+ * chose not to have one.
*/
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
+{
+}
+
+void __weak vmalloc_sync_unmappings(void)
{
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 281a46aeae61..462c778b9fb5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -200,11 +200,11 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
if (this_bw < tot_bw) {
if (min) {
min *= this_bw;
- do_div(min, tot_bw);
+ min = div64_ul(min, tot_bw);
}
if (max < 100) {
max *= this_bw;
- do_div(max, tot_bw);
+ max = div64_ul(max, tot_bw);
}
}
@@ -2141,6 +2141,13 @@ EXPORT_SYMBOL(tag_pages_for_writeback);
* not miss some pages (e.g., because some other process has cleared TOWRITE
* tag we set). The rule we follow is that TOWRITE tag can be cleared only
* by the process clearing the DIRTY tag (and submitting the page for IO).
+ *
+ * To avoid deadlocks between range_cyclic writeback and callers that hold
+ * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
+ * we do not loop back to the start of the file. Doing so causes a page
+ * lock/page writeback access order inversion - we should only ever lock
+ * multiple pages in ascending page->index order, and looping back to the start
+ * of the file violates that rule and causes deadlocks.
*/
int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
@@ -2155,7 +2162,6 @@ int write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- int cycled;
int range_whole = 0;
int tag;
@@ -2163,23 +2169,17 @@ int write_cache_pages(struct address_space *mapping,
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
-retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
@@ -2287,17 +2287,14 @@ continue_unlock:
pagevec_release(&pvec);
cond_resched();
}
- if (!cycled && !done) {
- /*
- * range_cyclic:
- * We hit the last page and there is more work to be done: wrap
- * back to the start of the file
- */
- cycled = 1;
- index = 0;
- end = writeback_index - 1;
- goto retry;
- }
+
+ /*
+ * If we hit the last page and there is more work to be done: wrap
+ * the index back to the start of the file for the next
+ * time we are called.
+ */
+ if (wbc->range_cyclic && !done)
+ done_index = 0;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 05f141e39ac1..ef710e387862 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3955,11 +3955,11 @@ refill:
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
- page_ref_add(page, size);
+ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
/* reset page count bias and offset to start of new frag */
nc->pfmemalloc = page_is_pfmemalloc(page);
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc->offset = size;
}
@@ -3975,10 +3975,10 @@ refill:
size = nc->size;
#endif
/* OK, page count is 0, we can safely set it */
- set_page_count(page, size + 1);
+ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
/* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size + 1;
+ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
offset = size - fragsz;
}
@@ -5491,13 +5491,15 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long *zone_end_pfn,
unsigned long *ignored)
{
+ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
+ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
/* When hotadd a new node from cpu_up(), the node should be empty */
if (!node_start_pfn && !node_end_pfn)
return 0;
/* Get the start and end of the zone */
- *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
- *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
+ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
+ *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
adjust_zone_range_for_zone_movable(nid, zone_type,
node_start_pfn, node_end_pfn,
zone_start_pfn, zone_end_pfn);
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 121dcffc4ec1..a7be1c7a79f6 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -286,6 +286,7 @@ static void free_page_ext(void *addr)
table_size = get_entry_size() * PAGES_PER_SECTION;
BUG_ON(PageReserved(page));
+ kmemleak_free(addr);
free_pages_exact(addr, table_size);
}
}
diff --git a/mm/page_idle.c b/mm/page_idle.c
index ae11aa914e55..ded173d6c5b5 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -131,7 +131,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
end_pfn = pfn + count * BITS_PER_BYTE;
if (end_pfn > max_pfn)
- end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+ end_pfn = max_pfn;
for (; pfn < end_pfn; pfn++) {
bit = pfn % BITMAP_CHUNK_BITS;
@@ -176,7 +176,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
end_pfn = pfn + count * BITS_PER_BYTE;
if (end_pfn > max_pfn)
- end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+ end_pfn = max_pfn;
for (; pfn < end_pfn; pfn++) {
bit = pfn % BITMAP_CHUNK_BITS;
diff --git a/mm/percpu.c b/mm/percpu.c
index 3794cfc88689..0462a2a00f05 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2048,8 +2048,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
ai->groups[group].base_offset = areas[group] - base;
}
- pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
- PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+ pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
+ PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
ai->dyn_size, ai->unit_size);
rc = pcpu_setup_first_chunk(ai, base);
@@ -2162,8 +2162,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
}
/* we're ready, commit */
- pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
- unit_pages, psize_str, vm.addr, ai->static_size,
+ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
+ unit_pages, psize_str, ai->static_size,
ai->reserved_size, ai->dyn_size);
rc = pcpu_setup_first_chunk(ai, vm.addr);
diff --git a/mm/shmem.c b/mm/shmem.c
index 944242491059..90ccbb35458b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2457,31 +2457,33 @@ static void shmem_tag_pins(struct address_space *mapping)
void **slot;
pgoff_t start;
struct page *page;
+ unsigned int tagged = 0;
lru_add_drain();
start = 0;
- rcu_read_lock();
+ spin_lock_irq(&mapping->tree_lock);
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
- page = radix_tree_deref_slot(slot);
+ page = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
if (!page || radix_tree_exception(page)) {
if (radix_tree_deref_retry(page)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
} else if (page_count(page) - page_mapcount(page) > 1) {
- spin_lock_irq(&mapping->tree_lock);
radix_tree_tag_set(&mapping->page_tree, iter.index,
SHMEM_TAG_PINNED);
- spin_unlock_irq(&mapping->tree_lock);
}
- if (need_resched()) {
- cond_resched_rcu();
- slot = radix_tree_iter_next(&iter);
- }
+ if (++tagged % 1024)
+ continue;
+
+ slot = radix_tree_iter_next(&iter);
+ spin_unlock_irq(&mapping->tree_lock);
+ cond_resched();
+ spin_lock_irq(&mapping->tree_lock);
}
- rcu_read_unlock();
+ spin_unlock_irq(&mapping->tree_lock);
}
/*
@@ -2693,7 +2695,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
}
shmem_falloc.waitq = &shmem_falloc_waitq;
- shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
spin_lock(&inode->i_lock);
inode->i_private = &shmem_falloc;
diff --git a/mm/slab.c b/mm/slab.c
index 354a09deecff..9547f02b4af9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -566,14 +566,6 @@ static void start_cpu_timer(int cpu)
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
- /*
- * The array_cache structures contain pointers to free object.
- * However, when such objects are allocated or transferred to another
- * cache the pointers are not cleared and they could be counted as
- * valid references during a kmemleak scan. Therefore, kmemleak must
- * not scan such objects.
- */
- kmemleak_no_scan(ac);
if (ac) {
ac->avail = 0;
ac->limit = limit;
@@ -589,6 +581,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
struct array_cache *ac = NULL;
ac = kmalloc_node(memsize, gfp, node);
+ /*
+ * The array_cache structures contain pointers to free object.
+ * However, when such objects are allocated or transferred to another
+ * cache the pointers are not cleared and they could be counted as
+ * valid references during a kmemleak scan. Therefore, kmemleak must
+ * not scan such objects.
+ */
+ kmemleak_no_scan(ac);
init_arraycache(ac, entries, batchcount);
return ac;
}
@@ -683,6 +683,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
alc = kmalloc_node(memsize, gfp, node);
if (alc) {
+ kmemleak_no_scan(alc);
init_arraycache(&alc->ac, entries, batch);
spin_lock_init(&alc->lock);
}
@@ -4364,8 +4365,12 @@ static int leaks_show(struct seq_file *m, void *p)
* whole processing.
*/
do {
- set_store_user_clean(cachep);
drain_cpu_caches(cachep);
+ /*
+ * drain_cpu_caches() could make kmemleak_object and
+ * debug_objects_cache dirty, so reset afterwards.
+ */
+ set_store_user_clean(cachep);
x[1] = 0;
diff --git a/mm/slub.c b/mm/slub.c
index 131dee87a67c..9b44423f1cf0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1909,8 +1909,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
if (node == NUMA_NO_NODE)
searchnode = numa_mem_id();
- else if (!node_present_pages(node))
- searchnode = node_to_mem_node(node);
object = get_partial_node(s, get_node(s, searchnode), c, flags);
if (object || node != NUMA_NO_NODE)
@@ -2506,17 +2504,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
struct page *page;
page = c->page;
- if (!page)
+ if (!page) {
+ /*
+ * if the node is not online or has no normal memory, just
+ * ignore the node constraint
+ */
+ if (unlikely(node != NUMA_NO_NODE &&
+ !node_state(node, N_NORMAL_MEMORY)))
+ node = NUMA_NO_NODE;
goto new_slab;
+ }
redo:
if (unlikely(!node_match(page, node))) {
- int searchnode = node;
-
- if (node != NUMA_NO_NODE && !node_present_pages(node))
- searchnode = node_to_mem_node(node);
-
- if (unlikely(!node_match(page, searchnode))) {
+ /*
+ * same as above but node_match() being false already
+ * implies node != NUMA_NO_NODE
+ */
+ if (!node_state(node, N_NORMAL_MEMORY)) {
+ node = NUMA_NO_NODE;
+ goto redo;
+ } else {
stat(s, ALLOC_NODE_MISMATCH);
deactivate_slab(s, page, c->freelist);
c->page = NULL;
@@ -2935,11 +2943,13 @@ redo:
barrier();
if (likely(page == c->page)) {
- set_freepointer(s, tail_obj, c->freelist);
+ void **freelist = READ_ONCE(c->freelist);
+
+ set_freepointer(s, tail_obj, freelist);
if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
- c->freelist, tid,
+ freelist, tid,
head, next_tid(tid)))) {
note_cmpxchg_failure("slab_free", s, tid);
@@ -3115,6 +3125,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
if (unlikely(!object)) {
/*
+ * We may have removed an object from c->freelist using
+ * the fastpath in the previous iteration; in that case,
+ * c->tid has not been bumped yet.
+ * Since ___slab_alloc() may reenable interrupts while
+ * allocating memory, we should bump c->tid now.
+ */
+ c->tid = next_tid(c->tid);
+
+ /*
* Invoking slow path likely have side-effect
* of re-populating per CPU c->freelist
*/
@@ -4718,7 +4737,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
}
}
- get_online_mems();
+ /*
+ * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+ * already held which will conflict with an existing lock order:
+ *
+ * mem_hotplug_lock->slab_mutex->kernfs_mutex
+ *
+ * We don't really need mem_hotplug_lock (to hold off
+ * slab_mem_going_offline_callback) here because slab's memory hot
+ * unplug code doesn't destroy the kmem_cache->node[] data.
+ */
+
#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
struct kmem_cache_node *n;
@@ -4759,7 +4788,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
- put_online_mems();
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 3c8da0af9695..c2de343baad4 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <linux/slab.h>
#include <asm/sections.h>
@@ -124,7 +125,7 @@ static inline const char *check_kernel_text_object(const void *ptr,
static inline const char *check_bogus_address(const void *ptr, unsigned long n)
{
/* Reject if object wraps past end of memory. */
- if ((unsigned long)ptr + n < (unsigned long)ptr)
+ if ((unsigned long)ptr + (n - 1) < (unsigned long)ptr)
return "<wrapped address>";
/* Reject if NULL or ZERO-allocation. */
@@ -217,7 +218,12 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
if (!virt_addr_valid(ptr))
return NULL;
- page = virt_to_head_page(ptr);
+ /*
+ * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
+ * highmem page or fallback to virt_to_page(). The following
+ * is effectively a highmem-aware virt_to_head_page().
+ */
+ page = compound_head(kmap_to_page((void *)ptr));
/* Check slab allocator for flags and size. */
if (PageSlab(page))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e6aa073f01df..153deec1df35 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -459,7 +459,11 @@ nocache:
}
found:
- if (addr + size > vend)
+ /*
+ * Also check the calculated address against vstart,
+ * because it can be 0 due to a big align request.
+ */
+ if (addr + size > vend || addr < vstart)
goto overflow;
va->va_start = addr;
@@ -1704,6 +1708,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return NULL;
/*
+ * First make sure the mappings are removed from all page-tables
+ * before they are freed.
+ */
+ vmalloc_sync_unmappings();
+
+ /*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
* flag. It means that vm_struct is not fully initialized.
* Now, it is fully initialized, so remove this flag here.
@@ -2237,13 +2247,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
EXPORT_SYMBOL(remap_vmalloc_range);
/*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
+ * not to have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
*/
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
{
}
+void __weak vmalloc_sync_unmappings(void)
+{
+}
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5e6a4d76659d..e60435d556e3 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1075,13 +1075,8 @@ const char * const vmstat_text[] = {
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
-#else
- "", /* nr_tlb_remote_flush */
- "", /* nr_tlb_remote_flush_received */
-#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
@@ -1799,7 +1794,7 @@ static int __init setup_vmstat(void)
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+ proc_create("pagetypeinfo", 0400, NULL, &pagetypeinfo_file_ops);
proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index cf15851a7d2f..e4cca3f5331e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -52,6 +52,7 @@
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/migrate.h>
+#include <linux/wait.h>
#include <linux/pagemap.h>
#define ZSPAGE_MAGIC 0x58
@@ -265,6 +266,10 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct inode *inode;
struct work_struct free_work;
+ /* A wait queue for when migration races with async_free_zspage() */
+ wait_queue_head_t migration_wait;
+ atomic_long_t isolated_pages;
+ bool destroying;
#endif
};
@@ -1939,6 +1944,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
zspage->isolated--;
}
+static void putback_zspage_deferred(struct zs_pool *pool,
+ struct size_class *class,
+ struct zspage *zspage)
+{
+ enum fullness_group fg;
+
+ fg = putback_zspage(class, zspage);
+ if (fg == ZS_EMPTY)
+ schedule_work(&pool->free_work);
+
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+ atomic_long_dec(&pool->isolated_pages);
+ /*
+ * There's no possibility of racing, since wait_for_isolated_drain()
+ * checks the isolated count under &class->lock after enqueuing
+ * on migration_wait.
+ */
+ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+ wake_up_all(&pool->migration_wait);
+}
+
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
struct page *newpage, struct page *oldpage)
{
@@ -2008,6 +2038,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
*/
if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
get_zspage_mapping(zspage, &class_idx, &fullness);
+ atomic_long_inc(&pool->isolated_pages);
remove_zspage(class, zspage, fullness);
}
@@ -2096,8 +2127,21 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
* Page migration is done so let's putback isolated zspage to
* the list if @page is final isolated subpage in the zspage.
*/
- if (!is_zspage_isolated(zspage))
- putback_zspage(class, zspage);
+ if (!is_zspage_isolated(zspage)) {
+ /*
+ * We cannot race with zs_destroy_pool() here because we wait
+ * for isolation to hit zero before we start destroying.
+ * Also, we ensure that everyone can see pool->destroying before
+ * we start waiting.
+ */
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
+ }
+
+ if (page_zone(newpage) != page_zone(page)) {
+ dec_zone_page_state(page, NR_ZSPAGES);
+ inc_zone_page_state(newpage, NR_ZSPAGES);
+ }
reset_page(page);
put_page(page);
@@ -2144,13 +2188,12 @@ void zs_page_putback(struct page *page)
spin_lock(&class->lock);
dec_zspage_isolation(zspage);
if (!is_zspage_isolated(zspage)) {
- fg = putback_zspage(class, zspage);
/*
* Due to page_lock, we cannot free zspage immediately
* so let's defer.
*/
- if (fg == ZS_EMPTY)
- schedule_work(&pool->free_work);
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
}
spin_unlock(&class->lock);
}
@@ -2174,8 +2217,36 @@ static int zs_register_migration(struct zs_pool *pool)
return 0;
}
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+ return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Function for resolving migration */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+
+ /*
+ * We're in the process of destroying the pool, so there are no
+ * active allocations. zs_page_isolate() fails for completely free
+ * zspages, so we need only wait for the zs_pool's isolated
+ * count to hit zero.
+ */
+ wait_event(pool->migration_wait,
+ pool_isolated_are_drained(pool));
+}
+
static void zs_unregister_migration(struct zs_pool *pool)
{
+ pool->destroying = true;
+ /*
+ * We need a memory barrier here to ensure global visibility of
+ * pool->destroying. Thus pool->isolated_pages will either be 0, in which
+ * case we don't care, or it will be > 0 and pool->destroying will
+ * ensure that we wake up once isolation hits 0.
+ */
+ smp_mb();
+ wait_for_isolated_drain(pool); /* This can block */
flush_work(&pool->free_work);
iput(pool->inode);
}
@@ -2422,6 +2493,10 @@ struct zs_pool *zs_create_pool(const char *name)
if (!pool->name)
goto err;
+#ifdef CONFIG_COMPACTION
+ init_waitqueue_head(&pool->migration_wait);
+#endif
+
if (create_cache(pool))
goto err;
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
index 7008d53e455c..e61679bf0908 100644
--- a/net/6lowpan/nhc.c
+++ b/net/6lowpan/nhc.c
@@ -18,7 +18,7 @@
#include "nhc.h"
static struct rb_root rb_root = RB_ROOT;
-static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX];
+static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
static DEFINE_SPINLOCK(lowpan_nhc_lock);
static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index cc1557978066..ecdfeaafba9c 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fb3e2a50d76e..892929d43898 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -366,10 +366,12 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (!net_eq(dev_net(dev), &init_net))
+ break;
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- case SIOCSHWTSTAMP:
case SIOCGHWTSTAMP:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
@@ -608,7 +610,8 @@ static int vlan_dev_init(struct net_device *dev)
return 0;
}
-static void vlan_dev_uninit(struct net_device *dev)
+/* Note: this function might be called multiple times for the same device. */
+void vlan_dev_uninit(struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 1270207f3d7c..214cc068ffc2 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -92,11 +92,13 @@ static int vlan_changelink(struct net_device *dev,
struct ifla_vlan_flags *flags;
struct ifla_vlan_qos_mapping *m;
struct nlattr *attr;
- int rem;
+ int rem, err;
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
- vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
+ if (err)
+ return err;
}
if (data[IFLA_VLAN_INGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -107,7 +109,9 @@ static int vlan_changelink(struct net_device *dev,
if (data[IFLA_VLAN_EGRESS_QOS]) {
nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
m = nla_data(attr);
- vlan_dev_set_egress_priority(dev, m->from, m->to);
+ err = vlan_dev_set_egress_priority(dev, m->from, m->to);
+ if (err)
+ return err;
}
}
return 0;
@@ -153,10 +157,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data);
- if (err < 0)
- return err;
-
- return register_vlan_dev(dev);
+ if (!err)
+ err = register_vlan_dev(dev);
+ if (err)
+ vlan_dev_uninit(dev);
+ return err;
}
static inline size_t vlan_qos_map_size(unsigned int n)
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 145f80518064..ed1e39ccaebf 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -570,9 +570,10 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
+ return ret;
}
- return ret;
+ return fake_pdu.offset;
}
EXPORT_SYMBOL(p9stat_read);
@@ -621,13 +622,19 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
if (ret) {
p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
trace_9p_protocol_dump(clnt, &fake_pdu);
- goto out;
+ return ret;
}
- strcpy(dirent->d_name, nameptr);
+ ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
+ if (ret < 0) {
+ p9_debug(P9_DEBUG_ERROR,
+ "On the wire dirent name too long: %s\n",
+ nameptr);
+ kfree(nameptr);
+ return ret;
+ }
kfree(nameptr);
-out:
return fake_pdu.offset;
}
EXPORT_SYMBOL(p9dirent_read);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 38aa6345bdfa..9c0c894b56f8 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
+#include "trans_common.h"
/**
* p9_release_req_pages - Release pages after the transaction.
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 5a2ad4707463..8e4313ad3f02 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -254,8 +254,7 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DISCONNECTED:
if (rdma)
rdma->state = P9_RDMA_CLOSED;
- if (c)
- c->status = Disconnected;
+ c->status = Disconnected;
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
@@ -454,7 +453,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
err = post_recv(client, rpl_context);
if (err) {
- p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+ p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
goto recv_error;
}
/* remove posted receive buffer from request structure */
@@ -523,7 +522,7 @@ dont_need_post_recv:
recv_error:
kfree(rpl_context);
spin_lock_irqsave(&rdma->req_lock, flags);
- if (rdma->state < P9_RDMA_CLOSING) {
+ if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
rdma->state = P9_RDMA_CLOSING;
spin_unlock_irqrestore(&rdma->req_lock, flags);
rdma_disconnect(rdma->cm_id);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e73fd647065a..f88911cffa1a 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -764,10 +764,16 @@ static struct p9_trans_module p9_virtio_trans = {
/* The standard init function */
static int __init p9_virtio_init(void)
{
+ int rc;
+
INIT_LIST_HEAD(&virtio_chan_list);
v9fs_register_trans(&p9_virtio_trans);
- return register_virtio_driver(&p9_virtio_drv);
+ rc = register_virtio_driver(&p9_virtio_drv);
+ if (rc)
+ v9fs_unregister_trans(&p9_virtio_trans);
+
+ return rc;
}
static void __exit p9_virtio_cleanup(void)
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 8ad3ec2610b6..b9e85a4751a6 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
-void __init aarp_proto_init(void)
+int __init aarp_proto_init(void)
{
+ int rc;
+
aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
- if (!aarp_dl)
+ if (!aarp_dl) {
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
+ return -ENOMEM;
+ }
setup_timer(&aarp_timer, aarp_expire_timeout, 0);
aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
add_timer(&aarp_timer);
- register_netdevice_notifier(&aarp_notifier);
+ rc = register_netdevice_notifier(&aarp_notifier);
+ if (rc) {
+ del_timer_sync(&aarp_timer);
+ unregister_snap_client(aarp_dl);
+ }
+ return rc;
}
/* Remove the AARP entries associated with a device. */
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index af46bc49e1e9..b5f84f428aa6 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -293,7 +293,7 @@ out_interface:
goto out;
}
-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
{
remove_proc_entry("interface", atalk_proc_dir);
remove_proc_entry("route", atalk_proc_dir);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 10d2bdce686e..93209c009df5 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1029,6 +1029,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
*/
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
goto out;
+
+ rc = -EPERM;
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
+ goto out;
+
rc = -ENOMEM;
sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
if (!sk)
@@ -1906,31 +1911,61 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
-static const char atalk_err_snap[] __initconst =
- KERN_CRIT "Unable to register DDP with SNAP.\n";
-
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
- int rc = proto_register(&ddp_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&ddp_proto, 0);
+ if (rc)
goto out;
- (void)sock_register(&atalk_family_ops);
+ rc = sock_register(&atalk_family_ops);
+ if (rc)
+ goto out_proto;
+
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
- if (!ddp_dl)
- printk(atalk_err_snap);
+ if (!ddp_dl) {
+ pr_crit("Unable to register DDP with SNAP.\n");
+ rc = -ENOMEM;
+ goto out_sock;
+ }
dev_add_pack(&ltalk_packet_type);
dev_add_pack(&ppptalk_packet_type);
- register_netdevice_notifier(&ddp_notifier);
- aarp_proto_init();
- atalk_proc_init();
- atalk_register_sysctl();
+ rc = register_netdevice_notifier(&ddp_notifier);
+ if (rc)
+ goto out_snap;
+
+ rc = aarp_proto_init();
+ if (rc)
+ goto out_dev;
+
+ rc = atalk_proc_init();
+ if (rc)
+ goto out_aarp;
+
+ rc = atalk_register_sysctl();
+ if (rc)
+ goto out_proc;
out:
return rc;
+out_proc:
+ atalk_proc_exit();
+out_aarp:
+ aarp_cleanup_module();
+out_dev:
+ unregister_netdevice_notifier(&ddp_notifier);
+out_snap:
+ dev_remove_pack(&ppptalk_packet_type);
+ dev_remove_pack(&ltalk_packet_type);
+ unregister_snap_client(ddp_dl);
+out_sock:
+ sock_unregister(PF_APPLETALK);
+out_proto:
+ proto_unregister(&ddp_proto);
+ goto out;
}
module_init(atalk_init);
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index ebb864361f7a..4e6042e0fcac 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -44,9 +44,12 @@ static struct ctl_table atalk_table[] = {
static struct ctl_table_header *atalk_table_header;
-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
{
atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+ if (!atalk_table_header)
+ return -ENOMEM;
+ return 0;
}
void atalk_unregister_sysctl(void)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 1e84c5226c84..704892d79bf1 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -721,7 +721,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+ if (arg < 0 || arg >= MAX_LEC_ITF)
+ return -EINVAL;
+ arg = array_index_nospec(arg, MAX_LEC_ITF);
+ if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -739,6 +742,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = arg;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
+ i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2772f6a13fcb..de55a3f001dc 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -859,6 +859,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
break;
case SOCK_RAW:
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
break;
default:
return -ESOCKTNOSUPPORT;
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 149f82bd83fd..6ba56f215229 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
if (ax25->sk != NULL) {
+ local_bh_disable();
bh_lock_sock(ax25->sk);
sock_reset_flag(ax25->sk, SOCK_ZAPPED);
bh_unlock_sock(ax25->sk);
+ local_bh_enable();
}
put:
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 1ae8c59fcb2d..2b663622bdb4 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
@@ -149,7 +150,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
* Return: 0 on success, a negative error code otherwise.
*/
static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
- int max_if_num)
+ unsigned int max_if_num)
{
void *data_ptr;
size_t old_size;
@@ -193,7 +194,8 @@ unlock:
*/
static void
batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num)
+ unsigned int max_if_num,
+ unsigned int del_if_num)
{
size_t chunk_size;
size_t if_offset;
@@ -231,7 +233,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
*/
static void
batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num)
+ unsigned int max_if_num,
+ unsigned int del_if_num)
{
size_t if_offset;
void *data_ptr;
@@ -268,7 +271,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
* Return: 0 on success, a negative error code otherwise.
*/
static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
- int max_if_num, int del_if_num)
+ unsigned int max_if_num,
+ unsigned int del_if_num)
{
spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
@@ -302,7 +306,8 @@ static struct batadv_orig_node *
batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
{
struct batadv_orig_node *orig_node;
- int size, hash_added;
+ int hash_added;
+ size_t size;
orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
@@ -366,14 +371,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
unsigned char *ogm_buff;
u32 random_seqno;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
- if (!ogm_buff)
+ if (!ogm_buff) {
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
return -ENOMEM;
+ }
hard_iface->bat_iv.ogm_buff = ogm_buff;
@@ -385,35 +394,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
return 0;
}
static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
kfree(hard_iface->bat_iv.ogm_buff);
hard_iface->bat_iv.ogm_buff = NULL;
+
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+ void *ogm_buff;
- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
ether_addr_copy(batadv_ogm_packet->orig,
hard_iface->net_dev->dev_addr);
ether_addr_copy(batadv_ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
static void
batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
struct batadv_ogm_packet *batadv_ogm_packet;
- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+ void *ogm_buff;
- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ ogm_buff = hard_iface->bat_iv.ogm_buff;
+ if (!ogm_buff)
+ goto unlock;
+
+ batadv_ogm_packet = ogm_buff;
batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
}
/* when do we schedule our own ogm to be sent */
@@ -450,17 +483,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
* batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm_packet *ogm_packet)
{
int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM_HLEN;
- next_buff_pos += ntohs(tvlv_len);
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -488,7 +527,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batadv_ogm_packet->tvlv_len)) {
+ batadv_ogm_packet)) {
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
@@ -892,7 +931,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
u32 i;
size_t word_index;
u8 *w;
- int if_num;
+ unsigned int if_num;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -913,7 +952,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
}
}
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -924,8 +967,10 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
u16 tvlv_len = 0;
unsigned long send_time;
- if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
- (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
+ lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
+
+ /* interface already disabled by batadv_iv_ogm_iface_disable */
+ if (!*ogm_buff)
return;
/* the interface gets activated here to avoid race conditions between
@@ -994,6 +1039,17 @@ out:
batadv_hardif_put(primary_if);
}
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+ return;
+
+ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+ batadv_iv_ogm_schedule_buff(hard_iface);
+ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
/**
* batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
* originator
@@ -1022,7 +1078,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
struct batadv_neigh_node *tmp_neigh_node = NULL;
struct batadv_neigh_node *router = NULL;
struct batadv_orig_node *orig_node_tmp;
- int if_num;
+ unsigned int if_num;
u8 sum_orig, sum_neigh;
u8 *neigh_addr;
u8 tq_avg;
@@ -1180,7 +1236,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
u8 total_count;
u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
- int if_num;
+ unsigned int if_num;
unsigned int tq_asym_penalty, inv_asym_penalty;
unsigned int combined_tq;
unsigned int tq_iface_penalty;
@@ -1221,7 +1277,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
orig_node->last_seen = jiffies;
/* find packet count of corresponding one hop neighbor */
- spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
if_num = if_incoming->if_num;
orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1231,7 +1287,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
} else {
neigh_rq_count = 0;
}
- spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+ spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
/* pay attention to not get a value bigger than 100 % */
if (orig_eq_count > neigh_rq_count)
@@ -1699,9 +1755,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
if (is_my_orig) {
unsigned long *word;
- int offset;
+ size_t offset;
s32 bit_pos;
- s16 if_num;
+ unsigned int if_num;
u8 *weight;
orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@ -1841,7 +1897,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
/* unpack the aggregated packets and process them one by one */
while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
- ogm_packet->tvlv_len)) {
+ ogm_packet)) {
batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM_HLEN;
@@ -2467,12 +2523,22 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
return ret;
}
-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
+static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
{
/* begin scheduling originator messages on that interface */
batadv_iv_ogm_schedule(hard_iface);
}
+/**
+ * batadv_iv_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default TQ difference threshold to 20 */
+ atomic_set(&bat_priv->gw.sel_class, 20);
+}
+
static struct batadv_gw_node *
batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
@@ -2797,8 +2863,8 @@ unlock:
static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.name = "BATMAN_IV",
.iface = {
- .activate = batadv_iv_iface_activate,
.enable = batadv_iv_ogm_iface_enable,
+ .enabled = batadv_iv_iface_enabled,
.disable = batadv_iv_ogm_iface_disable,
.update_mac = batadv_iv_ogm_iface_update_mac,
.primary_set = batadv_iv_ogm_primary_iface_set,
@@ -2821,6 +2887,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
.del_if = batadv_iv_ogm_orig_del_if,
},
.gw = {
+ .init_sel_class = batadv_iv_init_sel_class,
.get_best_gw_node = batadv_iv_gw_get_best_gw_node,
.is_eligible = batadv_iv_gw_is_eligible,
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
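The batadv_iv_ogm_aggr_packet() rework above validates that the fixed OGM header fits in the buffer before it reads the variable tvlv_len field from it. A generic sketch of that two-step bounds check, using a made-up packet structure rather than the real batman-adv one:

#include <linux/kernel.h>
#include <linux/types.h>

struct example_pkt {
        u8     type;
        __be16 tvlv_len;        /* length of trailing TLV data */
} __packed;

static bool example_fits(int buff_pos, int packet_len,
                         const struct example_pkt *pkt)
{
        int next = buff_pos + sizeof(*pkt);

        /* the header itself must be inside the buffer before it is read */
        if (next > packet_len)
                return false;

        /* only now is pkt->tvlv_len known to be safe to dereference */
        next += ntohs(pkt->tvlv_len);

        return next <= packet_len;
}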
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 4348118e7eac..18fa602e5fc6 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -19,7 +19,6 @@
#include "main.h"
#include <linux/atomic.h>
-#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
@@ -623,11 +622,11 @@ static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
int ret = 0;
ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
- if (WARN_ON(!ifinfo1))
+ if (!ifinfo1)
goto err_ifinfo1;
ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
- if (WARN_ON(!ifinfo2))
+ if (!ifinfo2)
goto err_ifinfo2;
ret = ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput;
@@ -649,11 +648,11 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
bool ret = false;
ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
- if (WARN_ON(!ifinfo1))
+ if (!ifinfo1)
goto err_ifinfo1;
ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
- if (WARN_ON(!ifinfo2))
+ if (!ifinfo2)
goto err_ifinfo2;
threshold = ifinfo1->bat_v.throughput / 4;
@@ -668,6 +667,16 @@ err_ifinfo1:
return ret;
}
+/**
+ * batadv_v_init_sel_class - initialize GW selection class
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
+{
+ /* set default throughput difference threshold to 5Mbps */
+ atomic_set(&bat_priv->gw.sel_class, 50);
+}
+
static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
char *buff, size_t count)
{
@@ -805,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
}
orig_gw = batadv_gw_node_get(bat_priv, orig_node);
- if (!orig_node)
+ if (!orig_gw)
goto out;
if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
@@ -1054,6 +1063,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
.dump = batadv_v_orig_dump,
},
.gw = {
+ .init_sel_class = batadv_v_init_sel_class,
.store_sel_class = batadv_v_store_sel_class,
.show_sel_class = batadv_v_show_sel_class,
.get_best_gw_node = batadv_v_gw_get_best_gw_node,
@@ -1094,9 +1104,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
if (ret < 0)
return ret;
- /* set default throughput difference threshold to 5Mbps */
- atomic_set(&bat_priv->gw.sel_class, 50);
-
return 0;
}
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 5d79004de25c..62df763b2aae 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -19,6 +19,7 @@
#include "main.h"
#include <linux/atomic.h>
+#include <linux/bitops.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -29,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
+#include <linux/nl80211.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
@@ -100,8 +102,12 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
*/
return 0;
}
- if (!ret)
- return sinfo.expected_throughput / 100;
+ if (ret)
+ goto default_throughput;
+ if (!(sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)))
+ goto default_throughput;
+
+ return sinfo.expected_throughput / 100;
}
/* unsupported WiFi driver version */
@@ -185,6 +191,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
struct sk_buff *skb;
int probe_len, i;
int elp_skb_len;
+ void *tmp;
/* this probing routine is for Wifi neighbours only */
if (!batadv_is_wifi_netdev(hard_iface->net_dev))
@@ -216,7 +223,8 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
* the packet to be exactly of that size to make the link
* throughput estimation effective.
*/
- skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+ tmp = skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+ memset(tmp, 0, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -327,21 +335,23 @@ out:
*/
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
{
+ static const size_t tvlv_padding = sizeof(__be32);
struct batadv_elp_packet *elp_packet;
unsigned char *elp_buff;
u32 random_seqno;
size_t size;
int res = -ENOMEM;
- size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+ size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
if (!hard_iface->bat_v.elp_skb)
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
- elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_buff = skb_put(hard_iface->bat_v.elp_skb,
+ BATADV_ELP_HLEN + tvlv_padding);
elp_packet = (struct batadv_elp_packet *)elp_buff;
- memset(elp_packet, 0, BATADV_ELP_HLEN);
+ memset(elp_packet, 0, BATADV_ELP_HLEN + tvlv_padding);
elp_packet->packet_type = BATADV_ELP;
elp_packet->version = BATADV_COMPAT_VERSION;
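The ELP probe hunk replaces a bare skb_put() with skb_put() plus memset(), because skb_put() only extends the data area and does not clear it. A sketch of the pattern with an arbitrary length (later kernels wrap the same thing as skb_put_zero()):

#include <linux/skbuff.h>
#include <linux/string.h>

static void example_pad_skb(struct sk_buff *skb, unsigned int pad_len)
{
        void *tail;

        tail = skb_put(skb, pad_len);   /* reserves, but does not zero */
        memset(tail, 0, pad_len);       /* avoid leaking old heap contents */
}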
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 1aeeadca620c..b0cae59bd327 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -28,6 +28,8 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/rculist.h>
@@ -127,22 +129,19 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
}
/**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
*/
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
{
struct batadv_hard_iface *hard_iface;
- struct batadv_priv_bat_v *bat_v;
- struct batadv_priv *bat_priv;
struct batadv_ogm2_packet *ogm_packet;
struct sk_buff *skb, *skb_tmp;
unsigned char *ogm_buff, *pkt_buff;
int ogm_buff_len;
u16 tvlv_len = 0;
- bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
- bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+ lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
@@ -210,6 +209,23 @@ out:
}
/**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+ struct batadv_priv_bat_v *bat_v;
+ struct batadv_priv *bat_priv;
+
+ bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+ bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+ batadv_v_ogm_send_softif(bat_priv);
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
+/**
* batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
* @hard_iface: the interface to prepare
*
@@ -235,11 +251,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
struct batadv_ogm2_packet *ogm_packet;
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
if (!bat_priv->bat_v.ogm_buff)
- return;
+ goto unlock;
ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
/**
@@ -618,17 +638,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
* batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
* @buff_pos: current position in the skb
* @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
*
* Return: true if there is enough space for another OGM, false otherwise.
*/
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+ const struct batadv_ogm2_packet *ogm2_packet)
{
int next_buff_pos = 0;
- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
- next_buff_pos += ntohs(tvlv_len);
+ /* check if there is enough space for the header */
+ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+ if (next_buff_pos > packet_len)
+ return false;
+
+ /* check if there is enough space for the optional TVLV */
+ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
return (next_buff_pos <= packet_len) &&
(next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -775,7 +801,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
ogm_packet = (struct batadv_ogm2_packet *)skb->data;
while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
- ogm_packet->tvlv_len)) {
+ ogm_packet)) {
batadv_v_ogm_process(skb, ogm_offset, if_incoming);
ogm_offset += BATADV_OGM2_HLEN;
@@ -821,6 +847,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
+ mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
return 0;
}
@@ -832,7 +860,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
+ mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
kfree(bat_priv->bat_v.ogm_buff);
bat_priv->bat_v.ogm_buff = NULL;
bat_priv->bat_v.ogm_buff_len = 0;
+
+ mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
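The bat_v_ogm.c changes put every access to the shared OGM buffer under ogm_buff_mutex and let a late worker detect teardown via a NULL pointer. A sketch of that arrangement with placeholder names; the mutex is assumed to be set up with mutex_init() during initialisation:

#include <linux/mutex.h>
#include <linux/slab.h>

struct example_state {
        struct mutex buff_mutex;        /* protects buff and buff_len */
        unsigned char *buff;
        int buff_len;
};

static void example_free_buff(struct example_state *st)
{
        mutex_lock(&st->buff_mutex);

        kfree(st->buff);
        st->buff = NULL;        /* workers check for NULL under the mutex */
        st->buff_len = 0;

        mutex_unlock(&st->buff_mutex);
}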
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8b6f654bc85d..00123064eb26 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -802,6 +802,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
+ struct batadv_bla_claim *claim_removed_entry;
+ struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
@@ -812,10 +814,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
mac, BATADV_PRINT_VID(vid));
- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
- batadv_choose_claim, claim);
- batadv_claim_put(claim); /* reference from the hash is gone */
+ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim);
+ if (!claim_removed_node)
+ goto free_claim;
+ /* reference from the hash is gone */
+ claim_removed_entry = hlist_entry(claim_removed_node,
+ struct batadv_bla_claim, hash_entry);
+ batadv_claim_put(claim_removed_entry);
+
+free_claim:
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
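batadv_bla_del_claim() above now drops the hash table's reference on the entry that was actually removed, recovered from the returned hlist_node via hlist_entry(), instead of assuming it is the entry found by the earlier lookup. A sketch of that reference handling; the hash-removal and put helpers here are simplified stand-ins:

#include <linux/list.h>

struct example_entry {
        struct hlist_node hash_entry;
        /* ... payload and refcount ... */
};

/* stand-ins for the real hash-removal and refcount helpers */
static struct hlist_node *example_hash_remove(struct example_entry *e)
{
        hlist_del_init(&e->hash_entry);
        return &e->hash_entry;
}

static void example_entry_put(struct example_entry *e) { }

static void example_del(struct example_entry *lookup_hit)
{
        struct hlist_node *removed_node;
        struct example_entry *removed;

        removed_node = example_hash_remove(lookup_hit);
        if (!removed_node)
                goto put_lookup;

        /* drop the reference the hash held, on the entry that was
         * really removed, not blindly on the lookup result
         */
        removed = hlist_entry(removed_node, struct example_entry, hash_entry);
        example_entry_put(removed);

put_lookup:
        /* drop the reference taken by the earlier lookup */
        example_entry_put(lookup_hit);
}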
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index b4ffba7dd583..e0ab277db503 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -18,6 +18,7 @@
#include "debugfs.h"
#include "main.h"
+#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -340,6 +341,25 @@ out:
}
/**
+ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
+ * @hard_iface: hard interface which was renamed
+ */
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+ const char *name = hard_iface->net_dev->name;
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = hard_iface->debug_dir;
+ if (!dir)
+ return;
+
+ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+ if (!d)
+ pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
+/**
* batadv_debugfs_del_hardif - delete the base directory for a hard interface
* in debugfs.
* @hard_iface: hard interface which is deleted.
@@ -403,6 +423,26 @@ out:
return -ENOMEM;
}
+/**
+ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
+ * @dev: net_device which was renamed
+ */
+void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ const char *name = dev->name;
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = bat_priv->debug_dir;
+ if (!dir)
+ return;
+
+ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+ if (!d)
+ pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
void batadv_debugfs_del_meshif(struct net_device *dev)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index e49121ee55f6..59a0d6d70ecd 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -29,8 +29,10 @@ struct net_device;
void batadv_debugfs_init(void);
void batadv_debugfs_destroy(void);
int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_rename_meshif(struct net_device *dev);
void batadv_debugfs_del_meshif(struct net_device *dev);
int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
#else
@@ -48,6 +50,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
return 0;
}
+static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+}
+
static inline void batadv_debugfs_del_meshif(struct net_device *dev)
{
}
@@ -59,6 +65,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
}
static inline
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
+static inline
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
{
}
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index df7c6a080188..83c7009b0da1 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -242,6 +242,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
u32 hash = 0;
const struct batadv_dat_entry *dat = data;
const unsigned char *key;
+ __be16 vid;
u32 i;
key = (const unsigned char *)&dat->ip;
@@ -251,7 +252,8 @@ static u32 batadv_hash_dat(const void *data, u32 size)
hash ^= (hash >> 6);
}
- key = (const unsigned char *)&dat->vid;
+ vid = htons(dat->vid);
+ key = (__force const unsigned char *)&vid;
for (i = 0; i < sizeof(dat->vid); i++) {
hash += key[i];
hash += (hash << 10);
@@ -1023,8 +1025,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
skb_reset_mac_header(skb_new);
skb_new->protocol = eth_type_trans(skb_new,
bat_priv->soft_iface);
- bat_priv->stats.rx_packets++;
- bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
+ batadv_inc_counter(bat_priv, BATADV_CNT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
+ skb->len + ETH_HLEN + hdr_size);
bat_priv->soft_iface->last_rx = jiffies;
netif_rx(skb_new);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index a06b6041f3e0..fef21f75892e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -232,8 +232,10 @@ err_unlock:
spin_unlock_bh(&chain->lock);
err:
- if (!ret)
+ if (!ret) {
kfree(frag_entry_new);
+ kfree_skb(skb);
+ }
return ret;
}
@@ -305,7 +307,7 @@ free:
*
* There are three possible outcomes: 1) Packet is merged: Return true and
* set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
- * to NULL; 3) Error: Return false and leave skb as is.
+ * to NULL; 3) Error: Return false and free skb.
*
* Return: true when packet is merged or buffered, false when skb is not not
* used.
@@ -330,9 +332,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
goto out_err;
out:
- *skb = skb_out;
ret = true;
out_err:
+ *skb = skb_out;
return ret;
}
@@ -482,12 +484,20 @@ int batadv_frag_send_packet(struct sk_buff *skb,
*/
if (skb->priority >= 256 && skb->priority <= 263)
frag_header.priority = skb->priority - 256;
+ else
+ frag_header.priority = 0;
ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
ether_addr_copy(frag_header.dest, orig_node->orig);
/* Eat and send fragments from the tail of skb */
while (skb->len > max_fragment_size) {
+ /* The initial check in this function should cover this case */
+ if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
+ ret = -1;
+ goto out;
+ }
+
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment)
goto out;
@@ -505,12 +515,6 @@ int batadv_frag_send_packet(struct sk_buff *skb,
}
frag_header.no++;
-
- /* The initial check in this function should cover this case */
- if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
- ret = -1;
- goto out;
- }
}
/* Make room for the fragment header. */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index ed9aaf30fbcf..3bd7ed6b6b3e 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@@ -325,6 +326,9 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
@@ -332,6 +336,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node;
+ lockdep_assert_held(&bat_priv->gw.list_lock);
+
if (gateway->bandwidth_down == 0)
return;
@@ -346,10 +352,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- spin_lock_bh(&bat_priv->gw.list_lock);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
- spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -404,11 +408,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
+ spin_unlock_bh(&bat_priv->gw.list_lock);
if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
(gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
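batadv_gw_node_add() now documents and enforces its locking requirement with lockdep_assert_held(), while the caller holds gw.list_lock across lookup and insertion. A generic sketch of that annotation with placeholder names:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

struct example_ctx {
        spinlock_t list_lock;           /* protects entries */
        struct hlist_head entries;
};

/* caller must hold ctx->list_lock */
static void example_add_entry(struct example_ctx *ctx, struct hlist_node *n)
{
        lockdep_assert_held(&ctx->list_lock);

        hlist_add_head_rcu(n, &ctx->entries);
}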
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 21184810d89f..3e3f91ab694f 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
*/
void batadv_gw_init(struct batadv_priv *bat_priv)
{
+ if (bat_priv->algo_ops->gw.init_sel_class)
+ bat_priv->algo_ops->gw.init_sel_class(bat_priv);
+ else
+ atomic_set(&bat_priv->gw.sel_class, 1);
+
batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
NULL, BATADV_TVLV_GW, 1,
BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 8f7883b7d717..f528761674df 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
@@ -539,6 +540,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
+ if (bat_priv->num_ifaces >= UINT_MAX) {
+ ret = -ENOSPC;
+ goto err_dev;
+ }
+
ret = netdev_master_upper_dev_link(hard_iface->net_dev,
soft_iface, NULL, NULL);
if (ret)
@@ -591,6 +597,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_recalc_extra_skbroom(soft_iface);
+ if (bat_priv->algo_ops->iface.enabled)
+ bat_priv->algo_ops->iface.enabled(hard_iface);
+
out:
return 0;
@@ -646,7 +655,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
/* nobody uses this interface anymore */
- if (!bat_priv->num_ifaces) {
+ if (bat_priv->num_ifaces == 0) {
batadv_gw_check_client_stop(bat_priv);
if (autodel == BATADV_IF_CLEANUP_AUTO)
@@ -682,7 +691,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
if (ret)
goto free_if;
- hard_iface->if_num = -1;
+ hard_iface->if_num = 0;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
@@ -694,6 +703,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
+ mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
spin_lock_init(&hard_iface->neigh_list_lock);
kref_init(&hard_iface->refcount);
@@ -750,6 +760,32 @@ void batadv_hardif_remove_interfaces(void)
rtnl_unlock();
}
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+ struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ batadv_sysfs_add_meshif(net_dev);
+ bat_priv = netdev_priv(net_dev);
+ batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+ break;
+ case NETDEV_CHANGENAME:
+ batadv_debugfs_rename_meshif(net_dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int batadv_hard_if_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -758,12 +794,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
struct batadv_hard_iface *primary_if = NULL;
struct batadv_priv *bat_priv;
- if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
- batadv_sysfs_add_meshif(net_dev);
- bat_priv = netdev_priv(net_dev);
- batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
- return NOTIFY_DONE;
- }
+ if (batadv_softif_is_valid(net_dev))
+ return batadv_hard_if_event_softif(event, net_dev);
hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -807,6 +839,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
if (hard_iface == primary_if)
batadv_primary_if_update_addr(bat_priv, NULL);
break;
+ case NETDEV_CHANGENAME:
+ batadv_debugfs_rename_hardif(hard_iface);
+ break;
default:
break;
}
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 64cb6acbe0a6..d7ba4fd24e3d 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -114,7 +114,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
{
struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
- return attr ? nla_get_u32(attr) : 0;
+ return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
}
/**
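batadv_netlink_get_ifindex() above only reads the attribute when its payload really is four bytes. The same defensive pattern in isolation:

#include <net/netlink.h>

static u32 example_get_u32_attr(const struct nlattr *attr)
{
        /* reject missing or truncated attributes before nla_get_u32() */
        if (!attr || nla_len(attr) != sizeof(u32))
                return 0;

        return nla_get_u32(attr);
}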
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 7c8d16086f0f..8466f83fc32f 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1495,7 +1495,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- int max_if_num)
+ unsigned int max_if_num)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@ -1530,7 +1530,7 @@ err:
}
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- int max_if_num)
+ unsigned int max_if_num)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index ebc56183f358..fab0b2cc141d 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -78,9 +78,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- int max_if_num);
+ unsigned int max_if_num);
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- int max_if_num);
+ unsigned int max_if_num);
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
unsigned short vid);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8b98609ebc1e..19059ae26e51 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -930,7 +930,6 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
bool is4addr;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
@@ -951,9 +950,13 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
return NET_RX_DROP;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
/* packet for me */
if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
if (is4addr) {
+ unicast_4addr_packet =
+ (struct batadv_unicast_4addr_packet *)skb->data;
subtype = unicast_4addr_packet->subtype;
batadv_dat_inc_counter(bat_priv, subtype);
@@ -1080,6 +1083,12 @@ int batadv_recv_frag_packet(struct sk_buff *skb,
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+ /* batadv_frag_skb_buffer will always consume the skb and
+ * the caller should therefore never try to free the
+ * skb after this point
+ */
+ ret = NET_RX_SUCCESS;
+
/* Add fragment to buffer and merge if possible. */
if (!batadv_frag_skb_buffer(&skb, orig_node_src))
goto out;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a92512a46e91..99d2c453c872 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -808,7 +808,6 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
- atomic_set(&bat_priv->gw.sel_class, 20);
atomic_set(&bat_priv->gw.bandwidth_down, 100);
atomic_set(&bat_priv->gw.bandwidth_up, 20);
atomic_set(&bat_priv->orig_interval, 1000);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b9f9a310eb78..d40d83949b00 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -615,14 +615,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global,
const char *message)
{
+ struct batadv_tt_global_entry *tt_removed_entry;
+ struct hlist_node *tt_removed_node;
+
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
BATADV_PRINT_VID(tt_global->common.vid), message);
- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
- batadv_choose_tt, &tt_global->common);
- batadv_tt_global_entry_put(tt_global);
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_global->common);
+ if (!tt_removed_node)
+ return;
+
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_global_entry,
+ common.hash_entry);
+ batadv_tt_global_entry_put(tt_removed_entry);
}
/**
@@ -855,7 +867,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
struct batadv_orig_node_vlan *vlan;
u8 *tt_change_ptr;
- rcu_read_lock();
+ spin_lock_bh(&orig_node->vlan_list_lock);
hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
num_vlan++;
num_entries += atomic_read(&vlan->tt.num_entries);
@@ -893,7 +905,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
- rcu_read_unlock();
+ spin_unlock_bh(&orig_node->vlan_list_lock);
return tvlv_len;
}
@@ -924,15 +936,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
struct batadv_tvlv_tt_vlan_data *tt_vlan;
struct batadv_softif_vlan *vlan;
u16 num_vlan = 0;
- u16 num_entries = 0;
+ u16 vlan_entries = 0;
+ u16 total_entries = 0;
u16 tvlv_len;
u8 *tt_change_ptr;
int change_offset;
- rcu_read_lock();
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan_entries = atomic_read(&vlan->tt.num_entries);
+ if (vlan_entries < 1)
+ continue;
+
num_vlan++;
- num_entries += atomic_read(&vlan->tt.num_entries);
+ total_entries += vlan_entries;
}
change_offset = sizeof(**tt_data);
@@ -940,7 +957,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
/* if tt_len is negative, allocate the space needed by the full table */
if (*tt_len < 0)
- *tt_len = batadv_tt_len(num_entries);
+ *tt_len = batadv_tt_len(total_entries);
tvlv_len = *tt_len;
tvlv_len += change_offset;
@@ -957,6 +974,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ vlan_entries = atomic_read(&vlan->tt.num_entries);
+ if (vlan_entries < 1)
+ continue;
+
tt_vlan->vid = htons(vlan->vid);
tt_vlan->crc = htonl(vlan->tt.crc);
@@ -967,7 +988,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
out:
- rcu_read_unlock();
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return tvlv_len;
}
@@ -1308,9 +1329,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid, const char *message,
bool roaming)
{
+ struct batadv_tt_local_entry *tt_removed_entry;
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
- void *tt_entry_exists;
+ struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1339,15 +1361,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_local_entry->common);
- if (!tt_entry_exists)
+ if (!tt_removed_node)
goto out;
- /* extra call to free the local tt entry */
- batadv_tt_local_entry_put(tt_local_entry);
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_local_entry,
+ common.hash_entry);
+ batadv_tt_local_entry_put(tt_removed_entry);
out:
if (tt_local_entry)
@@ -1523,6 +1548,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
* by a given originator
* @entry: the TT global entry to check
* @orig_node: the originator to search in the list
+ * @flags: a pointer to store TT flags for the given @entry received
+ * from @orig_node
*
* find out if an orig_node is already in the list of a tt_global_entry.
*
@@ -1530,7 +1557,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
*/
static bool
batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
- const struct batadv_orig_node *orig_node)
+ const struct batadv_orig_node *orig_node,
+ u8 *flags)
{
struct batadv_tt_orig_list_entry *orig_entry;
bool found = false;
@@ -1538,15 +1566,51 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
if (orig_entry) {
found = true;
+
+ if (flags)
+ *flags = orig_entry->flags;
+
batadv_tt_orig_list_entry_put(orig_entry);
}
return found;
}
+/**
+ * batadv_tt_global_sync_flags - update TT sync flags
+ * @tt_global: the TT global entry to update sync flags in
+ *
+ * Updates the sync flag bits in the tt_global flag attribute with a logical
+ * OR of all sync flags from any of its TT orig entries.
+ */
+static void
+batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
+{
+ struct batadv_tt_orig_list_entry *orig_entry;
+ const struct hlist_head *head;
+ u16 flags = BATADV_NO_FLAGS;
+
+ rcu_read_lock();
+ head = &tt_global->orig_list;
+ hlist_for_each_entry_rcu(orig_entry, head, list)
+ flags |= orig_entry->flags;
+ rcu_read_unlock();
+
+ flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
+ tt_global->common.flags = flags;
+}
+
+/**
+ * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * @tt_global: the TT global entry to add an orig entry in
+ * @orig_node: the originator to add an orig entry for
+ * @ttvn: translation table version number of this changeset
+ * @flags: TT sync flags
+ */
static void
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
- struct batadv_orig_node *orig_node, int ttvn)
+ struct batadv_orig_node *orig_node, int ttvn,
+ u8 flags)
{
struct batadv_tt_orig_list_entry *orig_entry;
@@ -1558,7 +1622,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
* was added during a "temporary client detection"
*/
orig_entry->ttvn = ttvn;
- goto out;
+ orig_entry->flags = flags;
+ goto sync_flags;
}
orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1570,6 +1635,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
orig_entry->orig_node = orig_node;
orig_entry->ttvn = ttvn;
+ orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
kref_get(&orig_entry->refcount);
@@ -1577,6 +1643,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
&tt_global->orig_list);
atomic_inc(&tt_global->orig_list_count);
+sync_flags:
+ batadv_tt_global_sync_flags(tt_global);
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
@@ -1640,7 +1708,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
ether_addr_copy(common->addr, tt_addr);
common->vid = vid;
- common->flags = flags;
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
tt_global_entry->roam_at = 0;
/* node must store current time in case of roaming. This is
* needed to purge this entry out on timeout (if nobody claims
@@ -1682,7 +1752,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
if (!(common->flags & BATADV_TT_CLIENT_TEMP))
goto out;
if (batadv_tt_global_entry_has_orig(tt_global_entry,
- orig_node))
+ orig_node, NULL))
goto out_remove;
batadv_tt_global_del_orig_list(tt_global_entry);
goto add_orig_entry;
@@ -1700,10 +1770,11 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
}
/* the change can carry possible "attribute" flags like the
- * TT_CLIENT_WIFI, therefore they have to be copied in the
+ * TT_CLIENT_TEMP, therefore they have to be copied in the
* client entry
*/
- common->flags |= flags;
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
@@ -1720,7 +1791,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
}
add_orig_entry:
/* add the new orig_entry (if needed) or update it */
- batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
+ batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
+ flags & BATADV_TT_SYNC_MASK);
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1943,6 +2015,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
struct batadv_tt_orig_list_entry *orig,
bool best)
{
+ u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
void *hdr;
struct batadv_orig_node_vlan *vlan;
u8 last_ttvn;
@@ -1972,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
- nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
+ nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
goto nla_put_failure;
if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2586,6 +2659,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->tt.global_hash;
+ struct batadv_tt_orig_list_entry *tt_orig;
struct batadv_tt_common_entry *tt_common;
struct batadv_tt_global_entry *tt_global;
struct hlist_head *head;
@@ -2624,8 +2698,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
/* find out if this global entry is announced by this
* originator
*/
- if (!batadv_tt_global_entry_has_orig(tt_global,
- orig_node))
+ tt_orig = batadv_tt_global_orig_entry_find(tt_global,
+ orig_node);
+ if (!tt_orig)
continue;
/* use network order to read the VID: this ensures that
@@ -2637,10 +2712,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
/* compute the CRC on flags that have to be kept in sync
* among nodes
*/
- flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+ flags = tt_orig->flags;
crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
+
+ batadv_tt_orig_list_entry_put(tt_orig);
}
rcu_read_unlock();
}
@@ -2818,23 +2895,46 @@ unlock:
}
/**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify local tt entry and get flags
* @entry_ptr: to be checked local tt entry
* @data_ptr: not used but definition required to satisfy the callback prototype
+ * @flags: a pointer to store TT flags for this client to
+ *
+ * Checks the validity of the given local TT entry. If it is, then the provided
+ * flags pointer is updated.
*
* Return: true if the entry is a valid, false otherwise.
*/
-static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
+static bool batadv_tt_local_valid(const void *entry_ptr,
+ const void *data_ptr,
+ u8 *flags)
{
const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
return false;
+
+ if (flags)
+ *flags = tt_common_entry->flags;
+
return true;
}
+/**
+ * batadv_tt_global_valid() - verify global tt entry and get flags
+ * @entry_ptr: to be checked global tt entry
+ * @data_ptr: an orig_node object (may be NULL)
+ * @flags: a pointer to store TT flags for this client to
+ *
+ * Checks the validity of the given global TT entry. If it is, then the provided
+ * flags pointer is updated either with the common (summed) TT flags if data_ptr
+ * is NULL or the specific, per originator TT flags otherwise.
+ *
+ * Return: true if the entry is a valid, false otherwise.
+ */
static bool batadv_tt_global_valid(const void *entry_ptr,
- const void *data_ptr)
+ const void *data_ptr,
+ u8 *flags)
{
const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
const struct batadv_tt_global_entry *tt_global_entry;
@@ -2848,7 +2948,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
struct batadv_tt_global_entry,
common);
- return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
+ return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
+ flags);
}
/**
@@ -2858,25 +2959,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
* @hash: hash table containing the tt entries
* @tt_len: expected tvlv tt data buffer length in number of bytes
* @tvlv_buff: pointer to the buffer to fill with the TT data
- * @valid_cb: function to filter tt change entries
+ * @valid_cb: function to filter tt change entries and to return TT flags
* @cb_data: data passed to the filter function as argument
+ *
+ * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
+ * is not provided then this becomes a no-op.
*/
static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
void *tvlv_buff, u16 tt_len,
bool (*valid_cb)(const void *,
- const void *),
+ const void *,
+ u8 *flags),
void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
struct hlist_head *head;
u16 tt_tot, tt_num_entries = 0;
+ u8 flags;
+ bool ret;
u32 i;
tt_tot = batadv_tt_entries(tt_len);
tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
+ if (!valid_cb)
+ return;
+
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2886,11 +2996,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
if (tt_tot == tt_num_entries)
break;
- if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
+ ret = valid_cb(tt_common_entry, cb_data, &flags);
+ if (!ret)
continue;
ether_addr_copy(tt_change->addr, tt_common_entry->addr);
- tt_change->flags = tt_common_entry->flags;
+ tt_change->flags = flags;
tt_change->vid = htons(tt_common_entry->vid);
memset(tt_change->reserved, 0,
sizeof(tt_change->reserved));
@@ -3684,6 +3795,8 @@ static void batadv_tt_purge(struct work_struct *work)
void batadv_tt_free(struct batadv_priv *bat_priv)
{
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
+
batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index b3dd1a381aad..c17b74e51fe9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -27,6 +27,7 @@
#include <linux/compiler.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/sched.h> /* for linux/wait.h */
@@ -81,11 +82,13 @@ enum batadv_dhcp_recipient {
* @ogm_buff: buffer holding the OGM packet
* @ogm_buff_len: length of the OGM packet buffer
* @ogm_seqno: OGM sequence number - used to identify each OGM
+ * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
*/
struct batadv_hard_iface_bat_iv {
unsigned char *ogm_buff;
int ogm_buff_len;
atomic_t ogm_seqno;
+ struct mutex ogm_buff_mutex;
};
/**
@@ -139,7 +142,7 @@ struct batadv_hard_iface_bat_v {
*/
struct batadv_hard_iface {
struct list_head list;
- s16 if_num;
+ unsigned int if_num;
char if_status;
struct net_device *net_dev;
u8 num_bcasts;
@@ -966,12 +969,14 @@ struct batadv_softif_vlan {
* @ogm_buff: buffer holding the OGM packet
* @ogm_buff_len: length of the OGM packet buffer
* @ogm_seqno: OGM sequence number - used to identify each OGM
+ * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
* @ogm_wq: workqueue used to schedule OGM transmissions
*/
struct batadv_priv_bat_v {
unsigned char *ogm_buff;
int ogm_buff_len;
atomic_t ogm_seqno;
+ struct mutex ogm_buff_mutex;
struct delayed_work ogm_wq;
};
@@ -1060,7 +1065,7 @@ struct batadv_priv {
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
- char num_ifaces;
+ unsigned int num_ifaces;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
@@ -1241,6 +1246,7 @@ struct batadv_tt_global_entry {
* struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
* @orig_node: pointer to orig node announcing this non-mesh client
* @ttvn: translation table version number which added the non-mesh client
+ * @flags: per orig entry TT sync flags
* @list: list node for batadv_tt_global_entry::orig_list
* @refcount: number of contexts the object is used
* @rcu: struct used for freeing in an RCU-safe manner
@@ -1248,6 +1254,7 @@ struct batadv_tt_global_entry {
struct batadv_tt_orig_list_entry {
struct batadv_orig_node *orig_node;
u8 ttvn;
+ u8 flags;
struct hlist_node list;
struct kref refcount;
struct rcu_head rcu;
@@ -1397,6 +1404,7 @@ struct batadv_forw_packet {
* @activate: start routing mechanisms when hard-interface is brought up
* (optional)
* @enable: init routing info when hard-interface is enabled
+ * @enabled: notification when hard-interface was enabled (optional)
* @disable: de-init routing info when hard-interface is disabled
* @update_mac: (re-)init mac addresses of the protocol information
* belonging to this hard-interface
@@ -1405,6 +1413,7 @@ struct batadv_forw_packet {
struct batadv_algo_iface_ops {
void (*activate)(struct batadv_hard_iface *hard_iface);
int (*enable)(struct batadv_hard_iface *hard_iface);
+ void (*enabled)(struct batadv_hard_iface *hard_iface);
void (*disable)(struct batadv_hard_iface *hard_iface);
void (*update_mac)(struct batadv_hard_iface *hard_iface);
void (*primary_set)(struct batadv_hard_iface *hard_iface);
@@ -1452,9 +1461,10 @@ struct batadv_algo_neigh_ops {
*/
struct batadv_algo_orig_ops {
void (*free)(struct batadv_orig_node *orig_node);
- int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
- int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
- int del_if_num);
+ int (*add_if)(struct batadv_orig_node *orig_node,
+ unsigned int max_if_num);
+ int (*del_if)(struct batadv_orig_node *orig_node,
+ unsigned int max_if_num, unsigned int del_if_num);
#ifdef CONFIG_BATMAN_ADV_DEBUGFS
void (*print)(struct batadv_priv *priv, struct seq_file *seq,
struct batadv_hard_iface *hard_iface);
@@ -1466,6 +1476,7 @@ struct batadv_algo_orig_ops {
/**
* struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
+ * @init_sel_class: initialize GW selection class (optional)
* @store_sel_class: parse and stores a new GW selection class (optional)
* @show_sel_class: prints the current GW selection class (optional)
* @get_best_gw_node: select the best GW from the list of available nodes
@@ -1476,6 +1487,7 @@ struct batadv_algo_orig_ops {
* @dump: dump gateways to a netlink socket (optional)
*/
struct batadv_algo_gw_ops {
+ void (*init_sel_class)(struct batadv_priv *bat_priv);
ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
size_t count);
ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index de7b82ece499..21096c882223 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -187,10 +187,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
}
if (!rt) {
- nexthop = &lowpan_cb(skb)->gw;
-
- if (ipv6_addr_any(nexthop))
- return NULL;
+ if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
+ /* There is neither route nor gateway,
+ * probably the destination is a direct peer.
+ */
+ nexthop = daddr;
+ } else {
+ /* There is a known gateway
+ */
+ nexthop = &lowpan_cb(skb)->gw;
+ }
} else {
nexthop = rt6_nexthop(rt, daddr);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index cc061495f653..1d085eed72d0 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1054,8 +1054,10 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
if (!conn)
return ERR_PTR(-ENOMEM);
- if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
+ if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
+ hci_conn_del(conn);
return ERR_PTR(-EBUSY);
+ }
conn->state = BT_CONNECT;
set_bit(HCI_CONN_SCANNING, &conn->flags);
@@ -1281,8 +1283,16 @@ auth:
return 0;
encrypt:
- if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+ if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
+ /* Ensure that the encryption key size has been read,
+ * otherwise stall the upper layer responses.
+ */
+ if (!conn->enc_key_size)
+ return 0;
+
+ /* Nothing else needed, all requirements are met */
return 1;
+ }
hci_conn_encrypt(conn);
return 0;
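
The hci_connect_le_scan() hunk above calls hci_conn_del() when hci_explicit_conn_params_set() fails, so the -EBUSY error path no longer leaks the half-initialized connection object. A minimal userspace sketch of that error-path pattern, with hypothetical names (conn_alloc, set_params) standing in for the Bluetooth helpers:

#include <errno.h>
#include <stdlib.h>

struct conn { int state; };

/* Hypothetical stand-ins for the allocation and the follow-up init step. */
static struct conn *conn_alloc(void) { return calloc(1, sizeof(struct conn)); }
static void conn_del(struct conn *c) { free(c); }
static int set_params(struct conn *c) { (void)c; return -EBUSY; /* simulate failure */ }

static struct conn *connect_scan(void)
{
	struct conn *c = conn_alloc();

	if (!c)
		return NULL;

	/* A later setup step failed: release the object allocated above
	 * before propagating the error, otherwise it is leaked.
	 */
	if (set_params(c) < 0) {
		conn_del(c);
		return NULL;
	}

	c->state = 1;	/* roughly BT_CONNECT */
	return c;
}

int main(void)
{
	struct conn *c = connect_scan();

	if (c)
		conn_del(c);
	return 0;
}
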
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 4bd72d2fe415..a70b078ceb3c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4180,7 +4180,14 @@ static void hci_rx_work(struct work_struct *work)
hci_send_to_sock(hdev, skb);
}
- if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ /* If the device has been opened in HCI_USER_CHANNEL,
+ * the userspace has exclusive access to device.
+ * When device is HCI_INIT, we still need to process
+ * the data packets to the driver in order
+ * to complete its setup().
+ */
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ !test_bit(HCI_INIT, &hdev->flags)) {
kfree_skb(skb);
continue;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 1015d9c8d97d..4a89e121d662 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1093,6 +1093,14 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
instance_flags = get_adv_instance_flags(hdev, instance);
+ /* If instance already has the flags set skip adding it once
+ * again.
+ */
+ if (adv_instance && eir_get_data(adv_instance->adv_data,
+ adv_instance->adv_data_len, EIR_FLAGS,
+ NULL))
+ goto skip_flags;
+
/* The Add Advertising command allows userspace to set both the general
* and limited discoverable flags.
*/
@@ -1125,6 +1133,7 @@ static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
}
}
+skip_flags:
if (adv_instance) {
memcpy(ptr, adv_instance->adv_data,
adv_instance->adv_data_len);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ca1836941f3c..44b3146c6117 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -826,6 +826,8 @@ static int hci_sock_release(struct socket *sock)
if (!sk)
return 0;
+ lock_sock(sk);
+
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_MONITOR:
atomic_dec(&monitor_promisc);
@@ -873,6 +875,7 @@ static int hci_sock_release(struct socket *sock)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
+ release_sock(sk);
sock_put(sk);
return 0;
}
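
The hci_sock_release() change wraps the whole teardown in lock_sock()/release_sock(), so it is serialized against concurrent operations on the same socket rather than racing with them. As an illustration only (not the kernel's socket locking API), a pthread sketch where use and release of a shared object take the same per-object lock:

#include <pthread.h>
#include <stdio.h>

struct sock_like {
	pthread_mutex_t lock;
	int bound;		/* state a concurrent operation inspects */
	int rx_packets;
};

/* A socket operation: holds the per-object lock while it looks at and
 * updates the state.
 */
static void sock_op(struct sock_like *sk)
{
	pthread_mutex_lock(&sk->lock);
	if (sk->bound)
		sk->rx_packets++;
	pthread_mutex_unlock(&sk->lock);
}

/* Release path: taking the same lock around the whole teardown means a
 * concurrent sock_op() either completes before the state is torn down or
 * observes the fully released state afterwards; it never sees the object
 * halfway through release.
 */
static void sock_release(struct sock_like *sk)
{
	pthread_mutex_lock(&sk->lock);
	sk->bound = 0;
	sk->rx_packets = 0;
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct sock_like sk = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	sock_op(&sk);
	sock_release(&sk);
	printf("bound=%d rx=%d\n", sk.bound, sk.rx_packets);
	return 0;
}
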
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 008ba439bd62..cc80c76177b6 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -76,6 +76,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(csock);
return err;
}
+ ca.name[sizeof(ca.name)-1] = 0;
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
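
The one-line hidp change forces a NUL terminator onto the name copied in from userspace, so string handling further down cannot run past the fixed-size field. A standalone sketch of the same defensive pattern (the struct and field here are illustrative, not the hidp ABI):

#include <stdio.h>
#include <string.h>

struct conn_add_req {
	char name[16];	/* fixed-size field filled from an untrusted source */
};

static void fill_req(struct conn_add_req *req, const char *src)
{
	/* src is assumed to supply at least sizeof(req->name) bytes but may
	 * not be NUL-terminated (e.g. it was copied in from userspace).
	 */
	memcpy(req->name, src, sizeof(req->name));

	/* Force termination so later strlen()/printf() on req->name cannot
	 * read past the end of the buffer.
	 */
	req->name[sizeof(req->name) - 1] = '\0';
}

int main(void)
{
	struct conn_add_req req;
	char raw[16];

	memset(raw, 'A', sizeof(raw));	/* deliberately unterminated */
	fill_req(&req, raw);
	printf("name: \"%s\" (len %zu)\n", req.name, strlen(req.name));
	return 0;
}
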
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 1fc23cb4a3e0..11012a509070 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1340,6 +1340,21 @@ static void l2cap_request_info(struct l2cap_conn *conn)
sizeof(req), &req);
}
+static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
+{
+ /* The minimum encryption key size needs to be enforced by the
+ * host stack before establishing any L2CAP connections. The
+ * specification in theory allows a minimum of 1, but to align
+ * BR/EDR and LE transports, a minimum of 7 is chosen.
+ *
+ * This check might also be called for unencrypted connections
+ * that have no key size requirements. Ensure that the link is
+ * actually encrypted before enforcing a key size.
+ */
+ return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
+ hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1357,9 +1372,14 @@ static void l2cap_do_start(struct l2cap_chan *chan)
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
return;
- if (l2cap_chan_check_security(chan, true) &&
- __l2cap_no_conn_pending(chan))
+ if (!l2cap_chan_check_security(chan, true) ||
+ !__l2cap_no_conn_pending(chan))
+ return;
+
+ if (l2cap_check_enc_key_size(conn->hcon))
l2cap_start_connection(chan);
+ else
+ __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
@@ -1438,7 +1458,10 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
continue;
}
- l2cap_start_connection(chan);
+ if (l2cap_check_enc_key_size(conn->hcon))
+ l2cap_start_connection(chan);
+ else
+ l2cap_chan_close(chan, ECONNREFUSED);
} else if (chan->state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
@@ -3326,16 +3349,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+ if (len < 0)
+ break;
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
mtu = val;
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
break;
@@ -3343,26 +3372,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *) val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *) val, olen);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- remote_efs = 1;
- memcpy(&efs, (void *) val, olen);
- }
+ if (olen != sizeof(efs))
+ break;
+ remote_efs = 1;
+ memcpy(&efs, (void *) val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
return -ECONNREFUSED;
-
set_bit(FLAG_EXT_CTRL, &chan->flags);
set_bit(CONF_EWS_RECV, &chan->conf_state);
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3372,7 +3405,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
default:
if (hint)
break;
-
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
break;
@@ -3537,58 +3569,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
+ endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
+ chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
-
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
rfc.mode != chan->mode)
return -ECONNREFUSED;
-
chan->fcs = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- memcpy(&efs, (void *)val, olen);
-
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs, endptr - ptr);
- }
+ if (olen != sizeof(efs))
+ break;
+ memcpy(&efs, (void *)val, olen);
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (*result == L2CAP_CONF_PENDING)
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS,
@@ -3717,13 +3756,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
txwin_ext = val;
break;
}
@@ -4330,6 +4374,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
l2cap_chan_lock(chan);
+ if (chan->state != BT_DISCONN) {
+ l2cap_chan_unlock(chan);
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
@@ -4858,10 +4908,8 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
chan, result, local_amp_id, remote_amp_id);
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
- l2cap_chan_unlock(chan);
+ if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
return;
- }
if (chan->state != BT_CONNECTED) {
l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
@@ -6761,6 +6809,16 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
chan->sdu_len = sdu_len;
chan->sdu_last_frag = skb;
+ /* Detect if remote is not able to use the selected MPS */
+ if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
+ u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
+
+ /* Adjust the number of credits */
+ BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
+ chan->mps = mps_len;
+ l2cap_chan_le_send_credits(chan);
+ }
+
return 0;
}
@@ -7426,7 +7484,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
}
if (chan->state == BT_CONNECT) {
- if (!status)
+ if (!status && l2cap_check_enc_key_size(hcon))
l2cap_start_connection(chan);
else
__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
@@ -7435,7 +7493,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
struct l2cap_conn_rsp rsp;
__u16 res, stat;
- if (!status) {
+ if (!status && l2cap_check_enc_key_size(hcon)) {
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
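
Most of the l2cap_parse_conf_req()/l2cap_parse_conf_rsp()/l2cap_conf_rfc_get() changes above add an explicit olen check per option before its value is consumed, so an option carrying an unexpected length is skipped instead of being interpreted. The sketch below applies the same rule to a generic type/length/value buffer; it is only an illustration of the pattern, not the L2CAP wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { OPT_MTU = 1, OPT_FLUSH_TO = 2 };		/* illustrative option IDs */

struct conf { uint16_t mtu, flush_to; };

/* Parse a buffer of [type:1][len:1][value:len] options.  A known option is
 * accepted only if its length is exactly what that option requires,
 * mirroring the "if (olen != 2) break;" style checks in the patch.
 */
static int parse_options(const uint8_t *buf, size_t len, struct conf *c)
{
	while (len >= 2) {
		uint8_t type = buf[0];
		uint8_t olen = buf[1];

		if (olen > len - 2)
			return -1;		/* truncated option */

		switch (type) {
		case OPT_MTU:
			if (olen != 2)
				break;		/* wrong size: ignore it */
			memcpy(&c->mtu, buf + 2, 2);
			break;
		case OPT_FLUSH_TO:
			if (olen != 2)
				break;
			memcpy(&c->flush_to, buf + 2, 2);
			break;
		default:
			break;			/* unknown option: skip */
		}
		buf += 2 + olen;
		len -= 2 + olen;
	}
	return 0;
}

int main(void)
{
	/* OPT_MTU with a bogus 1-byte payload, then a well-formed OPT_FLUSH_TO. */
	const uint8_t wire[] = { OPT_MTU, 1, 0xff, OPT_FLUSH_TO, 2, 0x10, 0x00 };
	struct conf c = { 0, 0 };

	parse_options(wire, sizeof(wire), &c);
	printf("mtu=%u flush_to=%u\n", c.mtu, c.flush_to);
	return 0;
}
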
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2f2cb5e27cdd..a8c63ef75f73 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
if (IS_ERR(dlc))
return PTR_ERR(dlc);
- else if (dlc) {
- rfcomm_dlc_put(dlc);
+ if (dlc)
return -EBUSY;
- }
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
if (!dlc)
return -ENOMEM;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1abfbcd8090a..6670b7ffc200 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2514,6 +2514,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
goto distribute;
}
+ /* Drop IRK if peer is using identity address during pairing but is
+ * providing different address as identity information.
+ *
+ * Microsoft Surface Precision Mouse is known to have this bug.
+ */
+ if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
+ (bacmp(&info->bdaddr, &hcon->dst) ||
+ info->addr_type != hcon->dst_type)) {
+ bt_dev_err(hcon->hdev,
+ "ignoring IRK with invalid identity address");
+ goto distribute;
+ }
+
bacpy(&smp->id_addr, &info->bdaddr);
smp->id_addr_type = info->addr_type;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 5f5e28f210e0..928bd5515f02 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -210,6 +210,12 @@ static int br_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ /* dev_set_mac_addr() can be called by a master device on bridge's
+ * NETDEV_UNREGISTER, but since it's being destroyed do nothing
+ */
+ if (dev->reg_state != NETREG_REGISTERED)
+ return -EBUSY;
+
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
/* Mac address will be changed in br_stp_change_bridge_id(). */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 8e173324693d..925818a05398 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -519,13 +519,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_allmulti(dev, 1);
- if (err)
- goto put_back;
+ if (err) {
+ kfree(p); /* kobject not yet init'd, manually free */
+ goto err1;
+ }
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
- goto err1;
+ goto err2;
err = br_sysfs_addif(p);
if (err)
@@ -608,12 +610,9 @@ err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
kobject_put(&p->kobj);
- p = NULL; /* kobject_put frees */
-err1:
dev_set_allmulti(dev, -1);
-put_back:
+err1:
dev_put(dev);
- kfree(p);
return err;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 267b46af407f..c615dff40ab4 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -231,13 +231,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
__br_handle_local_finish(skb);
- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
- br_pass_frame_up(skb);
- return 0;
+ /* return 1 to signal the okfn() was called so it's ok to use the skb */
+ return 1;
}
/*
@@ -308,10 +305,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
- /* Deliver packet to local host only */
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
- NULL, skb, skb->dev, NULL, br_handle_local_finish);
- return RX_HANDLER_CONSUMED;
+ /* The else clause should be hit when nf_hook():
+ * - returns < 0 (drop/error)
+ * - returns = 0 (stolen/nf_queue)
+ * Thus return 1 from the okfn() to signal the skb is ok to pass
+ */
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+ br_handle_local_finish) == 1) {
+ return RX_HANDLER_PASS;
+ } else {
+ return RX_HANDLER_CONSUMED;
+ }
}
forward:
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6406010e155b..7007683973b4 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -372,7 +372,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
struct nlmsghdr *nlh;
struct nlattr *nest;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2136e45f5277..80c81c7e3cf9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1036,6 +1036,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
int type;
int err = 0;
__be32 group;
+ u16 nsrcs;
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
@@ -1049,8 +1050,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
grec = (void *)(skb->data + len - sizeof(*grec));
group = grec->grec_mca;
type = grec->grec_type;
+ nsrcs = ntohs(grec->grec_nsrcs);
- len += ntohs(grec->grec_nsrcs) * 4;
+ len += nsrcs * 4;
if (!pskb_may_pull(skb, len))
return -EINVAL;
@@ -1070,7 +1072,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
type == IGMPV3_MODE_IS_INCLUDE) &&
- ntohs(grec->grec_nsrcs) == 0) {
+ nsrcs == 0) {
br_ip4_multicast_leave_group(br, port, group, vid);
} else {
err = br_ip4_multicast_add_group(br, port, group, vid);
@@ -1103,23 +1105,26 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
len = skb_transport_offset(skb) + sizeof(*icmp6h);
for (i = 0; i < num; i++) {
- __be16 *nsrcs, _nsrcs;
-
- nsrcs = skb_header_pointer(skb,
- len + offsetof(struct mld2_grec,
- grec_nsrcs),
- sizeof(_nsrcs), &_nsrcs);
- if (!nsrcs)
+ __be16 *_nsrcs, __nsrcs;
+ u16 nsrcs;
+
+ _nsrcs = skb_header_pointer(skb,
+ len + offsetof(struct mld2_grec,
+ grec_nsrcs),
+ sizeof(__nsrcs), &__nsrcs);
+ if (!_nsrcs)
return -EINVAL;
+ nsrcs = ntohs(*_nsrcs);
+
if (!pskb_may_pull(skb,
len + sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs)))
+ sizeof(struct in6_addr) * nsrcs))
return -EINVAL;
grec = (struct mld2_grec *)(skb->data + len);
len += sizeof(*grec) +
- sizeof(struct in6_addr) * ntohs(*nsrcs);
+ sizeof(struct in6_addr) * nsrcs;
/* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
@@ -1137,7 +1142,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
- ntohs(*nsrcs) == 0) {
+ nsrcs == 0) {
br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
vid);
} else {
@@ -1374,7 +1379,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld_msg *mld;
struct net_bridge_mdb_entry *mp;
struct mld2_query *mld2q;
@@ -1418,7 +1422,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
- saddr.u.ip6 = ip6h->saddr;
+ saddr.u.ip6 = ipv6_hdr(skb)->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
@@ -1485,6 +1489,9 @@ br_multicast_leave_group(struct net_bridge *br,
if (p->port != port)
continue;
+ if (p->flags & MDB_PG_FLAGS_PERMANENT)
+ break;
+
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
@@ -1983,7 +1990,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
__br_multicast_open(br, query);
- list_for_each_entry(port, &br->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
@@ -1995,6 +2003,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
br_multicast_enable(&port->ip6_own_query);
#endif
}
+ rcu_read_unlock();
}
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
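
The IGMPv3/MLDv2 report hunks read the per-record source count once into a local nsrcs before using it for both the bounds check and the leave/join decision, rather than re-reading it through a pointer into a buffer that later pskb_may_pull() calls may invalidate. A self-contained parser sketch of that "read it once, then only use the local copy" rule (the record layout is simplified for illustration):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define GREC_HDR_LEN 8	/* type(1) aux(1) nsrcs(2, big-endian) group(4) */

static int parse_reports(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (buflen - off >= GREC_HDR_LEN) {
		uint8_t type = buf[off];
		/* Read the source count exactly once into a local; the bounds
		 * check and the record-length arithmetic below both work on
		 * this copy, so nothing can change it underneath us.
		 */
		uint16_t nsrcs = (uint16_t)((buf[off + 2] << 8) | buf[off + 3]);
		size_t rec_len = GREC_HDR_LEN + (size_t)nsrcs * 4;

		if (rec_len > buflen - off)
			return -1;	/* claims more sources than are present */

		printf("record type %u with %u source(s)\n", type, nsrcs);
		off += rec_len;
	}
	return 0;
}

int main(void)
{
	const uint8_t pkt[] = {
		2, 0, 0x00, 0x01, 10, 0, 0, 1,	/* one record, one source */
		192, 168, 1, 7,			/* the source address */
	};

	return parse_reports(pkt, sizeof(pkt));
}
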
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 7e42c0d1f55b..62e045c9d452 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -512,6 +512,7 @@ static unsigned int br_nf_pre_routing(void *priv,
nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IP);
+ skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
@@ -642,6 +643,9 @@ static unsigned int br_nf_forward_arp(void *priv,
nf_bridge_pull_encap_header(skb);
}
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
+ return NF_DROP;
+
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);
@@ -878,11 +882,6 @@ static const struct nf_br_ops br_ops = {
.br_dev_xmit_hook = br_nf_dev_xmit,
};
-void br_netfilter_enable(void)
-{
-}
-EXPORT_SYMBOL_GPL(br_netfilter_enable);
-
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index a1b57cb07f1e..8c08dd07419f 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IPV6);
+ skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 5881fbc114a9..36282eb3492d 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -147,7 +147,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
- const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p;
struct net_bridge *br;
const unsigned char *buf;
@@ -176,7 +175,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (p->state == BR_STATE_DISABLED)
goto out;
- if (!ether_addr_equal(dest, br->group_addr))
+ if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
goto out;
if (p->flags & BR_BPDU_GUARD) {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index b6de4f457161..5172caac645c 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -622,6 +622,11 @@ void br_vlan_flush(struct net_bridge *br)
ASSERT_RTNL();
+ /* delete auto-added default pvid local fdb before flushing vlans
+ * otherwise it will be leaked on bridge device init failure
+ */
+ br_fdb_delete_by_port(br, NULL, 0, 1);
+
vg = br_vlan_group(br);
__vlan_flush(vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c7e5aaf2eeb8..1d850edecd72 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1894,7 +1894,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
}
static int ebt_buf_add(struct ebt_entries_buf_state *state,
- void *data, unsigned int sz)
+ const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
@@ -1928,7 +1928,7 @@ enum compat_mwt {
EBT_COMPAT_TARGET,
};
-static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
@@ -2004,22 +2004,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
/* return size of all matches, watchers or target, including necessary
* alignment and padding.
*/
-static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
+ const char *buf = (const char *)match32;
int growth = 0;
- char *buf;
if (size_left == 0)
return 0;
- buf = (char *) match32;
-
- while (size_left >= sizeof(*match32)) {
+ do {
struct ebt_entry_match *match_kern;
int ret;
+ if (size_left < sizeof(*match32))
+ return -EINVAL;
+
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
@@ -2056,21 +2057,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
- return -EINVAL;
-
match32 = (struct compat_ebt_entry_mwt *) buf;
- }
+ } while (size_left);
return growth;
}
/* called for all ebt_entry structures. */
-static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
- unsigned int i, j, startoff, new_offset = 0;
+ unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
@@ -2157,11 +2155,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
}
- startoff = state->buf_user_offset - startoff;
+ next_expected_off = state->buf_user_offset - startoff;
+ if (next_expected_off != entry->next_offset)
+ return -EINVAL;
- if (WARN_ON(*total < startoff))
+ if (*total < entry->next_offset)
return -EINVAL;
- *total -= startoff;
+ *total -= entry->next_offset;
return 0;
}
@@ -2182,7 +2182,9 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user,
if (ret < 0)
return ret;
- WARN_ON(size_remaining);
+ if (size_remaining)
+ return -EINVAL;
+
return state->buf_kern_offset;
}
@@ -2287,8 +2289,10 @@ static int compat_do_replace(struct net *net, void __user *user,
state.buf_kern_len = size64;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
- if (WARN_ON(ret < 0))
+ if (WARN_ON(ret < 0)) {
+ vfree(entries_tmp);
goto out_unlock;
+ }
vfree(entries_tmp);
tmp.entries_size = size64;
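
size_entry_mwt() now recomputes how many bytes the matches, watchers and target actually consumed and returns -EINVAL when that total disagrees with the entry's declared next_offset, replacing the old WARN_ON()-based handling. A sketch of the same consistency check over a generic stream of variable-size records (the record layout here is invented for the example):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct entry {			/* illustrative variable-size record */
	uint32_t next_offset;	/* declared total size of this entry */
	uint32_t payload_len;	/* size of the trailing payload */
};

static int walk_entries(const uint8_t *buf, size_t total)
{
	size_t off = 0;

	while (total - off >= sizeof(struct entry)) {
		struct entry e;
		size_t consumed;

		memcpy(&e, buf + off, sizeof(e));

		/* Recompute what this entry really occupies and insist that
		 * it matches the declared next_offset; any mismatch means the
		 * input is inconsistent and is rejected rather than trusted.
		 */
		consumed = sizeof(e) + e.payload_len;
		if (consumed != e.next_offset)
			return -1;
		if (e.next_offset > total - off)
			return -1;	/* would run past the buffer */

		off += e.next_offset;
	}
	return off == total ? 0 : -1;	/* no trailing partial entry */
}

int main(void)
{
	struct { struct entry e; uint8_t payload[4]; } one = {
		{ sizeof(struct entry) + 4, 4 }, { 1, 2, 3, 4 }
	};

	printf("%d\n", walk_entries((const uint8_t *)&one, sizeof(one)));
	return 0;
}
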
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index f5afda1abc76..4dc82e9a855d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 cmdrsp;
u8 cmd;
int ret = -1;
- u16 tmp16;
u8 len;
u8 param[255];
- u8 linkid;
+ u8 linkid = 0;
struct cfctrl *cfctrl = container_obj(layer);
struct cfctrl_request_info rsp, *req;
- cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmdrsp = cfpkt_extr_head_u8(pkt);
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
@@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 physlinkid;
u8 prio;
u8 tmp;
- u32 tmp32;
u8 *cp;
int i;
struct cfctrl_link_param linkparam;
memset(&linkparam, 0, sizeof(linkparam));
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
serv = tmp & CFCTRL_SRV_MASK;
linkparam.linktype = serv;
@@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
servtype = tmp >> 4;
linkparam.chtype = servtype;
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
physlinkid = tmp & 0x07;
prio = tmp >> 3;
linkparam.priority = prio;
linkparam.phyid = physlinkid;
- cfpkt_extr_head(pkt, &endpoint, 1);
+ endpoint = cfpkt_extr_head_u8(pkt);
linkparam.endpoint = endpoint & 0x03;
switch (serv) {
@@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_VIDEO:
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
linkparam.u.video.connid = tmp;
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_DATAGRAM:
- cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.datagram.connid =
- le32_to_cpu(tmp32);
+ cfpkt_extr_head_u32(pkt);
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_RFM:
/* Construct a frame, convert
* DatagramConnectionID
* to network format long and copy it out...
*/
- cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.rfm.connid =
- le32_to_cpu(tmp32);
+ cfpkt_extr_head_u32(pkt);
cp = (u8 *) linkparam.u.rfm.volume;
- for (cfpkt_extr_head(pkt, &tmp, 1);
+ for (tmp = cfpkt_extr_head_u8(pkt);
cfpkt_more(pkt) && tmp != '\0';
- cfpkt_extr_head(pkt, &tmp, 1))
+ tmp = cfpkt_extr_head_u8(pkt))
*cp++ = tmp;
*cp = '\0';
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_UTIL:
@@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
* to network format long and copy it out...
*/
/* Fifosize KB */
- cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_kb =
- le16_to_cpu(tmp16);
+ cfpkt_extr_head_u16(pkt);
/* Fifosize bufs */
- cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_bufs =
- le16_to_cpu(tmp16);
+ cfpkt_extr_head_u16(pkt);
/* name */
cp = (u8 *) linkparam.u.utility.name;
caif_assert(sizeof(linkparam.u.utility.name)
@@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
for (i = 0;
i < UTILITY_NAME_LENGTH
&& cfpkt_more(pkt); i++) {
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
/* Length */
- cfpkt_extr_head(pkt, &len, 1);
+ len = cfpkt_extr_head_u8(pkt);
linkparam.u.utility.paramlen = len;
/* Param Data */
cp = linkparam.u.utility.params;
while (cfpkt_more(pkt) && len--) {
- cfpkt_extr_head(pkt, &tmp, 1);
+ tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
/* Length */
- cfpkt_extr_head(pkt, &len, 1);
+ len = cfpkt_extr_head_u8(pkt);
/* Param Data */
cfpkt_extr_head(pkt, &param, len);
break;
@@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
}
break;
case CFCTRL_CMD_LINK_DESTROY:
- cfpkt_extr_head(pkt, &linkid, 1);
+ linkid = cfpkt_extr_head_u8(pkt);
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:
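
cfctrl_recv() is converted from cfpkt_extr_head() with scratch variables plus separate le16/le32 conversion to typed helpers (cfpkt_extr_head_u8/u16/u32) that return the decoded value directly. A standalone sketch of that reader style over a plain byte buffer; the cursor struct and helper names are illustrative, not the CAIF API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reader {
	const uint8_t *p;
	size_t left;
};

/* Each helper consumes exactly the bytes it needs and returns the decoded
 * little-endian value, so callers no longer juggle temporaries and separate
 * endianness conversions.  Short reads simply yield 0 here.
 */
static uint8_t extr_u8(struct reader *r)
{
	uint8_t v = 0;

	if (r->left >= 1) {
		v = r->p[0];
		r->p += 1;
		r->left -= 1;
	}
	return v;
}

static uint16_t extr_u16(struct reader *r)
{
	uint16_t v = 0;

	if (r->left >= 2) {
		v = (uint16_t)(r->p[0] | (r->p[1] << 8));
		r->p += 2;
		r->left -= 2;
	}
	return v;
}

static uint32_t extr_u32(struct reader *r)
{
	uint32_t v = 0;

	if (r->left >= 4) {
		v = (uint32_t)r->p[0] | ((uint32_t)r->p[1] << 8) |
		    ((uint32_t)r->p[2] << 16) | ((uint32_t)r->p[3] << 24);
		r->p += 4;
		r->left -= 4;
	}
	return v;
}

int main(void)
{
	const uint8_t pkt[] = { 0x05, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
	struct reader r = { pkt, sizeof(pkt) };
	uint8_t cmd = extr_u8(&r);
	uint16_t fifosize = extr_u16(&r);
	uint32_t connid = extr_u32(&r);

	printf("cmd=%u fifosize=%u connid=%u\n", cmd, fifosize, (unsigned)connid);
	return 0;
}
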
diff --git a/net/can/af_can.c b/net/can/af_can.c
index ac1552d8b4ad..e5a9e3d76e26 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -113,6 +113,7 @@ EXPORT_SYMBOL(can_ioctl);
static void can_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_error_queue);
}
static const struct can_proto *can_get_proto(int protocol)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index bf0294cf4d22..18c4b34bd6e0 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -45,19 +45,6 @@ bool libceph_compatible(void *data)
}
EXPORT_SYMBOL(libceph_compatible);
-/*
- * find filename portion of a path (/foo/bar/baz -> baz)
- */
-const char *ceph_file_part(const char *s, int len)
-{
- const char *e = s + len;
-
- while (e != s && *(e-1) != '/')
- e--;
- return e;
-}
-EXPORT_SYMBOL(ceph_file_part);
-
const char *ceph_msg_type_name(int type)
{
switch (type) {
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 146502f310ce..619c63a74594 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -96,7 +96,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
if (error)
goto out_err;
- if (sk->sk_receive_queue.prev != skb)
+ if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
goto out;
/* Socket shut down? */
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e187f90c85d..842654302110 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2997,7 +2997,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
}
skb = next;
- if (netif_xmit_stopped(txq) && skb) {
+ if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
@@ -4828,7 +4828,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb);
- eth = skb_gro_header_fast(skb, 0);
if (unlikely(skb_gro_header_hard(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
@@ -4838,6 +4837,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
return NULL;
}
} else {
+ eth = (const struct ethhdr *)skb->data;
gro_pull_from_frag0(skb, hlen);
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
@@ -5083,7 +5083,10 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
goto out;
/* Note: ndo_busy_poll method is optional in linux-4.5 */
- busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
+ if (napi->dev->netdev_ops)
+ busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
+ else
+ busy_poll = NULL;
do {
rc = 0;
@@ -6581,7 +6584,8 @@ static int __dev_set_mtu(struct net_device *dev, int new_mtu)
if (ops->ndo_change_mtu)
return ops->ndo_change_mtu(dev, new_mtu);
- dev->mtu = new_mtu;
+ /* Pairs with all the lockless reads of dev->mtu in the stack */
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -7350,6 +7354,8 @@ int register_netdevice(struct net_device *dev)
ret = notifier_to_errno(ret);
if (ret) {
rollback_registered(dev);
+ rcu_barrier();
+
dev->reg_state = NETREG_UNREGISTERED;
}
/*
@@ -7499,7 +7505,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
refcnt = netdev_refcnt_read(dev);
- if (time_after(jiffies, warning_time + 10 * HZ)) {
+ if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, refcnt);
warning_time = jiffies;
@@ -8293,6 +8299,8 @@ static void __net_exit default_device_exit(struct net *net)
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ if (__dev_get_by_name(&init_net, fb_name))
+ snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",
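
Among the net/core/dev.c hunks, __dev_set_mtu() now stores dev->mtu with WRITE_ONCE() because the fast paths read it locklessly; the annotation pairs with READ_ONCE() on the reader side and keeps the compiler from tearing or refetching the value. Outside the kernel, the closest portable equivalent is a relaxed C11 atomic, which is the assumption this sketch makes:

#include <stdatomic.h>
#include <stdio.h>

struct dev_like {
	/* Written by a control path, read locklessly by data paths.  Relaxed
	 * atomics play the role of WRITE_ONCE()/READ_ONCE(): no ordering is
	 * implied, but loads and stores happen exactly once and are not torn.
	 */
	_Atomic unsigned int mtu;
};

static void set_mtu(struct dev_like *dev, unsigned int new_mtu)
{
	atomic_store_explicit(&dev->mtu, new_mtu, memory_order_relaxed);
}

static unsigned int read_mtu(struct dev_like *dev)
{
	return atomic_load_explicit(&dev->mtu, memory_order_relaxed);
}

int main(void)
{
	struct dev_like dev = { 1500 };

	set_mtu(&dev, 9000);
	printf("mtu=%u\n", read_mtu(&dev));
	return 0;
}
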
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a8a9938aeceb..47d2d6e2b780 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -514,6 +514,8 @@ convert_link_ksettings_to_legacy_settings(
= link_ksettings->base.eth_tp_mdix;
legacy_settings->eth_tp_mdix_ctrl
= link_ksettings->base.eth_tp_mdix_ctrl;
+ legacy_settings->transceiver
+ = link_ksettings->base.transceiver;
return retval;
}
@@ -878,8 +880,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
- info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_regs_len) {
+ int ret = ops->get_regs_len(dev);
+
+ if (ret > 0)
+ info.regdump_len = ret;
+ }
+
if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
@@ -1380,6 +1387,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
return -EFAULT;
reglen = ops->get_regs_len(dev);
+ if (reglen <= 0)
+ return reglen;
+
if (regs.len > reglen)
regs.len = reglen;
@@ -1390,13 +1400,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
return -ENOMEM;
}
+ if (regs.len < reglen)
+ reglen = regs.len;
+
ops->get_regs(dev, &regs, regbuf);
ret = -EFAULT;
if (copy_to_user(useraddr, &regs, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
+ if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
@@ -1427,11 +1440,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol;
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
+ memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+ wol.cmd = ETHTOOL_GWOL;
dev->ethtool_ops->get_wol(dev, &wol);
if (copy_to_user(useraddr, &wol, sizeof(wol)))
@@ -1801,17 +1816,22 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
gstrings.len = ret;
- data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
- if (!data)
- return -ENOMEM;
+ if (gstrings.len) {
+ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
- __ethtool_get_strings(dev, gstrings.string_set, data);
+ __ethtool_get_strings(dev, gstrings.string_set, data);
+ } else {
+ data = NULL;
+ }
ret = -EFAULT;
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
goto out;
useraddr += sizeof(gstrings);
- if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ if (gstrings.len &&
+ copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
goto out;
ret = 0;
@@ -1899,17 +1919,21 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = kmalloc(n_stats * sizeof(u64), GFP_USER);
- if (!data)
- return -ENOMEM;
+ if (n_stats) {
+ data = kmalloc(n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
- ops->get_ethtool_stats(dev, &stats, data);
+ ops->get_ethtool_stats(dev, &stats, data);
+ } else {
+ data = NULL;
+ }
ret = -EFAULT;
if (copy_to_user(useraddr, &stats, sizeof(stats)))
goto out;
useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
goto out;
ret = 0;
@@ -1938,19 +1962,23 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
- data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
- if (!data)
- return -ENOMEM;
+ if (n_stats) {
+ data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
- mutex_lock(&phydev->lock);
- phydev->drv->get_stats(phydev, &stats, data);
- mutex_unlock(&phydev->lock);
+ mutex_lock(&phydev->lock);
+ phydev->drv->get_stats(phydev, &stats, data);
+ mutex_unlock(&phydev->lock);
+ } else {
+ data = NULL;
+ }
ret = -EFAULT;
if (copy_to_user(useraddr, &stats, sizeof(stats)))
goto out;
useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
goto out;
ret = 0;
@@ -2306,9 +2334,10 @@ out:
return ret;
}
-static int ethtool_get_per_queue_coalesce(struct net_device *dev,
- void __user *useraddr,
- struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_get_per_queue_coalesce(struct net_device *dev,
+ void __user *useraddr,
+ struct ethtool_per_queue_op *per_queue_opt)
{
u32 bit;
int ret;
@@ -2338,9 +2367,10 @@ static int ethtool_get_per_queue_coalesce(struct net_device *dev,
return 0;
}
-static int ethtool_set_per_queue_coalesce(struct net_device *dev,
- void __user *useraddr,
- struct ethtool_per_queue_op *per_queue_opt)
+static noinline_for_stack int
+ethtool_set_per_queue_coalesce(struct net_device *dev,
+ void __user *useraddr,
+ struct ethtool_per_queue_op *per_queue_opt)
{
u32 bit;
int i, ret = 0;
@@ -2397,7 +2427,7 @@ roll_back:
return ret;
}
-static int ethtool_set_per_queue(struct net_device *dev,
+static int noinline_for_stack ethtool_set_per_queue(struct net_device *dev,
void __user *useraddr, u32 sub_cmd)
{
struct ethtool_per_queue_op per_queue_opt;
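
Several ethtool handlers above (get_strings, get_stats, get_phy_stats) now skip both the allocation and the user copy of the data area when the driver reports a count of zero, so a zero-length request no longer allocates a zero-byte buffer or copies from a NULL pointer. A userspace sketch of the same shape; the "driver" callbacks are hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical driver callbacks: report how many stats exist, then fill them. */
static int driver_num_stats(void) { return 0; }
static void driver_get_stats(unsigned long long *data, int n)
{
	for (int i = 0; i < n; i++)
		data[i] = (unsigned long long)i;
}

static int get_stats(void)
{
	int n = driver_num_stats();
	unsigned long long *data = NULL;

	/* Allocate and fill only when there is something to report; with
	 * n == 0 the (empty) header is still returned, but no zero-length
	 * buffer is ever allocated or copied.
	 */
	if (n > 0) {
		data = calloc((size_t)n, sizeof(*data));
		if (!data)
			return -1;
		driver_get_stats(data, n);
	}

	printf("n_stats=%d\n", n);
	if (n > 0)
		fwrite(data, sizeof(*data), (size_t)n, stdout);

	free(data);
	return 0;
}

int main(void) { return get_stats(); }
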
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index be4629c344a6..9f172906cc88 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -640,7 +640,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh = nlmsg_data(nlh);
frh->family = ops->family;
- frh->table = rule->table;
+ frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, FRA_TABLE, rule->table))
goto nla_put_failure;
if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ab7c50026cae..26b0f70d2f1c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -563,45 +563,34 @@ out_bad:
}
EXPORT_SYMBOL(__skb_flow_dissect);
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
net_get_random_once(&hashrnd, sizeof(hashrnd));
}
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
- u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
{
- return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
- const void *p = flow;
-
- BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
- return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+ BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+ return &flow->FLOW_KEYS_HASH_START_FIELD;
}
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
- size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
- BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
- BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
- sizeof(*flow) - sizeof(flow->addrs));
+ size_t len = offsetof(typeof(*flow), addrs) - FLOW_KEYS_HASH_OFFSET;
switch (flow->control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- diff -= sizeof(flow->addrs.v4addrs);
+ len += sizeof(flow->addrs.v4addrs);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- diff -= sizeof(flow->addrs.v6addrs);
+ len += sizeof(flow->addrs.v6addrs);
break;
case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
- diff -= sizeof(flow->addrs.tipcaddrs);
+ len += sizeof(flow->addrs.tipcaddrs);
break;
}
- return (sizeof(*flow) - diff) / sizeof(u32);
+ return len;
}
__be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -667,14 +656,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
}
}
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+ const siphash_key_t *keyval)
{
u32 hash;
__flow_hash_consistentify(keys);
- hash = __flow_hash_words(flow_keys_hash_start(keys),
- flow_keys_hash_length(keys), keyval);
+ hash = siphash(flow_keys_hash_start(keys),
+ flow_keys_hash_length(keys), keyval);
if (!hash)
hash = 1;
@@ -684,12 +674,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
u32 flow_hash_from_keys(struct flow_keys *keys)
{
__flow_hash_secret_init();
- return __flow_hash_from_keys(keys, hashrnd);
+ return __flow_hash_from_keys(keys, &hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
- struct flow_keys *keys, u32 keyval)
+ struct flow_keys *keys,
+ const siphash_key_t *keyval)
{
skb_flow_dissect_flow_keys(skb, keys,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -737,7 +728,7 @@ u32 __skb_get_hash_symmetric(struct sk_buff *skb)
NULL, 0, 0, 0,
FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
- return __flow_hash_from_keys(&keys, hashrnd);
+ return __flow_hash_from_keys(&keys, &hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
@@ -757,13 +748,14 @@ void __skb_get_hash(struct sk_buff *skb)
__flow_hash_secret_init();
- hash = ___skb_get_hash(skb, &keys, hashrnd);
+ hash = ___skb_get_hash(skb, &keys, &hashrnd);
__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+ const siphash_key_t *perturb)
{
struct flow_keys keys;
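
The flow dissector hunks replace jhash with siphash() keyed by a net_get_random_once() secret and make flow_keys_hash_length() return a byte length covering only the address variant that is populated for the flow. The sketch below reproduces that "hash just the populated prefix with a keyed hash" idea; a keyed FNV-1a mix stands in for SipHash purely to keep the example self-contained, and the struct layout is illustrative:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum addr_type { ADDR_IPV4, ADDR_IPV6 };

struct flow_keys_like {
	uint16_t ports[2];
	enum addr_type addr_type;
	union {
		uint32_t v4[2];		/* src, dst */
		uint32_t v6[8];		/* src, dst */
	} addrs;
};

/* Bytes of the struct that are meaningful for this flow: everything up to
 * the address union, plus only the variant actually in use.
 */
static size_t flow_hash_length(const struct flow_keys_like *k)
{
	size_t len = offsetof(struct flow_keys_like, addrs);

	len += (k->addr_type == ADDR_IPV4) ? sizeof(k->addrs.v4)
					   : sizeof(k->addrs.v6);
	return len;
}

/* Keyed hash over the populated prefix.  The FNV-1a mix seeded with the
 * secret is only a stand-in for siphash(); the point is that the key is
 * random per boot and unknown to remote senders.
 */
static uint64_t flow_hash(const struct flow_keys_like *k, uint64_t secret)
{
	const uint8_t *p = (const uint8_t *)k;
	size_t len = flow_hash_length(k);
	uint64_t h = 1469598103934665603ull ^ secret;

	for (size_t i = 0; i < len; i++)
		h = (h ^ p[i]) * 1099511628211ull;
	return h ? h : 1;	/* as in the patch, never hand back 0 */
}

int main(void)
{
	struct flow_keys_like k;

	memset(&k, 0, sizeof(k));
	k.ports[0] = 80;
	k.ports[1] = 12345;
	k.addr_type = ADDR_IPV4;
	k.addrs.v4[0] = 0x0100007f;	/* 127.0.0.1 */
	k.addrs.v4[1] = 0x0101a8c0;	/* 192.168.1.1 */

	printf("%016llx\n",
	       (unsigned long long)flow_hash(&k, 0xdeadbeefcafe1234ull));
	return 0;
}
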
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8eb1916b742c..6578d1f8e6c4 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
+#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,6 +31,7 @@
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
+#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
@@ -324,12 +326,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
buckets = kzalloc(size, GFP_ATOMIC);
- else
+ } else {
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
+ kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
+ }
if (!buckets) {
kfree(ret);
return NULL;
@@ -349,10 +353,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
- if (size <= PAGE_SIZE)
+ if (size <= PAGE_SIZE) {
kfree(buckets);
- else
+ } else {
+ kmemleak_free(buckets);
free_pages((unsigned long)buckets, get_order(size));
+ }
kfree(nht);
}
@@ -981,6 +987,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
atomic_set(&neigh->probes,
NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ neigh_del_timer(neigh);
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
@@ -997,6 +1004,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
}
} else if (neigh->nud_state & NUD_STALE) {
neigh_dbg(2, "neigh %p is delayed\n", neigh);
+ neigh_del_timer(neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_add_timer(neigh, jiffies +
@@ -1050,7 +1058,7 @@ static void neigh_update_hhs(struct neighbour *neigh)
if (update) {
hh = &neigh->hh;
- if (hh->hh_len) {
+ if (READ_ONCE(hh->hh_len)) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
@@ -1311,7 +1319,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
struct net_device *dev = neigh->dev;
unsigned int seq;
- if (dev->header_ops->cache && !neigh->hh.hh_len)
+ if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
neigh_hh_init(neigh);
do {
@@ -1826,8 +1834,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
goto nla_put_failure;
{
unsigned long now = jiffies;
- unsigned int flush_delta = now - tbl->last_flush;
- unsigned int rand_delta = now - tbl->last_rand;
+ long flush_delta = now - tbl->last_flush;
+ long rand_delta = now - tbl->last_rand;
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2489,7 +2497,13 @@ int neigh_xmit(int index, struct net_device *dev,
if (!tbl)
goto out;
rcu_read_lock_bh();
- neigh = __neigh_lookup_noref(tbl, addr, dev);
+ if (index == NEIGH_ARP_TABLE) {
+ u32 key = *((u32 *)addr);
+
+ neigh = __ipv4_neigh_lookup_noref(dev, key);
+ } else {
+ neigh = __neigh_lookup_noref(tbl, addr, dev);
+ }
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
@@ -2697,6 +2711,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ __acquires(tbl->lock)
__acquires(rcu_bh)
{
struct neigh_seq_state *state = seq->private;
@@ -2707,6 +2722,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
rcu_read_lock_bh();
state->nht = rcu_dereference_bh(tbl->nht);
+ read_lock(&tbl->lock);
return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
@@ -2740,8 +2756,13 @@ out:
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
+ __releases(tbl->lock)
__releases(rcu_bh)
{
+ struct neigh_seq_state *state = seq->private;
+ struct neigh_table *tbl = state->tbl;
+
+ read_unlock(&tbl->lock);
rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 04fd04ccaa04..7630fa80db92 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -282,6 +282,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
atomic_set(&net->count, 1);
atomic_set(&net->passive, 1);
+ get_random_bytes(&net->hash_mix, sizeof(u32));
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
@@ -801,7 +802,8 @@ static int __init net_ns_init(void)
mutex_unlock(&net_mutex);
- register_pernet_subsys(&net_ns_ops);
+ if (register_pernet_subsys(&net_ns_ops))
+ panic("Could not register network namespace subsystems");
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 2e4eef71471d..db65b0cdfc4c 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -55,30 +55,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
kfree(css_cls_state(css));
}
+/*
+ * To avoid freezing of sockets creation for tasks with big number of threads
+ * and opened sockets lets release file_lock every 1000 iterated descriptors.
+ * New sockets will already have been created with new classid.
+ */
+
+struct update_classid_context {
+ u32 classid;
+ unsigned int batch;
+};
+
+#define UPDATE_CLASSID_BATCH 1000
+
static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
int err;
+ struct update_classid_context *ctx = (void *)v;
struct socket *sock = sock_from_file(file, &err);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
- sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
- (unsigned long)v);
+ sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
spin_unlock(&cgroup_sk_update_lock);
}
+ if (--ctx->batch == 0) {
+ ctx->batch = UPDATE_CLASSID_BATCH;
+ return n + 1;
+ }
return 0;
}
+static void update_classid_task(struct task_struct *p, u32 classid)
+{
+ struct update_classid_context ctx = {
+ .classid = classid,
+ .batch = UPDATE_CLASSID_BATCH
+ };
+ unsigned int fd = 0;
+
+ do {
+ task_lock(p);
+ fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+ task_unlock(p);
+ cond_resched();
+ } while (fd);
+}
+
static void cgrp_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct task_struct *p;
cgroup_taskset_for_each(p, css, tset) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock,
- (void *)(unsigned long)css_cls_state(css)->classid);
- task_unlock(p);
+ update_classid_task(p, css_cls_state(css)->classid);
}
}
@@ -100,10 +130,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
css_task_iter_start(css, &it);
while ((p = css_task_iter_next(&it))) {
- task_lock(p);
- iterate_fd(p->files, 0, update_classid_sock,
- (void *)(unsigned long)cs->classid);
- task_unlock(p);
+ update_classid_task(p, cs->classid);
cond_resched();
}
css_task_iter_end(&it);
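
update_classid_task() now walks a task's file descriptors in batches of UPDATE_CLASSID_BATCH, dropping the lock and calling cond_resched() between batches so a process with a huge descriptor table does not stall socket creation for the whole walk. A pthread sketch of the same "bounded batch, then release the lock and yield" shape, illustrative only:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NITEMS 2500
#define BATCH  1000

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[NITEMS];

static void update_all(int new_value)
{
	int next = 0;

	while (next < NITEMS) {
		int end = next + BATCH;

		if (end > NITEMS)
			end = NITEMS;

		/* Hold the lock for one batch only, then drop it and yield so
		 * other threads waiting on table_lock can make progress
		 * between batches.
		 */
		pthread_mutex_lock(&table_lock);
		for (; next < end; next++)
			table[next] = new_value;
		pthread_mutex_unlock(&table_lock);

		sched_yield();	/* userspace stand-in for cond_resched() */
	}
}

int main(void)
{
	update_all(42);
	printf("table[0]=%d table[%d]=%d\n",
	       table[0], NITEMS - 1, table[NITEMS - 1]);
	return 0;
}
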
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9b2d61120c0d..5de180a9b7f5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
skb_queue_head(&npinfo->txq, skb);
HARD_TX_UNLOCK(dev, txq);
local_irq_restore(flags);
@@ -357,7 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
HARD_TX_UNLOCK(dev, txq);
- if (status == NETDEV_TX_OK)
+ if (dev_xmit_complete(status))
break;
}
@@ -374,7 +374,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
}
- if (status != NETDEV_TX_OK) {
+ if (!dev_xmit_complete(status)) {
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 306b8f0e03c1..433b26feb320 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3147,7 +3147,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
while (thread_is_running(t)) {
+ /* note: 't' will still be around even after the unlock/lock
+ * cycle because pktgen_thread threads are only cleared at
+ * net exit
+ */
+ mutex_unlock(&pktgen_thread_lock);
msleep_interruptible(100);
+ mutex_lock(&pktgen_thread_lock);
if (signal_pending(current))
goto signal;
@@ -3162,6 +3168,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
struct pktgen_thread *t;
int sig = 1;
+ /* prevent from racing with rmmod */
+ if (!try_module_get(THIS_MODULE))
+ return sig;
+
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3175,6 +3185,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
+ module_put(THIS_MODULE);
return sig;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ba724576764e..ead1a32c68f7 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1724,6 +1724,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_MAC]) {
struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
+ if (ivm->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
@@ -1735,6 +1737,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_VLAN]) {
struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
+ if (ivv->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
@@ -1767,6 +1771,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (len == 0)
return -EINVAL;
+ if (ivvl[0]->vf >= INT_MAX)
+ return -EINVAL;
err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
ivvl[0]->qos, ivvl[0]->vlan_proto);
if (err < 0)
@@ -1777,6 +1783,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
struct ifla_vf_info ivf;
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_get_vf_config)
err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
@@ -1795,6 +1803,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_RATE]) {
struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_rate)
err = ops->ndo_set_vf_rate(dev, ivt->vf,
@@ -1807,6 +1817,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_SPOOFCHK]) {
struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
+ if (ivs->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
@@ -1818,6 +1830,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_LINK_STATE]) {
struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
+ if (ivl->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_link_state)
err = ops->ndo_set_vf_link_state(dev, ivl->vf,
@@ -1831,6 +1845,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
err = -EOPNOTSUPP;
ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+ if (ivrssq_en->vf >= INT_MAX)
+ return -EINVAL;
if (ops->ndo_set_vf_rss_query_en)
err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
ivrssq_en->setting);
@@ -1841,6 +1857,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_TRUST]) {
struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_trust)
err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
@@ -1851,15 +1869,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
if (tb[IFLA_VF_IB_NODE_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
-
return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
}
if (tb[IFLA_VF_IB_PORT_GUID]) {
struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
+ if (ivt->vf >= INT_MAX)
+ return -EINVAL;
if (!ops->ndo_set_vf_guid)
return -EOPNOTSUPP;
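
Every IFLA_VF_* handler in do_setvfinfo() now rejects vf indices at or above INT_MAX before they reach the ndo_* driver callbacks, which take a plain int; without the check a large unsigned index would wrap to a negative value. A tiny sketch of that guard, with the netlink parsing elided and a hypothetical driver callback:

#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical driver callback: takes a signed index, as ndo_set_vf_mac() does. */
static int driver_set_vf_mac(int vf, const unsigned char *mac)
{
	(void)mac;
	return vf >= 0 ? 0 : -ERANGE;
}

static int set_vf_mac(uint32_t vf, const unsigned char *mac)
{
	/* The index arrives as a u32 from userspace but the driver API uses
	 * int: refuse anything that cannot be represented without wrapping.
	 */
	if (vf >= INT_MAX)
		return -EINVAL;

	return driver_set_vf_mac((int)vf, mac);
}

int main(void)
{
	const unsigned char mac[6] = { 0 };

	printf("%d %d\n", set_vf_mac(3, mac), set_vf_mac(0xffffffffu, mac));
	return 0;
}
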
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4a71d78d0c6a..7164569c1ec8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3094,6 +3094,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int pos;
int dummy;
+ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+ /* gso_size is untrusted, and we have a frag_list with a linear
+ * non head_frag head.
+ *
+ * (we assume checking the first list_skb member suffices;
+ * i.e if either of the list_skb members have non head_frag
+ * head, then the first one has too).
+ *
+ * If head_skb's headlen does not fit requested gso_size, it
+ * means that the frag_list members do NOT terminate on exact
+ * gso_size boundaries. Hence we cannot perform skb_frag_t page
+ * sharing. Therefore we must fallback to copying the frag_list
+ * skbs; we do so by disabling SG.
+ */
+ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+ features &= ~NETIF_F_SG;
+ }
+
__skb_push(head_skb, doffset);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
diff --git a/net/core/sock.c b/net/core/sock.c
index 3041aa6df602..41794a698da6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1426,8 +1426,6 @@ static void __sk_destruct(struct rcu_head *head)
sk_filter_uncharge(sk, filter);
RCU_INIT_POINTER(sk->sk_filter, NULL);
}
- if (rcu_access_pointer(sk->sk_reuseport_cb))
- reuseport_detach_sock(sk);
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
@@ -1450,7 +1448,14 @@ static void __sk_destruct(struct rcu_head *head)
void sk_destruct(struct sock *sk)
{
- if (sock_flag(sk, SOCK_RCU_FREE))
+ bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+ if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+ reuseport_detach_sock(sk);
+ use_call_rcu = true;
+ }
+
+ if (use_call_rcu)
call_rcu(&sk->sk_rcu, __sk_destruct);
else
__sk_destruct(&sk->sk_rcu);
@@ -2151,7 +2156,7 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
}
if (sk_has_memory_pressure(sk)) {
- int alloc;
+ u64 alloc;
if (!sk_under_memory_pressure(sk))
return 1;
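
Widening "alloc" from int to u64 in __sk_mem_schedule() matters because the
socket count is later multiplied by a per-socket page estimate, and that
product can exceed 32 bits on a busy machine, turning the memory-pressure
comparison into a signed-overflow lottery. A rough standalone illustration
with made-up numbers (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t sockets = 100000;	/* sockets currently allocated */
	int32_t pages_per_sock = 30000;	/* pages one large socket may pin */

	/* The 32-bit product would be ~3e9 and overflow int (undefined
	 * behaviour); widening one operand, as the patch does with u64,
	 * keeps the limit comparison meaningful.
	 */
	int64_t wide = (int64_t)sockets * pages_per_sock;

	printf("64-bit product: %lld\n", (long long)wide);
	return 0;
}
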
diff --git a/net/core/stream.c b/net/core/stream.c
index 1086c8b280a8..6e41b20bf9f8 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -118,7 +118,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
int err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
- bool noblock = (*timeo_p ? false : true);
DEFINE_WAIT(wait);
if (sk_stream_memory_free(sk))
@@ -131,11 +130,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
- if (!*timeo_p) {
- if (noblock)
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- goto do_nonblock;
- }
+ if (!*timeo_p)
+ goto do_eagain;
if (signal_pending(current))
goto do_interrupted;
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -167,7 +163,13 @@ out:
do_error:
err = -EPIPE;
goto out;
-do_nonblock:
+do_eagain:
+ /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+ * be generated later.
+ * When TCP receives ACK packets that make room, tcp_check_space()
+ * only calls tcp_new_space() if SOCK_NOSPACE is set.
+ */
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
err = -EAGAIN;
goto out;
do_interrupted:
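
The new do_eagain path sets SOCK_NOSPACE before returning -EAGAIN so that
tcp_check_space() will raise EPOLLOUT once ACKs free send-queue space;
previously a zero-timeout (non-blocking) sender could miss that wakeup. From
user space the contract is the usual non-blocking send loop; a minimal sketch
(assumes a connected non-blocking socket fd, error handling trimmed):

#include <errno.h>
#include <poll.h>
#include <stddef.h>
#include <sys/socket.h>

/* Send the whole buffer; on EAGAIN, wait for POLLOUT and retry. The
 * SOCK_NOSPACE fix above is what makes the POLLOUT wakeup reliable once
 * the peer ACKs data and room opens up in the send queue.
 */
int send_all(int fd, const char *buf, size_t len)
{
	size_t off = 0;

	while (off < len) {
		ssize_t n = send(fd, buf + off, len - off, MSG_DONTWAIT);

		if (n > 0) {
			off += (size_t)n;
		} else if (n < 0 && errno == EAGAIN) {
			struct pollfd pfd = { .fd = fd, .events = POLLOUT };

			if (poll(&pfd, 1, -1) < 0)
				return -1;
		} else {
			return -1;
		}
	}
	return 0;
}
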
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 546ba76b35a5..b4318c1b5b96 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -24,9 +24,12 @@
static int zero = 0;
static int one = 1;
+static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -231,6 +234,52 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
+#ifdef CONFIG_BPF_JIT
+static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret, jit_enable = *(int *)table->data;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &jit_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ *(int *)table->data = jit_enable;
+ if (jit_enable == 2)
+ pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
+ }
+ return ret;
+}
+
+# ifdef CONFIG_HAVE_EBPF_JIT
+static int
+proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+}
+# endif /* CONFIG_HAVE_EBPF_JIT */
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
+#endif
+
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -292,13 +341,14 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_enable,
.maxlen = sizeof(int),
.mode = 0644,
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
- .proc_handler = proc_dointvec
-#else
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax_bpf_enable,
+# ifdef CONFIG_BPF_JIT_ALWAYS_ON
.extra1 = &one,
.extra2 = &one,
-#endif
+# else
+ .extra1 = &zero,
+ .extra2 = &two,
+# endif
},
# ifdef CONFIG_HAVE_EBPF_JIT
{
@@ -306,9 +356,20 @@ static struct ctl_table net_core_table[] = {
.data = &bpf_jit_harden,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax_bpf_restricted,
+ .extra1 = &zero,
+ .extra2 = &two,
},
# endif
+ {
+ .procname = "bpf_jit_limit",
+ .data = &bpf_jit_limit,
+ .maxlen = sizeof(long),
+ .mode = 0600,
+ .proc_handler = proc_dolongvec_minmax_bpf_restricted,
+ .extra1 = &long_one,
+ .extra2 = &long_max,
+ },
#endif
{
.procname = "netdev_tstamp_prequeue",
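
The new handlers gate writes to the three BPF JIT knobs (bpf_jit_enable,
bpf_jit_harden, bpf_jit_limit) on CAP_SYS_ADMIN and clamp them to their valid
ranges, parsing into a local copy and committing only on success. A rough
userspace sketch of that parse-validate-commit pattern (hypothetical helper,
not the kernel proc API):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the handler shape: copy into a temporary, range-check, and
 * only then write back, so an unprivileged or out-of-range write never
 * leaves a half-applied value behind.
 */
static int write_knob(int *knob, int requested, int min, int max, bool admin)
{
	int tmp = requested;		/* like tmp.data = &jit_enable */

	if (!admin)
		return -1;		/* -EPERM */
	if (tmp < min || tmp > max)
		return -2;		/* proc_dointvec_minmax rejects it */

	*knob = tmp;			/* commit */
	if (tmp == 2)
		fprintf(stderr, "debug-only JIT mode enabled\n");
	return 0;
}

int main(void)
{
	int bpf_jit_enable = 1;

	write_knob(&bpf_jit_enable, 2, 0, 2, true);
	printf("bpf_jit_enable = %d\n", bpf_jit_enable);
	return 0;
}
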
diff --git a/net/core/utils.c b/net/core/utils.c
index cf5622b9ccc4..3317f90b32eb 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -316,6 +316,23 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
}
EXPORT_SYMBOL(inet_proto_csum_replace4);
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update layer 4 header as per the update in IPv6 src/dst address.
+ *
+ * There is no need to update skb->csum in this function, because the updates
+ * to the two fields, a) the IPv6 src/dst address and b) the L4 header checksum,
+ * cancel each other out in the skb->csum calculation. inet_proto_csum_replace4,
+ * by contrast, does need to update skb->csum, because the updates to its three
+ * fields, a) the IPv4 src/dst address, b) the IPv4 header checksum and c) the
+ * L4 header checksum, result in the same diff as the L4 header checksum alone.
+ */
void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
bool pseudohdr)
@@ -327,9 +344,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL) {
*sum = csum_fold(csum_partial(diff, sizeof(diff),
~csum_unfold(*sum)));
- if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
- skb->csum = ~csum_partial(diff, sizeof(diff),
- ~skb->csum);
} else if (pseudohdr)
*sum = ~csum_fold(csum_partial(diff, sizeof(diff),
csum_unfold(*sum)));
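
The comment added above boils down to ones' complement arithmetic: rewriting
an address word by some delta and patching the L4 checksum word by the
opposite delta leaves the overall packet sum unchanged, so skb->csum needs no
touch-up here. A toy demonstration of that cancellation (plain 16-bit words,
not the kernel csum API):

#include <stdint.h>
#include <stdio.h>

/* 16-bit ones' complement sum over n words, with end-around carry. */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	for (int i = 0; i < n; i++) {
		sum += w[i];
		sum = (sum & 0xffff) + (sum >> 16);
	}
	return (uint16_t)sum;
}

int main(void)
{
	/* Toy "packet": one address word plus its L4 checksum word. */
	uint16_t pkt[2] = { 0x1234, 0 };
	uint16_t before, after;

	pkt[1] = (uint16_t)~csum16(pkt, 1);	/* checksum over the address */
	before = csum16(pkt, 2);

	/* NAT-style rewrite: change the address, patch only the checksum. */
	pkt[0] = 0xabcd;
	pkt[1] = (uint16_t)~csum16(pkt, 1);
	after = csum16(pkt, 2);

	printf("whole-packet sum before=%#x after=%#x\n", before, after);
	return 0;
}
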
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index f227f002c73d..db87d9f58019 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
return -ENOMEM;
- return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval);
+ if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
+ kfree(fval.sp.vec);
+ return -ENOMEM;
+ }
+
+ return 0;
}
/**
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1d6d3aaa8c3d..322268b88fec 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -121,7 +121,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
- inet->inet_id = dp->dccps_iss ^ jiffies;
+ inet->inet_id = prandom_u32();
err = dccp_connect(sk);
rt = NULL;
@@ -417,7 +417,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
- newinet->inet_id = jiffies;
+ newinet->inet_id = prandom_u32();
if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
goto put_and_exit;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 93c706172f40..87c513b5ff2e 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -431,8 +431,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
- newnp->mcast_oif = inet6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index b2c26b081134..80554e7e9a0f 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -55,7 +55,7 @@
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
-#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
+#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 0f99297b2fb3..f1792a847d0b 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -59,7 +59,7 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree)
dst->tree = tree;
dst->cpu_switch = -1;
INIT_LIST_HEAD(&dst->list);
- list_add_tail(&dsa_switch_trees, &dst->list);
+ list_add_tail(&dst->list, &dsa_switch_trees);
kref_init(&dst->refcount);
return dst;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 21bffde6e4bf..98074338cd83 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -146,6 +146,8 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
skb->dev->stats.rx_packets++;
skb->dev->stats.rx_bytes += skb->len;
+ skb->offload_fwd_mark = 1;
+
netif_receive_skb(skb);
return 0;
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 0c90cacee7aa..8108741cf8fb 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -40,9 +40,6 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
struct dsa_slave_priv *p = netdev_priv(dev);
u16 *phdr, hdr;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
if (skb_cow_head(skb, 0) < 0)
goto out_free;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 24d7aff8db1a..204aa0131fbe 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -238,7 +238,12 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16
eth->h_proto = type;
memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
- hh->hh_len = ETH_HLEN;
+
+ /* Pairs with READ_ONCE() in neigh_resolve_output(),
+ * neigh_hh_output() and neigh_update_hhs().
+ */
+ smp_store_release(&hh->hh_len, ETH_HLEN);
+
return 0;
}
EXPORT_SYMBOL(eth_header_cache);
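
Publishing hh_len with smp_store_release() pairs with the READ_ONCE() readers
so that a consumer observing hh_len == ETH_HLEN is guaranteed to also see the
header bytes written just above it. A rough standalone analogue using C11
acquire/release atomics (slightly stronger than the kernel's READ_ONCE pairing,
and not the kernel primitives themselves):

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct hh_sketch {
	char data[16];		/* cached L2 header bytes */
	_Atomic int hh_len;	/* 0 means "not ready yet" */
};

/* Writer: fill in the header first, then publish the length with release
 * semantics, the same ordering smp_store_release() provides in the patch.
 */
static void publish(struct hh_sketch *hh, const char *hdr, int len)
{
	memcpy(hh->data, hdr, (size_t)len);
	atomic_store_explicit(&hh->hh_len, len, memory_order_release);
}

/* Reader: an acquire load guarantees that a non-zero length implies the
 * header bytes are visible, so it is safe to copy them out.
 */
static int try_use(struct hh_sketch *hh, char *out)
{
	int len = atomic_load_explicit(&hh->hh_len, memory_order_acquire);

	if (!len)
		return 0;
	memcpy(out, hh->data, (size_t)len);
	return len;
}

int main(void)
{
	struct hh_sketch hh = { .hh_len = 0 };
	char out[16];

	publish(&hh, "\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\x08\x00", 14);
	printf("copied %d header bytes\n", try_use(&hh, out));
	return 0;
}
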
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 52694cb759b0..aead5ac4dbf6 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -281,6 +281,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
skb->dev->dev_addr, skb->len) <= 0)
goto out;
skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
if (hsrVer > 0) {
hsr_tag = (typeof(hsr_tag)) skb_put(skb, sizeof(struct hsr_tag));
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 6705420b3111..d7206581145d 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -468,13 +468,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
struct hsr_port *port;
unsigned long tdiff;
-
- rcu_read_lock();
node = find_node_by_AddrA(&hsr->node_db, addr);
- if (!node) {
- rcu_read_unlock();
- return -ENOENT; /* No such entry */
- }
+ if (!node)
+ return -ENOENT;
ether_addr_copy(addr_b, node->MacAddressB);
@@ -509,7 +505,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
*addr_b_ifindex = -1;
}
- rcu_read_unlock();
-
return 0;
}
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index d4d1617f43a8..1da236db04d6 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -63,10 +63,15 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
else
multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);
- if (!data[IFLA_HSR_VERSION])
+ if (!data[IFLA_HSR_VERSION]) {
hsr_version = 0;
- else
+ } else {
hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
+ if (hsr_version > 1) {
+ netdev_info(dev, "Only versions 0..1 are supported");
+ return -EINVAL;
+ }
+ }
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}
@@ -137,6 +142,7 @@ static struct genl_family hsr_genl_family = {
.name = "HSR",
.version = 1,
.maxattr = HSR_A_MAX,
+ .netnsok = true,
};
static const struct genl_multicast_group hsr_mcgrps[] = {
@@ -264,17 +270,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
if (!na)
goto invalid;
- hsr_dev = __dev_get_by_index(genl_info_net(info),
- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ rcu_read_lock();
+ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev)
- goto invalid;
+ goto rcu_unlock;
if (!is_hsr_master(hsr_dev))
- goto invalid;
-
+ goto rcu_unlock;
/* Send reply */
-
- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb_out) {
res = -ENOMEM;
goto fail;
@@ -326,12 +331,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
if (res < 0)
goto nla_put_failure;
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (port)
res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
port->dev->ifindex);
- rcu_read_unlock();
if (res < 0)
goto nla_put_failure;
@@ -341,20 +344,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
if (res < 0)
goto nla_put_failure;
- rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (port)
res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
port->dev->ifindex);
- rcu_read_unlock();
if (res < 0)
goto nla_put_failure;
+ rcu_read_unlock();
+
genlmsg_end(skb_out, msg_head);
genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
return 0;
+rcu_unlock:
+ rcu_read_unlock();
invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
return 0;
@@ -364,6 +369,7 @@ nla_put_failure:
/* Fall through */
fail:
+ rcu_read_unlock();
return res;
}
@@ -371,16 +377,14 @@ fail:
*/
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
- /* For receiving */
- struct nlattr *na;
+ unsigned char addr[ETH_ALEN];
struct net_device *hsr_dev;
-
- /* For sending */
struct sk_buff *skb_out;
- void *msg_head;
struct hsr_priv *hsr;
- void *pos;
- unsigned char addr[ETH_ALEN];
+ bool restart = false;
+ struct nlattr *na;
+ void *pos = NULL;
+ void *msg_head;
int res;
if (!info)
@@ -390,17 +394,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
if (!na)
goto invalid;
- hsr_dev = __dev_get_by_index(genl_info_net(info),
- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+ rcu_read_lock();
+ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev)
- goto invalid;
+ goto rcu_unlock;
if (!is_hsr_master(hsr_dev))
- goto invalid;
-
+ goto rcu_unlock;
+restart:
/* Send reply */
-
- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb_out) {
res = -ENOMEM;
goto fail;
@@ -414,18 +418,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
goto nla_put_failure;
}
- res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
- if (res < 0)
- goto nla_put_failure;
+ if (!restart) {
+ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+ if (res < 0)
+ goto nla_put_failure;
+ }
hsr = netdev_priv(hsr_dev);
- rcu_read_lock();
- pos = hsr_get_next_node(hsr, NULL, addr);
+ if (!pos)
+ pos = hsr_get_next_node(hsr, NULL, addr);
while (pos) {
res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
if (res < 0) {
- rcu_read_unlock();
+ if (res == -EMSGSIZE) {
+ genlmsg_end(skb_out, msg_head);
+ genlmsg_unicast(genl_info_net(info), skb_out,
+ info->snd_portid);
+ restart = true;
+ goto restart;
+ }
goto nla_put_failure;
}
pos = hsr_get_next_node(hsr, pos, addr);
@@ -437,15 +449,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
return 0;
+rcu_unlock:
+ rcu_read_unlock();
invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL);
return 0;
nla_put_failure:
- kfree_skb(skb_out);
+ nlmsg_free(skb_out);
/* Fall through */
fail:
+ rcu_read_unlock();
return res;
}
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index f5b60388d02f..3d94f547db00 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -31,6 +31,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
rcu_read_lock(); /* hsr->node_db, hsr->ports */
port = hsr_port_get_rcu(skb->dev);
+ if (!port)
+ goto finish_pass;
if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
/* Directly kill frames sent by ourselves */
@@ -149,16 +151,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
if (port == NULL)
return -ENOMEM;
+ port->hsr = hsr;
+ port->dev = dev;
+ port->type = type;
+
if (type != HSR_PT_MASTER) {
res = hsr_portdev_setup(dev, port);
if (res)
goto fail_dev_setup;
}
- port->hsr = hsr;
- port->dev = dev;
- port->type = type;
-
list_add_tail_rcu(&port->port_list, &hsr->ports);
synchronize_rcu();
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index aab1e2dfdfca..5936bfafb1c4 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -25,7 +25,7 @@
#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
-#include <net/ipv6.h>
+#include <net/ipv6_frag.h>
#include <net/inet_frag.h>
#include "6lowpan_i.h"
@@ -633,7 +633,7 @@ err_sysctl:
void lowpan_net_frag_exit(void)
{
- inet_frags_fini(&lowpan_frags);
lowpan_frags_sysctl_unregister();
unregister_pernet_subsys(&lowpan_frags_ops);
+ inet_frags_fini(&lowpan_frags);
}
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 35c432668454..040983fc15da 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -30,7 +30,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
[IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index bf5d26d83af0..f66e4afb978a 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1003,6 +1003,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
switch (sock->type) {
case SOCK_RAW:
+ rc = -EPERM;
+ if (!capable(CAP_NET_RAW))
+ goto out;
proto = &ieee802154_raw_prot;
ops = &ieee802154_raw_ops;
break;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index b54b3ca939db..4d265d4a0dbe 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -298,6 +298,7 @@ config SYN_COOKIES
config NET_IPVTI
tristate "Virtual (secure) IP: tunneling"
+ depends on IPV6 || IPV6=n
select INET_TUNNEL
select NET_IP_TUNNEL
depends on INET_XFRM_MODE_TUNNEL
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 71bcab94c5c7..0a6f72763beb 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1738,6 +1738,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
+ int res;
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
@@ -1749,7 +1750,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
memset(opt, 0, sizeof(struct ip_options));
opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
- if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
return;
if (gateway)
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f915abff1350..d3eddfd13875 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -75,7 +75,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
sk_set_txhash(sk);
- inet->inet_id = jiffies;
+ inet->inet_id = prandom_u32();
sk_dst_set(sk, &rt->dst);
err = 0;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index f08f984ebc56..af3363f4543f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -67,6 +67,11 @@
#include "fib_lookup.h"
+#define IPV6ONLY_FLAGS \
+ (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
+ IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
+ IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
+
static struct ipv4_devconf ipv4_devconf = {
.data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@@ -453,6 +458,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
+ /* Don't set IPv6 only flags to IPv4 addresses */
+ ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
ifap = &ifa1->ifa_next) {
if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
@@ -552,12 +560,15 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
return NULL;
}
-static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
+static int ip_mc_autojoin_config(struct net *net, bool join,
+ const struct in_ifaddr *ifa)
{
+#if defined(CONFIG_IP_MULTICAST)
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ifa->ifa_address,
.imr_ifindex = ifa->ifa_dev->dev->ifindex,
};
+ struct sock *sk = net->ipv4.mc_autojoin_sk;
int ret;
ASSERT_RTNL();
@@ -570,6 +581,9 @@ static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
release_sock(sk);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -609,7 +623,7 @@ static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
continue;
if (ipv4_is_multicast(ifa->ifa_address))
- ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
+ ip_mc_autojoin_config(net, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
@@ -865,8 +879,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
- int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
- true, ifa);
+ int ret = ip_mc_autojoin_config(net, true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
@@ -1378,11 +1391,6 @@ skip:
}
}
-static bool inetdev_valid_mtu(unsigned int mtu)
-{
- return mtu >= IPV4_MIN_MTU;
-}
-
static void inetdev_send_gratuitous_arp(struct net_device *dev,
struct in_device *in_dev)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 90c654012510..6aec95e1fc13 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1358,8 +1358,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
int ret = 0;
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
+ int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
struct net *net = dev_net(dev);
- int tb_id = l3mdev_fib_table(dev);
struct fib_info *fi;
if (!fib_info_laddrhash || local == 0)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 36f0a8c581d0..a1a7ed6fc8dd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2256,6 +2256,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
" %Zd bytes, size of tnode: %Zd bytes.\n",
LEAF_SIZE, TNODE_SIZE(0));
+ rcu_read_lock();
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
@@ -2275,7 +2276,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
trie_show_usage(seq, t->stats);
#endif
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();
return 0;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 030d1531e897..17acc89f9dec 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -119,6 +119,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
struct guehdr *guehdr;
void *data;
u16 doffset = 0;
+ u8 proto_ctype;
if (!fou)
return 1;
@@ -210,13 +211,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(guehdr->control))
return gue_control_message(skb, guehdr);
+ proto_ctype = guehdr->proto_ctype;
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb);
if (iptunnel_pull_offloads(skb))
goto drop;
- return -guehdr->proto_ctype;
+ return -proto_ctype;
drop:
kfree_skb(skb);
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index b798862b6be5..4a5e55e94a9e 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -60,7 +60,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
}
EXPORT_SYMBOL_GPL(gre_del_protocol);
-/* Fills in tpi and returns header length to be pulled. */
+/* Fills in tpi and returns header length to be pulled.
+ * Note that caller must use pskb_may_pull() before pulling GRE header.
+ */
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs)
{
@@ -86,13 +88,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
options = (__be32 *)(greh + 1);
if (greh->flags & GRE_CSUM) {
- if (skb_checksum_simple_validate(skb)) {
+ if (!skb_checksum_simple_validate(skb)) {
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+ } else if (csum_err) {
*csum_err = true;
return -EINVAL;
}
- skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
- null_compute_pseudo);
options++;
}
@@ -113,8 +116,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
* - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
*/
if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+ u8 _val, *val;
+
+ val = skb_header_pointer(skb, nhs + hdr_len,
+ sizeof(_val), &_val);
+ if (!val)
+ return -EINVAL;
tpi->proto = proto;
- if ((*(u8 *)options & 0xF0) != 0x40)
+ if ((*val & 0xF0) != 0x40)
hdr_len += 4;
}
tpi->hdr_len = hdr_len;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 172d3dfed0c4..cc5c8d598e5e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -256,10 +256,11 @@ bool icmp_global_allow(void)
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
- * without taking the spinlock.
+ * without taking the spinlock. The READ_ONCE() are paired
+ * with the following WRITE_ONCE() in this same function.
*/
- if (!icmp_global.credit) {
- delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (!READ_ONCE(icmp_global.credit)) {
+ delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -269,14 +270,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
- icmp_global.stamp = now;
+ WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
- icmp_global.credit = credit;
+ WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}
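
icmp_global_allow() peeks at the bucket outside the spinlock, which is why
credit and stamp are now accessed with READ_ONCE()/WRITE_ONCE(); the bucket
itself is a plain token bucket refilled in proportion to elapsed ticks and
capped at a burst. A simplified single-threaded sketch of that arithmetic
(made-up HZ and rates, no locking or *_ONCE annotations):

#include <stdint.h>
#include <stdio.h>

#define HZ		1000
#define MSGS_PER_SEC	1000
#define BURST		50

static uint32_t credit, stamp;

/* Refill proportionally to elapsed ticks (at most one full second),
 * cap at the burst size, then spend one token if any are left.
 */
static int allow(uint32_t now)
{
	uint32_t delta = now - stamp;

	if (delta > HZ)
		delta = HZ;
	if (delta >= HZ / 50) {
		uint32_t incr = (uint32_t)MSGS_PER_SEC * delta / HZ;

		if (incr)
			stamp = now;
		credit += incr;
		if (credit > BURST)
			credit = BURST;
	}
	if (!credit)
		return 0;
	credit--;
	return 1;
}

int main(void)
{
	int sent = 0;

	for (uint32_t t = 0; t < 200; t++)	/* 200 ticks of ICMP pressure */
		sent += allow(t);
	printf("allowed %d ICMP packets over 200 ticks\n", sent);
	return 0;
}
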
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f2e6e874e4ec..02c1736c0b89 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -190,6 +190,17 @@ static void ip_ma_put(struct ip_mc_list *im)
pmc != NULL; \
pmc = rtnl_dereference(pmc->next_rcu))
+static void ip_sf_list_clear_all(struct ip_sf_list *psf)
+{
+ struct ip_sf_list *next;
+
+ while (psf) {
+ next = psf->sf_next;
+ kfree(psf);
+ psf = next;
+ }
+}
+
#ifdef CONFIG_IP_MULTICAST
/*
@@ -635,6 +646,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
}
}
+static void kfree_pmc(struct ip_mc_list *pmc)
+{
+ ip_sf_list_clear_all(pmc->sources);
+ ip_sf_list_clear_all(pmc->tomb);
+ kfree(pmc);
+}
+
static void igmpv3_send_cr(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
@@ -671,7 +689,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
else
in_dev->mc_tomb = pmc_next;
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
} else
pmc_prev = pmc;
}
@@ -1194,13 +1212,13 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
im->interface = pmc->interface;
im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
if (im->sfmode == MCAST_INCLUDE) {
- im->tomb = pmc->tomb;
- im->sources = pmc->sources;
+ swap(im->tomb, pmc->tomb);
+ swap(im->sources, pmc->sources);
for (psf = im->sources; psf; psf = psf->sf_next)
psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
}
spin_unlock_bh(&im->lock);
}
@@ -1221,21 +1239,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
nextpmc = pmc->next;
ip_mc_clear_src(pmc);
in_dev_put(pmc->interface);
- kfree(pmc);
+ kfree_pmc(pmc);
}
/* clear dead sources, too */
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
- struct ip_sf_list *psf, *psf_next;
+ struct ip_sf_list *psf;
spin_lock_bh(&pmc->lock);
psf = pmc->tomb;
pmc->tomb = NULL;
spin_unlock_bh(&pmc->lock);
- for (; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
- }
+ ip_sf_list_clear_all(psf);
}
rcu_read_unlock();
}
@@ -2099,7 +2114,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
+ struct ip_sf_list *tomb, *sources;
spin_lock_bh(&pmc->lock);
tomb = pmc->tomb;
@@ -2111,14 +2126,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
pmc->sfcount[MCAST_EXCLUDE] = 1;
spin_unlock_bh(&pmc->lock);
- for (psf = tomb; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
- for (psf = sources; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ ip_sf_list_clear_all(tomb);
+ ip_sf_list_clear_all(sources);
}
/* Join a multicast group
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e4d16fc5bbb3..4273576e1475 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -868,12 +868,13 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
struct sock *sk;
num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock_bh(&ilb->lock);
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 0fb49dedc9fb..2325cd3454a6 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -24,6 +24,62 @@
#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ };
+ struct sk_buff *next_frag;
+ int frag_run_len;
+};
+
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ fragcb_clear(skb);
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+ fragcb_clear(skb);
+
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
@@ -122,6 +178,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
kmem_cache_free(f->frags_cachep, q);
}
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
void inet_frag_destroy(struct inet_frag_queue *q)
{
struct sk_buff *fp;
@@ -223,3 +301,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
return fq;
}
EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end)
+{
+ struct sk_buff *last = q->fragments_tail;
+
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * Duplicates, however, should be ignored (i.e. skb dropped, but the
+ * queue/fragments kept for later reassembly).
+ */
+ if (!last)
+ fragrun_create(q, skb); /* First fragment. */
+ else if (last->ip_defrag_offset + last->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < last->ip_defrag_offset + last->len)
+ return IPFRAG_OVERLAP;
+ if (offset == last->ip_defrag_offset + last->len)
+ fragrun_append_to_last(q, skb);
+ else
+ fragrun_create(q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ struct rb_node **rbn, *parent;
+
+ rbn = &q->rb_fragments.rb_node;
+ do {
+ struct sk_buff *curr;
+ int curr_run_end;
+
+ parent = *rbn;
+ curr = rb_to_skb(parent);
+ curr_run_end = curr->ip_defrag_offset +
+ FRAG_CB(curr)->frag_run_len;
+ if (end <= curr->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= curr_run_end)
+ rbn = &parent->rb_right;
+ else if (offset >= curr->ip_defrag_offset &&
+ end <= curr_run_end)
+ return IPFRAG_DUP;
+ else
+ return IPFRAG_OVERLAP;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ fragcb_clear(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+ }
+
+ skb->ip_defrag_offset = offset;
+
+ return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent)
+{
+ struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+ struct sk_buff **nextp;
+ int delta;
+
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
+ if (!fp)
+ return NULL;
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(parent)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &q->rb_fragments);
+ if (q->fragments_tail == skb)
+ q->fragments_tail = fp;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ consume_skb(head);
+ head = skb;
+ }
+ WARN_ON(head->ip_defrag_offset != 0);
+
+ delta = -head->truesize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ return NULL;
+
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(q->net, delta);
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments.
+ */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ return NULL;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->data_len = head->data_len - plen;
+ clone->len = clone->data_len;
+ head->truesize += clone->truesize;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(q->net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
+ }
+
+ return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data)
+{
+ struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+ struct rb_node *rbn;
+ struct sk_buff *fp;
+
+ skb_push(head, head->data - skb_network_header(head));
+
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &q->rb_fragments);
+ rbn = rbnext;
+ }
+ }
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ *nextp = NULL;
+ head->next = NULL;
+ head->prev = NULL;
+ head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+ struct sk_buff *head;
+
+ if (q->fragments) {
+ head = q->fragments;
+ q->fragments = head->next;
+ } else {
+ struct sk_buff *skb;
+
+ head = skb_rb_first(&q->rb_fragments);
+ if (!head)
+ return NULL;
+ skb = FRAG_CB(head)->next_frag;
+ if (skb)
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &q->rb_fragments);
+ else
+ rb_erase(&head->rbnode, &q->rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+ if (head == q->fragments_tail)
+ q->fragments_tail = NULL;
+
+ sub_frag_mem_limit(q->net, head->truesize);
+
+ return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
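
The helpers moved here keep the invariant spelled out in the comment at the
top of the file: adjacent fragments are chained into a "run" whose head holds
the total length in frag_run_len, so the rb-tree carries one node per run
rather than one per fragment. A small sketch of just the fast-path decision in
inet_frag_queue_insert() for a fragment landing past the current tail run
(made-up offsets, no real skbs, rb-tree search omitted):

#include <stdio.h>

enum verdict { APPEND_TO_RUN, NEW_RUN, OVERLAP };

/* Classify a new fragment [offset, end) against the tail run that currently
 * ends at tail_end, assuming end > tail_end (the common in-order case).
 */
static enum verdict classify(int tail_end, int offset, int end)
{
	(void)end;
	if (offset < tail_end)
		return OVERLAP;		/* IPFRAG_OVERLAP: queue is dropped */
	if (offset == tail_end)
		return APPEND_TO_RUN;	/* fragrun_append_to_last() */
	return NEW_RUN;			/* gap: fragrun_create() */
}

int main(void)
{
	/* The tail run currently covers bytes [0, 1480). */
	printf("adjacent: %d, gap: %d, overlap: %d\n",
	       classify(1480, 1480, 2960),
	       classify(1480, 2000, 3000),
	       classify(1480, 1000, 2000));
	return 0;
}
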
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index b9bcf3db3af9..4bf542f4d980 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -218,9 +218,10 @@ struct sock *__inet_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
u32 phash = 0;
- sk_for_each_rcu(sk, &ilb->head) {
+ sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
@@ -441,10 +442,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
bool match_wildcard))
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
+ const struct hlist_nulls_node *node;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
- sk_for_each_rcu(sk2, &ilb->head) {
+ sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -482,9 +484,9 @@ int __inet_hash(struct sock *sk, struct sock *osk,
}
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
- hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
else
- hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+ __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
@@ -527,10 +529,7 @@ void inet_unhash(struct sock *sk)
spin_lock_bh(lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
- if (listener)
- done = __sk_del_node_init(sk);
- else
- done = __sk_nulls_del_node_init_rcu(sk);
+ done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
@@ -666,7 +665,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
- INIT_HLIST_HEAD(&h->listening_hash[i].head);
+ INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+ i + LISTENING_NULLS_BASE);
}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index c7334d1e392a..6e9ba9dfb5b2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,57 +56,6 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
- struct inet_skb_parm h;
- struct sk_buff *next_frag;
- int frag_run_len;
-};
-
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
- BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
- FRAG_CB(skb)->next_frag = NULL;
- FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
- struct sk_buff *skb)
-{
- RB_CLEAR_NODE(&skb->rbnode);
- FRAG_CB(skb)->next_frag = NULL;
-
- FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
- FRAG_CB(q->fragments_tail)->next_frag = skb;
- q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
- if (q->last_run_head)
- rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
- &q->last_run_head->rbnode.rb_right);
- else
- rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
- rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
- ip4_frag_init_run(skb);
- q->fragments_tail = skb;
- q->last_run_head = skb;
-}
-
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
@@ -210,27 +159,9 @@ static void ip_expire(unsigned long arg)
* pull the head out of the tree in order to be able to
* deal with head->dev.
*/
- if (qp->q.fragments) {
- head = qp->q.fragments;
- qp->q.fragments = head->next;
- } else {
- head = skb_rb_first(&qp->q.rb_fragments);
- if (!head)
- goto out;
- if (FRAG_CB(head)->next_frag)
- rb_replace_node(&head->rbnode,
- &FRAG_CB(head)->next_frag->rbnode,
- &qp->q.rb_fragments);
- else
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- memset(&head->rbnode, 0, sizeof(head->rbnode));
- barrier();
- }
- if (head == qp->q.fragments_tail)
- qp->q.fragments_tail = NULL;
-
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
+ head = inet_frag_pull_head(&qp->q);
+ if (!head)
+ goto out;
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out;
@@ -343,12 +274,10 @@ static int ip_frag_reinit(struct ipq *qp)
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
- struct rb_node **rbn, *parent;
- struct sk_buff *skb1, *prev_tail;
- int ihl, end, skb1_run_end;
+ int ihl, end, flags, offset;
+ struct sk_buff *prev_tail;
struct net_device *dev;
unsigned int fragsize;
- int flags, offset;
int err = -ENOENT;
u8 ecn;
@@ -380,7 +309,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
*/
if (end < qp->q.len ||
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
- goto err;
+ goto discard_qp;
qp->q.flags |= INET_FRAG_LAST_IN;
qp->q.len = end;
} else {
@@ -392,82 +321,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (end > qp->q.len) {
/* Some bits beyond end -> corruption. */
if (qp->q.flags & INET_FRAG_LAST_IN)
- goto err;
+ goto discard_qp;
qp->q.len = end;
}
}
if (end == offset)
- goto err;
+ goto discard_qp;
err = -ENOMEM;
if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
- goto err;
+ goto discard_qp;
err = pskb_trim_rcsum(skb, end - offset);
if (err)
- goto err;
+ goto discard_qp;
/* Note : skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
/* Makes sure compiler wont do silly aliasing games */
barrier();
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- *
- * We do the same here for IPv4 (and increment an snmp counter) but
- * we do not want to drop the whole queue in response to a duplicate
- * fragment.
- */
-
- err = -EINVAL;
- /* Find out where to put this fragment. */
prev_tail = qp->q.fragments_tail;
- if (!prev_tail)
- ip4_frag_create_run(&qp->q, skb); /* First fragment. */
- else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
- /* This is the common case: skb goes to the end. */
- /* Detect and discard overlaps. */
- if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
- goto discard_qp;
- if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
- ip4_frag_append_to_last_run(&qp->q, skb);
- else
- ip4_frag_create_run(&qp->q, skb);
- } else {
- /* Binary search. Note that skb can become the first fragment,
- * but not the last (covered above).
- */
- rbn = &qp->q.rb_fragments.rb_node;
- do {
- parent = *rbn;
- skb1 = rb_to_skb(parent);
- skb1_run_end = skb1->ip_defrag_offset +
- FRAG_CB(skb1)->frag_run_len;
- if (end <= skb1->ip_defrag_offset)
- rbn = &parent->rb_left;
- else if (offset >= skb1_run_end)
- rbn = &parent->rb_right;
- else if (offset >= skb1->ip_defrag_offset &&
- end <= skb1_run_end)
- goto err; /* No new data, potential duplicate */
- else
- goto discard_qp; /* Found an overlap */
- } while (*rbn);
- /* Here we have parent properly set, and rbn pointing to
- * one of its NULL left/right children. Insert skb.
- */
- ip4_frag_init_run(skb);
- rb_link_node(&skb->rbnode, parent, rbn);
- rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
- }
+ err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+ if (err)
+ goto insert_error;
if (dev)
qp->iif = dev->ifindex;
- skb->ip_defrag_offset = offset;
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
@@ -492,15 +372,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
skb->_skb_refdst = 0UL;
err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
+ if (err)
+ inet_frag_kill(&qp->q);
return err;
}
skb_dst_drop(skb);
return -EINPROGRESS;
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
inet_frag_kill(&qp->q);
- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
kfree_skb(skb);
return err;
@@ -512,12 +401,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
- struct sk_buff **nextp; /* To build frag_list. */
- struct rb_node *rbn;
- int len;
- int ihlen;
- int err;
+ void *reasm_data;
+ int len, err;
u8 ecn;
ipq_kill(qp);
@@ -527,111 +412,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
err = -EINVAL;
goto out_fail;
}
- /* Make the one we just received the head. */
- if (head != skb) {
- fp = skb_clone(skb, GFP_ATOMIC);
- if (!fp)
- goto out_nomem;
- FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
- if (RB_EMPTY_NODE(&skb->rbnode))
- FRAG_CB(prev_tail)->next_frag = fp;
- else
- rb_replace_node(&skb->rbnode, &fp->rbnode,
- &qp->q.rb_fragments);
- if (qp->q.fragments_tail == skb)
- qp->q.fragments_tail = fp;
- skb_morph(skb, head);
- FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
- rb_replace_node(&head->rbnode, &skb->rbnode,
- &qp->q.rb_fragments);
- consume_skb(head);
- head = skb;
- }
-
- WARN_ON(head->ip_defrag_offset != 0);
- /* Allocate a new buffer for the datagram. */
- ihlen = ip_hdrlen(head);
- len = ihlen + qp->q.len;
+ /* Make the one we just received the head. */
+ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_nomem;
+ len = ip_hdrlen(skb) + qp->q.len;
err = -E2BIG;
if (len > 65535)
goto out_oversize;
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_nomem;
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_nomem;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->truesize += clone->truesize;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(qp->q.net, clone->truesize);
- skb_shinfo(head)->frag_list = clone;
- nextp = &clone->next;
- } else {
- nextp = &skb_shinfo(head)->frag_list;
- }
+ inet_frag_reasm_finish(&qp->q, skb, reasm_data);
- skb_push(head, head->data - skb_network_header(head));
+ skb->dev = dev;
+ IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
- /* Traverse the tree in order, to build frag_list. */
- fp = FRAG_CB(head)->next_frag;
- rbn = rb_next(&head->rbnode);
- rb_erase(&head->rbnode, &qp->q.rb_fragments);
- while (rbn || fp) {
- /* fp points to the next sk_buff in the current run;
- * rbn points to the next run.
- */
- /* Go through the current run. */
- while (fp) {
- *nextp = fp;
- nextp = &fp->next;
- fp->prev = NULL;
- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
- fp->sk = NULL;
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp = FRAG_CB(fp)->next_frag;
- }
- /* Move to the next run. */
- if (rbn) {
- struct rb_node *rbnext = rb_next(rbn);
-
- fp = rb_to_skb(rbn);
- rb_erase(rbn, &qp->q.rb_fragments);
- rbn = rbnext;
- }
- }
- sub_frag_mem_limit(qp->q.net, head->truesize);
-
- *nextp = NULL;
- head->next = NULL;
- head->prev = NULL;
- head->dev = dev;
- head->tstamp = qp->q.stamp;
- IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
-
- iph = ip_hdr(head);
+ iph = ip_hdr(skb);
iph->tot_len = htons(len);
iph->tos |= ecn;
@@ -644,7 +441,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
* from one very small df-fragment and one large non-df frag.
*/
if (qp->max_df_size == qp->q.max_size) {
- IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
iph->frag_off = htons(IP_DF);
} else {
iph->frag_off = 0;
@@ -742,28 +539,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_check_defrag);
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
- struct rb_node *p = rb_first(root);
- unsigned int sum = 0;
-
- while (p) {
- struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
- p = rb_next(p);
- rb_erase(&skb->rbnode, root);
- while (skb) {
- struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
- sum += skb->truesize;
- kfree_skb(skb);
- skb = next;
- }
- }
- return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);
-
#ifdef CONFIG_SYSCTL
static int dist_min;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 576f705d8180..9609ad71dd26 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -224,13 +224,10 @@ static void gre_err(struct sk_buff *skb, u32 info)
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct tnl_ptk_info tpi;
- bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
- iph->ihl * 4) < 0) {
- if (!csum_err) /* ignore csum errors. */
- return;
- }
+ if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
+ iph->ihl * 4) < 0)
+ return;
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, dev_net(skb->dev), info,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index bcadca26523b..ce0ce1401f28 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb)
ip_local_deliver_finish);
}
-static inline bool ip_rcv_options(struct sk_buff *skb)
+static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt;
const struct iphdr *iph;
- struct net_device *dev = skb->dev;
/* It looks as overkill, because not all
IP options require packet mangling.
@@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
}
}
- if (ip_options_rcv_srr(skb))
+ if (ip_options_rcv_srr(skb, dev))
goto drop;
}
@@ -361,7 +360,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
}
#endif
- if (iph->ihl > 5 && ip_rcv_options(skb))
+ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
goto drop;
rt = skb_rtable(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 4cd3b5ad9cee..570cdb547234 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -614,7 +614,7 @@ void ip_forward_options(struct sk_buff *skb)
}
}
-int ip_options_rcv_srr(struct sk_buff *skb)
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt = &(IPCB(skb)->opt);
int srrspace, srrptr;
@@ -649,7 +649,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
+ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 100c86f1f547..4f3decbe6a3a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -492,6 +492,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
+ to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
@@ -1158,13 +1159,17 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
- /*
- * We steal reference to this route, caller should not release it
- */
- *rtp = NULL;
+
cork->fragsize = ip_sk_use_pmtu(sk) ?
- dst_mtu(&rt->dst) : rt->dst.dev->mtu;
+ dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
+
+ if (!inetdev_valid_mtu(cork->fragsize))
+ return -ENETUNREACH;
+
cork->dst = &rt->dst;
+ /* We stole this route, caller should not release it. */
+ *rtp = NULL;
+
cork->length = 0;
cork->ttl = ipc->ttl;
cork->tos = ipc->tos;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e6ee6acac80c..dd5db4cc7d06 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -155,11 +155,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}
- if (flags & TUNNEL_NO_KEY)
- goto skip_key_lookup;
-
hlist_for_each_entry_rcu(t, head, hash_node) {
- if (t->parms.i_key != key ||
+ if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
t->parms.iph.saddr != 0 ||
t->parms.iph.daddr != 0 ||
!(t->dev->flags & IFF_UP))
@@ -171,7 +168,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
cand = t;
}
-skip_key_lookup:
if (cand)
return cand;
@@ -653,13 +649,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
dst = tnl_params->daddr;
if (dst == 0) {
/* NBMA tunnel */
+ struct ip_tunnel_info *tun_info;
if (!skb_dst(skb)) {
dev->stats.tx_fifo_errors++;
goto tx_error;
}
- if (skb->protocol == htons(ETH_P_IP)) {
+ tun_info = skb_tunnel_info(skb);
+ if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
+ ip_tunnel_info_af(tun_info) == AF_INET &&
+ tun_info->key.u.ipv4.dst)
+ dst = tun_info->key.u.ipv4.dst;
+ else if (skb->protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
dst = rt_nexthop(rt, inner_iph->daddr);
}
@@ -1178,10 +1180,8 @@ int ip_tunnel_init(struct net_device *dev)
iph->version = 4;
iph->ihl = 5;
- if (tunnel->collect_md) {
- dev->features |= NETIF_F_NETNS_LOCAL;
+ if (tunnel->collect_md)
netif_keep_dst(dev);
- }
return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 270e79f4d40e..58e0dab06f19 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -208,8 +208,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
int mtu;
if (!dst) {
- dev->stats.tx_carrier_errors++;
- goto tx_error_icmp;
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ struct rtable *rt;
+
+ fl->u.ip4.flowi4_oif = dev->ifindex;
+ fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ dst = &rt->dst;
+ skb_dst_set(skb, dst);
+ break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ fl->u.ip6.flowi6_oif = dev->ifindex;
+ fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+ if (dst->error) {
+ dst_release(dst);
+ dst = NULL;
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ skb_dst_set(skb, dst);
+ break;
+#endif
+ default:
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
}
dst_hold(dst);
@@ -678,9 +709,9 @@ static int __init vti_init(void)
return err;
rtnl_link_failed:
- xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
-xfrm_tunnel_failed:
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+xfrm_tunnel_failed:
+ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
@@ -696,6 +727,7 @@ pernet_dev_failed:
static void __exit vti_fini(void)
{
rtnl_link_unregister(&vti_link_ops);
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index d35815e5967b..e02b86265194 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -403,10 +403,11 @@ next: ;
return 1;
}
-static inline int check_target(struct arpt_entry *e, const char *name)
+static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
struct xt_entry_target *t = arpt_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
@@ -418,8 +419,9 @@ static inline int check_target(struct arpt_entry *e, const char *name)
return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}
-static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
+static int
+find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
+ unsigned int size,
struct xt_percpu_counter_alloc_state *alloc_state)
{
struct xt_entry_target *t;
@@ -438,7 +440,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
return 0;
@@ -513,12 +515,13 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return 0;
}
-static inline void cleanup_entry(struct arpt_entry *e)
+static void cleanup_entry(struct arpt_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
t = arpt_get_target(e);
+ par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_ARP;
@@ -531,7 +534,9 @@ static inline void cleanup_entry(struct arpt_entry *e)
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
-static int translate_table(struct xt_table_info *newinfo, void *entry0,
+static int translate_table(struct net *net,
+ struct xt_table_info *newinfo,
+ void *entry0,
const struct arpt_replace *repl)
{
struct xt_percpu_counter_alloc_state alloc_state = { 0 };
@@ -597,7 +602,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
/* Finally, each sanity check must pass */
i = 0;
xt_entry_foreach(iter, entry0, newinfo->size) {
- ret = find_check_entry(iter, repl->name, repl->size,
+ ret = find_check_entry(iter, net, repl->name, repl->size,
&alloc_state);
if (ret != 0)
break;
@@ -608,7 +613,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
xt_entry_foreach(iter, entry0, newinfo->size) {
if (i-- == 0)
break;
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
}
return ret;
}
@@ -935,7 +940,7 @@ static int __do_replace(struct net *net, const char *name,
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries;
xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
@@ -987,7 +992,7 @@ static int do_replace(struct net *net, const void __user *user,
goto free_newinfo;
}
- ret = translate_table(newinfo, loc_cpu_entry, &tmp);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -999,7 +1004,7 @@ static int do_replace(struct net *net, const void __user *user,
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1164,7 +1169,8 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
}
}
-static int translate_compat_table(struct xt_table_info **pinfo,
+static int translate_compat_table(struct net *net,
+ struct xt_table_info **pinfo,
void **pentry0,
const struct compat_arpt_replace *compatr)
{
@@ -1230,7 +1236,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
repl.num_counters = 0;
repl.counters = NULL;
repl.size = newinfo->size;
- ret = translate_table(newinfo, entry1, &repl);
+ ret = translate_table(net, newinfo, entry1, &repl);
if (ret)
goto free_newinfo;
@@ -1283,7 +1289,7 @@ static int compat_do_replace(struct net *net, void __user *user,
goto free_newinfo;
}
- ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1295,7 +1301,7 @@ static int compat_do_replace(struct net *net, void __user *user,
free_newinfo_untrans:
xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1522,7 +1528,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
return ret;
}
-static void __arpt_unregister_table(struct xt_table *table)
+static void __arpt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
@@ -1534,7 +1540,7 @@ static void __arpt_unregister_table(struct xt_table *table)
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries;
xt_entry_foreach(iter, loc_cpu_entry, private->size)
- cleanup_entry(iter);
+ cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
@@ -1559,7 +1565,7 @@ int arpt_register_table(struct net *net,
loc_cpu_entry = newinfo->entries;
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(newinfo, loc_cpu_entry, repl);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
@@ -1574,7 +1580,7 @@ int arpt_register_table(struct net *net,
ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
if (ret != 0) {
- __arpt_unregister_table(new_table);
+ __arpt_unregister_table(net, new_table);
*res = NULL;
}
@@ -1589,7 +1595,7 @@ void arpt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops)
{
nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
- __arpt_unregister_table(table);
+ __arpt_unregister_table(net, table);
}
/* The built-in targets: standard (NULL) and error. */
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ec48d8eafc7e..8b221398534b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -306,6 +306,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
+ SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 59d8770055ed..ed53bf5d2b68 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -169,6 +169,7 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
*/
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
+ int dif = inet_iif(skb);
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
@@ -181,8 +182,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
- iph->saddr, iph->daddr,
- skb->dev->ifindex);
+ iph->saddr, iph->daddr, dif);
while (sk) {
delivered = 1;
@@ -197,7 +197,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
}
sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
iph->saddr, iph->daddr,
- skb->dev->ifindex);
+ dif);
}
out:
read_unlock(&raw_v4_hashinfo.lock);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c42fb2330b45..8f5c6fa54ac0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -496,15 +496,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
- static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
- net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
+ /* Note the following code is not safe, but this is okay. */
+ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+ get_random_bytes(&net->ipv4.ip_id_key,
+ sizeof(net->ipv4.ip_id_key));
- hash = jhash_3words((__force u32)iph->daddr,
+ hash = siphash_3u32((__force u32)iph->daddr,
(__force u32)iph->saddr,
- iph->protocol ^ net_hash_mix(net),
- ip_idents_hashrnd);
+ iph->protocol,
+ &net->ipv4.ip_id_key);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
@@ -901,16 +903,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (peer->rate_tokens == 0 ||
time_after(jiffies,
(peer->rate_last +
- (ip_rt_redirect_load << peer->rate_tokens)))) {
+ (ip_rt_redirect_load << peer->n_redirects)))) {
__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
- ++peer->rate_tokens;
++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- peer->rate_tokens == ip_rt_redirect_number)
+ peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
@@ -990,21 +991,22 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
if (ip_mtu_locked(dst))
return;
- if (ipv4_mtu(dst) < mtu)
+ if (old_mtu < mtu)
return;
if (mtu < ip_rt_min_pmtu) {
lock = true;
- mtu = ip_rt_min_pmtu;
+ mtu = min(old_mtu, ip_rt_min_pmtu);
}
- if (rt->rt_pmtu == mtu &&
+ if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
return;
@@ -1168,11 +1170,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+ struct ip_options opt;
+ int res;
+
+ /* Recompile ip options since IPCB may not be valid anymore.
+ * Also check we have a reasonable ipv4 header.
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+ return;
+
+ memset(&opt, 0, sizeof(opt));
+ if (ip_hdr(skb)->ihl > 5) {
+ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+ return;
+ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
+ return;
+ }
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
@@ -2192,7 +2222,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
struct fib_result res;
struct rtable *rth;
int orig_oif;
- int err = -ENETUNREACH;
+ int err;
res.tclassid = 0;
res.fi = NULL;
@@ -2207,11 +2237,14 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
rcu_read_lock();
if (fl4->saddr) {
- rth = ERR_PTR(-EINVAL);
if (ipv4_is_multicast(fl4->saddr) ||
ipv4_is_lbcast(fl4->saddr) ||
- ipv4_is_zeronet(fl4->saddr))
+ ipv4_is_zeronet(fl4->saddr)) {
+ rth = ERR_PTR(-EINVAL);
goto out;
+ }
+
+ rth = ERR_PTR(-ENETUNREACH);
/* I removed check for oif == dev_out->oif here.
It was wrong for two reasons:
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 024ab833557d..e202babb14d6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -35,12 +35,15 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
+static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+static int tcp_min_snd_mss_max = 65535;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int one_day_secs = 24 * 3600;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -460,7 +463,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_day_secs
},
{
.procname = "tcp_low_latency",
@@ -836,6 +841,15 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "tcp_min_snd_mss",
+ .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_min_snd_mss_min,
+ .extra2 = &tcp_min_snd_mss_max,
+ },
+ {
.procname = "tcp_probe_threshold",
.data = &init_net.ipv4.sysctl_tcp_probe_threshold,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2ededb32b754..02918e0d635e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2298,9 +2298,11 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
+ tp->delivered = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
+ tp->total_retrans = 0;
inet_csk_delack_init(sk);
/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
* issue in __tcp_select_window()
@@ -2312,6 +2314,12 @@ int tcp_disconnect(struct sock *sk, int flags)
dst_release(sk->sk_rx_dst);
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
+ tp->segs_in = 0;
+ tp->segs_out = 0;
+ tp->bytes_acked = 0;
+ tp->bytes_received = 0;
+ tp->data_segs_in = 0;
+ tp->data_segs_out = 0;
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
@@ -3307,6 +3315,7 @@ void __init tcp_init(void)
unsigned long limit;
unsigned int i;
+ BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
FIELD_SIZEOF(struct sk_buff, cb));
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 7e44d23b0328..3bfab2d41574 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -649,8 +649,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
* bandwidth sample. Delivered is in packets and interval_us in uS and
* ratio will be <<1 for most connections. So delivered is first scaled.
*/
- bw = (u64)rs->delivered * BW_UNIT;
- do_div(bw, rs->interval_us);
+ bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
/* If this sample is application-limited, it is likely to have a very
* low delivered count that represents application behavior rather than
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index a08cedf9d286..910ef01759e7 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
- "parameter for clamping alpha on loss");
-
static struct tcp_congestion_ops dctcp_reno;
static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
}
}
-static void dctcp_state(struct sock *sk, u8 new_state)
+static void dctcp_react_to_loss(struct sock *sk)
{
- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
- struct dctcp *ca = inet_csk_ca(sk);
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
- /* If this extension is enabled, we clamp dctcp_alpha to
- * max on packet loss; the motivation is that dctcp_alpha
- * is an indicator to the extend of congestion and packet
- * loss is an indicator of extreme congestion; setting
- * this in practice turned out to be beneficial, and
- * effectively assumes total congestion which reduces the
- * window by half.
- */
- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
- }
+ ca->loss_cwnd = tp->snd_cwnd;
+ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+ if (new_state == TCP_CA_Recovery &&
+ new_state != inet_csk(sk)->icsk_ca_state)
+ dctcp_react_to_loss(sk);
+ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+ * one loss-adjustment per RTT.
+ */
}
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
case CA_EVENT_ECN_NO_CE:
dctcp_ce_state_1_to_0(sk);
break;
+ case CA_EVENT_LOSS:
+ dctcp_react_to_loss(sk);
+ break;
default:
/* Don't care for the rest. */
break;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cd4f13dda49e..52014c5312b9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -129,7 +129,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
#define REXMIT_LOST 1 /* retransmit packets marked lost */
#define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+ unsigned int len)
{
static bool __once __read_mostly;
@@ -140,8 +141,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
- pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
- dev ? dev->name : "Unknown driver");
+ if (!dev || len >= dev->mtu)
+ pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+ dev ? dev->name : "Unknown driver");
rcu_read_unlock();
}
}
@@ -164,8 +166,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (len >= icsk->icsk_ack.rcv_mss) {
icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
tcp_sk(sk)->advmss);
- if (unlikely(icsk->icsk_ack.rcv_mss != len))
- tcp_gro_dev_warn(sk, skb);
+ /* Account for possibly-removed options */
+ if (unlikely(len > icsk->icsk_ack.rcv_mss +
+ MAX_TCP_OPTION_SPACE))
+ tcp_gro_dev_warn(sk, skb, len);
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
@@ -247,7 +251,7 @@ static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}
static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
@@ -389,11 +393,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int room;
+
+ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
/* Check #1 */
- if (tp->rcv_ssthresh < tp->window_clamp &&
- (int)tp->rcv_ssthresh < tcp_space(sk) &&
- !tcp_under_memory_pressure(sk)) {
+ if (room > 0 && !tcp_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -406,8 +411,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
if (incr) {
incr = max_t(int, incr, 2 * skb->len);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
- tp->window_clamp);
+ tp->rcv_ssthresh += min(room, incr);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
@@ -919,9 +923,10 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
- if (!tp->retransmit_skb_hint ||
- before(TCP_SKB_CB(skb)->seq,
- TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+ if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
+ (tp->retransmit_skb_hint &&
+ before(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
tp->retransmit_skb_hint = skb;
if (!tp->lost_out ||
@@ -1320,7 +1325,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(skb)->seq += shifted;
tcp_skb_pcount_add(prev, pcount);
- BUG_ON(tcp_skb_pcount(skb) < pcount);
+ WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
tcp_skb_pcount_add(skb, -pcount);
/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1387,6 +1392,21 @@ static int skb_can_shift(const struct sk_buff *skb)
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+ int pcount, int shiftlen)
+{
+ /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+ * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+ * to make sure not storing more than 65535 * 8 bytes per skb,
+ * even if current MSS is bigger.
+ */
+ if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+ return 0;
+ if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+ return 0;
+ return skb_shift(to, from, shiftlen);
+}
+
/* Try collapsing SACK blocks spanning across multiple skbs to a single
* skb.
*/
@@ -1398,6 +1418,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev;
int mss;
+ int next_pcount;
int pcount = 0;
int len;
int in_sack;
@@ -1495,7 +1516,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
goto fallback;
- if (!skb_shift(prev, skb, len))
+ if (!tcp_skb_shift(prev, skb, pcount, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
goto out;
@@ -1514,11 +1535,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
goto out;
len = skb->len;
- if (skb_shift(prev, skb, len)) {
- pcount += tcp_skb_pcount(skb);
- tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+ next_pcount = tcp_skb_pcount(skb);
+ if (tcp_skb_shift(prev, skb, next_pcount, len)) {
+ pcount += next_pcount;
+ tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
}
-
out:
state->fack_count += pcount;
return prev;
@@ -1725,8 +1746,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
}
/* Ignore very old stuff early */
- if (!after(sp[used_sacks].end_seq, prior_snd_una))
+ if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
+ if (i == 0)
+ first_sack_index = -1;
continue;
+ }
used_sacks++;
}
@@ -2837,9 +2861,9 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
- if (WARN_ON(!tp->packets_out && tp->sacked_out))
+ if (!tp->packets_out && tp->sacked_out)
tp->sacked_out = 0;
- if (WARN_ON(!tp->sacked_out && tp->fackets_out))
+ if (!tp->sacked_out && tp->fackets_out)
tp->fackets_out = 0;
/* Now state machine starts.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 82c1064ff4aa..e1cf810140b0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -239,7 +239,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_sport,
usin->sin_port);
- inet->inet_id = tp->write_seq ^ jiffies;
+ inet->inet_id = prandom_u32();
err = tcp_connect(sk);
@@ -1307,7 +1307,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
- newinet->inet_id = newtp->write_seq ^ jiffies;
+ newinet->inet_id = prandom_u32();
if (!dst) {
dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -1917,13 +1917,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
+ struct hlist_nulls_node *node;
struct sock *sk = cur;
if (!sk) {
get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
- sk = sk_head(&ilb->head);
+ sk = sk_nulls_head(&ilb->nulls_head);
st->offset = 0;
goto get_sk;
}
@@ -1931,9 +1932,9 @@ get_head:
++st->num;
++st->offset;
- sk = sk_next(sk);
+ sk = sk_nulls_next(sk);
get_sk:
- sk_for_each_from(sk) {
+ sk_nulls_for_each_from(sk, node) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == st->family)
@@ -2456,6 +2457,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_ecn_fallback = 1;
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+ net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6f35cdd5f2f0..ea7e9308c555 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -707,8 +707,9 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
min_t(unsigned int, eff_sacks,
(remaining - TCPOLEN_SACK_BASE_ALIGNED) /
TCPOLEN_SACK_PERBLOCK);
- size += TCPOLEN_SACK_BASE_ALIGNED +
- opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+ if (likely(opts->num_sack_blocks))
+ size += TCPOLEN_SACK_BASE_ALIGNED +
+ opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
}
return size;
@@ -1175,6 +1176,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
int nsize, old_factor;
+ long limit;
int nlen;
u8 flags;
@@ -1185,6 +1187,19 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
if (nsize < 0)
nsize = 0;
+ /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
+ * We need some allowance to not penalize applications setting small
+ * SO_SNDBUF values.
+ * Also allow first and last skb in retransmit queue to be split.
+ */
+ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+ if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
+ skb != tcp_rtx_queue_head(sk) &&
+ skb != tcp_rtx_queue_tail(sk))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ return -ENOMEM;
+ }
+
if (skb_unclone(skb, gfp))
return -ENOMEM;
@@ -1355,8 +1370,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
mss_now -= icsk->icsk_ext_hdr_len;
/* Then reserve room for full set of TCP options and 8 bytes of data */
- if (mss_now < 48)
- mss_now = 48;
+ mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
return mss_now;
}
@@ -1930,7 +1944,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (len <= skb->len)
break;
- if (unlikely(TCP_SKB_CB(skb)->eor))
+ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
return false;
len -= skb->len;
@@ -2053,6 +2067,7 @@ static int tcp_mtu_probe(struct sock *sk)
* we need to propagate it to the new skb.
*/
TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+ tcp_skb_collapse_tstamp(nskb, skb);
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
} else {
@@ -2218,6 +2233,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (tcp_small_queue_check(sk, skb, 0))
break;
+ /* Argh, we hit an empty skb(), presumably a thread
+ * is sleeping in sendmsg()/sk_stream_wait_memory().
+ * We do not want to send a pure-ack packet and have
+ * a strange looking rtx queue with empty packet(s).
+ */
+ if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
+ break;
+
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 69523389f067..761a198ed5f3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -140,6 +140,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
mss = max(mss, 68 - tp->tcp_header_len);
+ mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
@@ -371,7 +372,7 @@ static void tcp_probe_timer(struct sock *sk)
return;
}
- if (icsk->icsk_probes_out > max_probes) {
+ if (icsk->icsk_probes_out >= max_probes) {
abort: tcp_write_err(sk);
} else {
/* Only send another probe if we didn't close things up. */
@@ -477,11 +478,12 @@ void tcp_retransmit_timer(struct sock *sk)
goto out_reset_timer;
}
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
if (tcp_write_timeout(sk))
goto out;
if (icsk->icsk_retransmits == 0) {
- int mib_idx;
+ int mib_idx = 0;
if (icsk->icsk_ca_state == TCP_CA_Recovery) {
if (tcp_is_sack(tp))
@@ -496,10 +498,9 @@ void tcp_retransmit_timer(struct sock *sk)
mib_idx = LINUX_MIB_TCPSACKFAILURES;
else
mib_idx = LINUX_MIB_TCPRENOFAILURES;
- } else {
- mib_idx = LINUX_MIB_TCPTIMEOUTS;
}
- __NET_INC_STATS(sock_net(sk), mib_idx);
+ if (mib_idx)
+ __NET_INC_STATS(sock_net(sk), mib_idx);
}
tcp_enter_loss(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5d4b5e0f6b5e..1bb1e27d3d13 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -569,7 +569,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 622e158a6fc4..1805413cd225 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,7 +108,8 @@ static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
const struct iphdr *iph = ip_hdr(skb);
- u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
+ int ihl = iph->ihl;
+ u8 *xprth = skb_network_header(skb) + ihl * 4;
struct flowi4 *fl4 = &fl->u.ip4;
int oif = 0;
@@ -119,6 +120,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
fl4->flowi4_mark = skb->mark;
fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
+ fl4->flowi4_proto = iph->protocol;
+ fl4->daddr = reverse ? iph->saddr : iph->daddr;
+ fl4->saddr = reverse ? iph->daddr : iph->saddr;
+ fl4->flowi4_tos = iph->tos;
+
if (!ip_is_fragment(iph)) {
switch (iph->protocol) {
case IPPROTO_UDP:
@@ -130,7 +136,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ports;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ports = (__be16 *)xprth;
fl4->fl4_sport = ports[!!reverse];
@@ -143,7 +149,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 2 - skb->data)) {
u8 *icmp;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
icmp = xprth;
fl4->fl4_icmp_type = icmp[0];
@@ -156,7 +162,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be32 *ehdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ehdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ehdr[0];
@@ -168,7 +174,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 8 - skb->data)) {
__be32 *ah_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ah_hdr = (__be32 *)xprth;
fl4->fl4_ipsec_spi = ah_hdr[1];
@@ -180,7 +186,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
pskb_may_pull(skb, xprth + 4 - skb->data)) {
__be16 *ipcomp_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
ipcomp_hdr = (__be16 *)xprth;
fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
@@ -193,7 +199,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
__be16 *greflags;
__be32 *gre_hdr;
- xprth = skb_network_header(skb) + iph->ihl * 4;
+ xprth = skb_network_header(skb) + ihl * 4;
greflags = (__be16 *)xprth;
gre_hdr = (__be32 *)xprth;
@@ -210,10 +216,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
break;
}
}
- fl4->flowi4_proto = iph->protocol;
- fl4->daddr = reverse ? iph->saddr : iph->daddr;
- fl4->saddr = reverse ? iph->daddr : iph->saddr;
- fl4->flowi4_tos = iph->tos;
}
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4ce7f9195151..a4c00242a90b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3189,6 +3189,10 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_6LOWPAN) &&
(dev->type != ARPHRD_NONE)) {
/* Alas, we support only Ethernet autoconfiguration. */
+ idev = __in6_dev_get(dev);
+ if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
+ dev->flags & IFF_MULTICAST)
+ ipv6_mc_up(idev);
return;
}
@@ -5443,13 +5447,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
switch (event) {
case RTM_NEWADDR:
/*
- * If the address was optimistic
- * we inserted the route at the start of
- * our DAD process, so we don't need
- * to do it again
+ * If the address was optimistic we inserted the route at the
+ * start of our DAD process, so we don't need to do it again.
+ * If the device was taken down in the middle of the DAD
+ * cycle there is a race where we could get here without a
+ * host route, so nothing to insert. That will be fixed when
+ * the device is brought up.
*/
- if (!rcu_access_pointer(ifp->rt->rt6i_node))
+ if (ifp->rt && !rcu_access_pointer(ifp->rt->rt6i_node)) {
ip6_ins_rt(ifp->rt);
+ } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
+ pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
+ &ifp->addr, ifp->idev->dev->name);
+ }
+
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
if (!ipv6_addr_any(&ifp->peer_addr))
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 02761c9fe43e..d47cab6d7c6d 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -133,9 +133,10 @@ struct sock *inet6_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
+ struct hlist_nulls_node *node;
u32 phash = 0;
- sk_for_each(sk, &ilb->head) {
+ sk_nulls_for_each(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5da864997495..85c7e250c7a8 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -784,8 +784,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
found++;
break;
}
- if (rt_can_ecmp)
- fallback_ins = fallback_ins ?: ins;
+ fallback_ins = fallback_ins ?: ins;
goto next_iter;
}
@@ -825,7 +824,9 @@ next_iter:
}
if (fallback_ins && !found) {
- /* No ECMP-able route found, replace first non-ECMP one */
+ /* No matching route with same ecmp-able-ness found, replace
+ * first matching route
+ */
ins = fallback_ins;
iter = *ins;
found++;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b82e439804d1..6a6011160f18 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
return fl;
}
+static void fl_free_rcu(struct rcu_head *head)
+{
+ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+ if (fl->share == IPV6_FL_S_PROCESS)
+ put_pid(fl->owner.pid);
+ kfree(fl->opt);
+ kfree(fl);
+}
+
static void fl_free(struct ip6_flowlabel *fl)
{
- if (fl) {
- if (fl->share == IPV6_FL_S_PROCESS)
- put_pid(fl->owner.pid);
- kfree(fl->opt);
- kfree_rcu(fl, rcu);
- }
+ if (fl)
+ call_rcu(&fl->rcu, fl_free_rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
@@ -248,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
rcu_read_lock_bh();
for_each_sk_fl_rcu(np, sfl) {
struct ip6_flowlabel *fl = sfl->fl;
- if (fl->label == label) {
+
+ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
fl->lastuse = jiffies;
- atomic_inc(&fl->users);
rcu_read_unlock_bh();
return fl;
}
@@ -617,7 +623,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
goto done;
}
fl1 = sfl->fl;
- atomic_inc(&fl1->users);
+ if (!atomic_inc_not_zero(&fl1->users))
+ fl1 = NULL;
break;
}
}
@@ -634,9 +641,9 @@ recheck:
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
((fl1->share == IPV6_FL_S_PROCESS) &&
- (fl1->owner.pid == fl->owner.pid)) ||
+ (fl1->owner.pid != fl->owner.pid)) ||
((fl1->share == IPV6_FL_S_USER) &&
- uid_eq(fl1->owner.uid, fl->owner.uid)))
+ !uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;
err = -ENOMEM;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index aacfb4bce153..e726a61ae6dc 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,6 +168,16 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
+ /* While RFC4291 is not explicit about v4mapped addresses
+ * in IPv6 headers, it seems clear linux dual-stack
+ * model can not deal properly with these.
+ * Security models could be fooled by ::ffff:127.0.0.1 for example.
+ *
+ * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+ */
+ if (ipv6_addr_v4mapped(&hdr->saddr))
+ goto err;
+
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b723987761be..11407dd6bc7c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -592,7 +592,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
- unsigned int mtu, hlen, left, len;
+ unsigned int mtu, hlen, left, len, nexthdr_offset;
int hroom, troom;
__be32 frag_id;
int ptr, offset = 0, err = 0;
@@ -603,6 +603,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
goto fail;
hlen = err;
nexthdr = *prevhdr;
+ nexthdr_offset = prevhdr - skb_network_header(skb);
mtu = ip6_skb_dst_mtu(skb);
@@ -637,6 +638,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
(err = skb_checksum_help(skb)))
goto fail;
+ prevhdr = skb_network_header(skb) + nexthdr_offset;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f89516d04150..ed795d50a8d2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -634,7 +634,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
IPPROTO_IPIP,
RT_TOS(eiph->tos), 0);
if (IS_ERR(rt) ||
- rt->dst.dev->type != ARPHRD_TUNNEL) {
+ rt->dst.dev->type != ARPHRD_TUNNEL6) {
if (!IS_ERR(rt))
ip_rt_put(rt);
goto out;
@@ -644,7 +644,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
ip_rt_put(rt);
if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
skb2->dev) ||
- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
goto out;
}
@@ -1275,11 +1275,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowi6_mark = skb->mark;
}
+ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
-
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1362,11 +1362,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowi6_mark = skb->mark;
}
+ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
-
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1861,10 +1861,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
if (err)
return err;
ip6_tnl_link_config(t);
- if (t->parms.collect_md) {
- dev->features |= NETIF_F_NETNS_LOCAL;
+ if (t->parms.collect_md)
netif_keep_dst(dev);
- }
return 0;
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index c2b2ee71fc6c..b9f5155a77ef 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -315,7 +315,7 @@ static int vti6_rcv(struct sk_buff *skb)
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
rcu_read_unlock();
- return 0;
+ goto discard;
}
ipv6h = ipv6_hdr(skb);
@@ -453,8 +453,35 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
int err = -1;
int mtu;
- if (!dst)
- goto tx_err_link_failure;
+ if (!dst) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ struct rtable *rt;
+
+ fl->u.ip4.flowi4_oif = dev->ifindex;
+ fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+ rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+ if (IS_ERR(rt))
+ goto tx_err_link_failure;
+ dst = &rt->dst;
+ skb_dst_set(skb, dst);
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ fl->u.ip6.flowi6_oif = dev->ifindex;
+ fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+ dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+ if (dst->error) {
+ dst_release(dst);
+ dst = NULL;
+ goto tx_err_link_failure;
+ }
+ skb_dst_set(skb, dst);
+ break;
+ default:
+ goto tx_err_link_failure;
+ }
+ }
dst_hold(dst);
dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 41f67629ae59..f38b22f54c09 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1668,6 +1668,10 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
struct net *net = sock_net(sk);
struct mr6_table *mrt;
+ if (sk->sk_type != SOCK_RAW ||
+ inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+ return -EOPNOTSUPP;
+
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
@@ -1679,9 +1683,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
switch (optname) {
case MRT6_INIT:
- if (sk->sk_type != SOCK_RAW ||
- inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
- return -EOPNOTSUPP;
if (optlen < sizeof(int))
return -EINVAL;
@@ -1818,6 +1819,10 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
struct net *net = sock_net(sk);
struct mr6_table *mrt;
+ if (sk->sk_type != SOCK_RAW ||
+ inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
+ return -EOPNOTSUPP;
+
mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
if (!mrt)
return -ENOENT;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 81fd35ed8732..1080770b5eaf 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -184,9 +184,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
- } else if (sk->sk_protocol != IPPROTO_TCP)
+ } else if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_prot != &tcpv6_prot) {
+ retv = -EBUSY;
+ break;
+ }
break;
-
+ } else {
+ break;
+ }
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 40262abb15db..e065d48b31b9 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -772,12 +772,13 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
im->idev = pmc->idev;
im->mca_crcount = idev->mc_qrv;
if (im->mca_sfmode == MCAST_INCLUDE) {
- im->mca_tomb = pmc->mca_tomb;
- im->mca_sources = pmc->mca_sources;
+ swap(im->mca_tomb, pmc->mca_tomb);
+ swap(im->mca_sources, pmc->mca_sources);
for (psf = im->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
+ ip6_mc_clear_src(pmc);
kfree(pmc);
}
spin_unlock_bh(&im->mca_lock);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e46185377981..0b53d1907e4a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -33,9 +33,8 @@
#include <net/sock.h>
#include <net/snmp.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
-#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
@@ -52,14 +51,6 @@
static const char nf_frags_cache_name[] = "nf-frags";
-struct nf_ct_frag6_skb_cb
-{
- struct inet6_skb_parm h;
- int offset;
-};
-
-#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb *)((skb)->cb))
-
static struct inet_frags nf_frags;
#ifdef CONFIG_SYSCTL
@@ -145,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
}
#endif
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
+
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -158,7 +152,7 @@ static void nf_ct_frag6_expire(unsigned long data)
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, nf_frag.frags);
- ip6_expire_frag_queue(net, fq);
+ ip6frag_expire_frag_queue(net, fq);
}
/* Creation primitives. */
@@ -185,9 +179,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
const struct frag_hdr *fhdr, int nhoff)
{
- struct sk_buff *prev, *next;
unsigned int payload_len;
- int offset, end;
+ struct net_device *dev;
+ struct sk_buff *prev;
+ int offset, end, err;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE) {
@@ -262,55 +257,25 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
goto err;
}
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
+ /* Makes sure compiler wont do silly aliasing games */
+ barrier();
+
prev = fq->q.fragments_tail;
- if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (NFCT_FRAG6_CB(next)->offset >= offset)
- break; /* bingo! */
- prev = next;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err) {
+ if (err == IPFRAG_DUP) {
+ /* No error for duplicates, pretend they got queued. */
+ kfree_skb(skb);
+ return -EINPROGRESS;
+ }
+ goto insert_error;
}
-found:
- /* RFC5722, Section 4:
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments, including those not yet received) MUST be silently
- * discarded.
- */
+ if (dev)
+ fq->iif = dev->ifindex;
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
- goto discard_fq;
-
- /* Look for overlap with succeeding segment. */
- if (next && NFCT_FRAG6_CB(next)->offset < end)
- goto discard_fq;
-
- NFCT_FRAG6_CB(skb)->offset = offset;
-
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
-
- if (skb->dev) {
- fq->iif = skb->dev->ifindex;
- skb->dev = NULL;
- }
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
fq->ecn |= ecn;
@@ -326,11 +291,27 @@ found:
fq->q.flags |= INET_FRAG_FIRST_IN;
}
- return 0;
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ err = nf_ct_frag6_reasm(fq, skb, prev, dev);
+ skb->_skb_refdst = orefdst;
-discard_fq:
+ /* After queue has assumed skb ownership, only 0 or
+ * -EINPROGRESS must be returned.
+ */
+ return err ? -EINPROGRESS : 0;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
+insert_error:
inet_frag_kill(&fq->q);
err:
+ skb_dst_drop(skb);
return -EINVAL;
}
@@ -340,141 +321,67 @@ err:
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
- *
- * returns true if *prev skb has been transformed into the reassembled
- * skb, false otherwise.
*/
-static bool
-nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
+static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
- WARN_ON(head == NULL);
- WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
-
ecn = ip_frag_ecn_table[fq->ecn];
if (unlikely(ecn == 0xff))
- return false;
+ goto err;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto err;
+
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN) {
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
payload_len);
- return false;
- }
-
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- return false;
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (clone == NULL)
- return false;
-
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
-
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
- /* morph head into last received skb: prev.
- *
- * This allows callers of ipv6 conntrack defrag to continue
- * to use the last skb(frag) passed into the reasm engine.
- * The last skb frag 'silently' turns into the full reassembled skb.
- *
- * Since prev is also part of q->fragments we have to clone it first.
- */
- if (head != prev) {
- struct sk_buff *iter;
-
- fp = skb_clone(prev, GFP_ATOMIC);
- if (!fp)
- return false;
-
- fp->next = prev->next;
-
- iter = head;
- while (iter) {
- if (iter->next == prev) {
- iter->next = fp;
- break;
- }
- iter = iter->next;
- }
-
- skb_morph(prev, head);
- prev->next = head->next;
- consume_skb(head);
- head = prev;
+ goto err;
}
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
- skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_shinfo(head)->frag_list = head->next;
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- for (fp = head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp->sk = NULL;
- }
- sub_frag_mem_limit(fq->q.net, head->truesize);
+ skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
- head->ignore_df = 1;
- head->next = NULL;
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+
+ skb->ignore_df = 1;
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_partial(skb_network_header(head),
- skb_network_header_len(head),
- head->csum);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(skb_network_header(skb),
+ skb_network_header_len(skb),
+ skb->csum);
fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
+
+ return 0;
- return true;
+err:
+ inet_frag_kill(&fq->q);
+ return -EINVAL;
}
/*
@@ -543,7 +450,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
u16 savethdr = skb->transport_header;
- struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
struct frag_queue *fq;
@@ -566,10 +472,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- return -EINVAL;
-
skb_orphan(skb);
fq = fq_find(net, fhdr->identification, user, hdr,
skb->dev ? skb->dev->ifindex : 0);
@@ -581,24 +483,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
spin_lock_bh(&fq->q.lock);
ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
- if (ret < 0) {
- if (ret == -EPROTO) {
- skb->transport_header = savethdr;
- ret = 0;
- }
- goto out_unlock;
- }
-
- /* after queue has assumed skb ownership, only 0 or -EINPROGRESS
- * must be returned.
- */
- ret = -EINPROGRESS;
- if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len &&
- nf_ct_frag6_reasm(fq, skb, dev))
+ if (ret == -EPROTO) {
+ skb->transport_header = savethdr;
ret = 0;
+ }
-out_unlock:
spin_unlock_bh(&fq->q.lock);
inet_frag_put(&fq->q);
return ret;
@@ -634,16 +523,24 @@ static struct pernet_operations nf_ct_net_ops = {
.exit = nf_ct_net_exit,
};
+static const struct rhashtable_params nfct_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = ip6frag_key_hashfn,
+ .obj_hashfn = ip6frag_obj_hashfn,
+ .obj_cmpfn = ip6frag_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
int nf_ct_frag6_init(void)
{
int ret = 0;
- nf_frags.constructor = ip6_frag_init;
+ nf_frags.constructor = ip6frag_init;
nf_frags.destructor = NULL;
nf_frags.qsize = sizeof(struct frag_queue);
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.frags_cache_name = nf_frags_cache_name;
- nf_frags.rhash_params = ip6_rhash_params;
+ nf_frags.rhash_params = nfct_rhash_params;
ret = inet_frags_init(&nf_frags);
if (ret)
goto out;
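
The rework above also changes the contract of nf_ct_frag6_gather(): the fragment queue now takes ownership of the skb, and the function returns 0 when the skb may keep flowing through the stack (fully reassembled in place, or not actually fragmented), -EINPROGRESS while fragments are still being collected, and another negative errno on failure. A minimal sketch of how a defrag hook might dispatch on these codes (hypothetical hook body; the real hook lives in nf_defrag_ipv6_hooks.c):

    err = nf_ct_frag6_gather(net, skb, user);
    if (err == -EINPROGRESS)
        return NF_STOLEN;                  /* skb is now owned by the frag queue */
    return err == 0 ? NF_ACCEPT : NF_DROP;
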
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f06b0471f39f..c4070e9c4260 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -14,8 +14,7 @@
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
-#include <net/ipv6.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index a338bbc33cf3..6a6d01cb1ace 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -10,15 +10,25 @@
#include <net/secure_seq.h>
#include <linux/netfilter.h>
-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
+static u32 __ipv6_select_ident(struct net *net,
const struct in6_addr *dst,
const struct in6_addr *src)
{
+ const struct {
+ struct in6_addr dst;
+ struct in6_addr src;
+ } __aligned(SIPHASH_ALIGNMENT) combined = {
+ .dst = *dst,
+ .src = *src,
+ };
u32 hash, id;
- hash = __ipv6_addr_jhash(dst, hashrnd);
- hash = __ipv6_addr_jhash(src, hash);
- hash ^= net_hash_mix(net);
+ /* Note: the lazy key init below is racy, but harmless: at worst the key is written twice. */
+ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
+ get_random_bytes(&net->ipv4.ip_id_key,
+ sizeof(net->ipv4.ip_id_key));
+
+ hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
* set the high-order bit instead, thus minimizing possible future
@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
*/
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
- static u32 ip6_proxy_idents_hashrnd __read_mostly;
struct in6_addr buf[2];
struct in6_addr *addrs;
u32 id;
@@ -53,11 +62,7 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
if (!addrs)
return;
- net_get_random_once(&ip6_proxy_idents_hashrnd,
- sizeof(ip6_proxy_idents_hashrnd));
-
- id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
- &addrs[1], &addrs[0]);
+ id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
skb_shinfo(skb)->ip6_frag_id = htonl(id);
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
{
- static u32 ip6_idents_hashrnd __read_mostly;
u32 id;
- net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
- id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
+ id = __ipv6_select_ident(net, daddr, saddr);
return htonl(id);
}
EXPORT_SYMBOL(ipv6_select_ident);
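
This hunk replaces the jhash-based ident generator with keyed SipHash over a fixed-layout, SIPHASH_ALIGNMENT-aligned struct holding the two addresses, keyed by a lazily initialised per-netns secret; a keyed PRF makes the resulting fragment IDs far harder for an off-path attacker to predict than a jhash with a 32-bit seed. A stripped-down sketch of the same pattern with a single global key (demo_* names are illustrative; the real code keys per network namespace as shown above):

    #include <linux/in6.h>
    #include <linux/net.h>
    #include <linux/siphash.h>

    static siphash_key_t demo_id_key __read_mostly;

    static u32 demo_select_ident(const struct in6_addr *dst,
                                 const struct in6_addr *src)
    {
        struct {
            struct in6_addr dst;
            struct in6_addr src;
        } __aligned(SIPHASH_ALIGNMENT) combined = {
            .dst = *dst,
            .src = *src,
        };

        net_get_random_once(&demo_id_key, sizeof(demo_id_key));
        return (u32)siphash(&combined, sizeof(combined), &demo_id_key);
    }
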
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 982868193dbb..e209ae19fe78 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -239,7 +239,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
return ping_proc_register(net, &ping_v6_seq_afinfo);
}
-static void __net_init ping_v6_proc_exit_net(struct net *net)
+static void __net_exit ping_v6_proc_exit_net(struct net *net)
{
return ping_proc_unregister(net, &ping_v6_seq_afinfo);
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a4f979ff31b9..301978df650e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -283,7 +283,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
goto out_unlock;
+ }
+ if (sk->sk_bound_dev_if) {
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
@@ -772,6 +774,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct sockcm_cookie sockc;
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
+ int hdrincl;
u16 proto;
int err;
@@ -785,6 +788,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ /* hdrincl should be READ_ONCE(inet->hdrincl)
+ * but READ_ONCE() doesn't work with bit fields.
+ * Doing this indirectly yields the same result.
+ */
+ hdrincl = inet->hdrincl;
+ hdrincl = READ_ONCE(hdrincl);
+
/*
* Get and verify the address.
*/
@@ -878,11 +888,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
- rfv.msg = msg;
- rfv.hlen = 0;
- err = rawv6_probe_proto_opt(&rfv, &fl6);
- if (err)
- goto out;
+
+ if (!hdrincl) {
+ rfv.msg = msg;
+ rfv.hlen = 0;
+ err = rawv6_probe_proto_opt(&rfv, &fl6);
+ if (err)
+ goto out;
+ }
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
@@ -899,7 +912,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- if (inet->hdrincl)
+ if (hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
if (ipc6.tclass < 0)
@@ -922,7 +935,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto do_confirm;
back_from_confirm:
- if (inet->hdrincl)
+ if (hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
else {
ipc6.opt = opt;
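
The rawv6_sendmsg() changes take one snapshot of inet->hdrincl and use it for every later decision (probing protocol options, setting FLOWI_FLAG_KNOWN_NH, choosing the send path), so a concurrent setsockopt() flipping IPV6_HDRINCL cannot make those checks disagree mid-send. Because hdrincl is a bit field, READ_ONCE() cannot be applied to it directly, so the value is first copied into a plain int and the copy is read through READ_ONCE(), as the in-code comment explains. The idiom in general form (hypothetical struct, not from the tree):

    /* Snapshot-once idiom for a racy bit field. */
    struct demo_opts {
        unsigned int hdrincl:1;         /* may be flipped by setsockopt() */
    };

    static bool demo_hdrincl(const struct demo_opts *o)
    {
        int hdrincl = o->hdrincl;       /* widen: READ_ONCE() rejects bit fields */

        hdrincl = READ_ONCE(hdrincl);   /* one snapshot, reused for every check */
        return hdrincl != 0;
    }
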
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 74ffbcb306a6..3f488555999e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -57,18 +57,11 @@
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
-#include <net/inet_frag.h>
+#include <net/ipv6_frag.h>
#include <net/inet_ecn.h>
static const char ip6_frag_cache_name[] = "ip6-frags";
-struct ip6frag_skb_cb {
- struct inet6_skb_parm h;
- int offset;
-};
-
-#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
-
static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
@@ -76,63 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
static struct inet_frags ip6_frags;
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev);
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a)
-{
- struct frag_queue *fq = container_of(q, struct frag_queue, q);
- const struct frag_v6_compare_key *key = a;
-
- q->key.v6 = *key;
- fq->ecn = 0;
-}
-EXPORT_SYMBOL(ip6_frag_init);
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
-{
- struct net_device *dev = NULL;
- struct sk_buff *head;
-
- rcu_read_lock();
- spin_lock(&fq->q.lock);
-
- if (fq->q.flags & INET_FRAG_COMPLETE)
- goto out;
-
- inet_frag_kill(&fq->q);
-
- dev = dev_get_by_index_rcu(net, fq->iif);
- if (!dev)
- goto out;
-
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
- __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
-
- /* Don't send error if the first segment did not arrive. */
- head = fq->q.fragments;
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
- goto out;
-
- /* But use as source device on which LAST ARRIVED
- * segment was received. And do not use fq->dev
- * pointer directly, device might already disappeared.
- */
- head->dev = dev;
- skb_get(head);
- spin_unlock(&fq->q.lock);
-
- icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
- kfree_skb(head);
- goto out_rcu_unlock;
-
-out:
- spin_unlock(&fq->q.lock);
-out_rcu_unlock:
- rcu_read_unlock();
- inet_frag_put(&fq->q);
-}
-EXPORT_SYMBOL(ip6_expire_frag_queue);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
static void ip6_frag_expire(unsigned long data)
{
@@ -142,7 +80,7 @@ static void ip6_frag_expire(unsigned long data)
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, ipv6.frags);
- ip6_expire_frag_queue(net, fq);
+ ip6frag_expire_frag_queue(net, fq);
}
static struct frag_queue *
@@ -169,27 +107,29 @@ fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
}
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
- struct frag_hdr *fhdr, int nhoff)
+ struct frag_hdr *fhdr, int nhoff,
+ u32 *prob_offset)
{
- struct sk_buff *prev, *next;
- struct net_device *dev;
- int offset, end;
struct net *net = dev_net(skb_dst(skb)->dev);
+ int offset, end, fragsize;
+ struct sk_buff *prev_tail;
+ struct net_device *dev;
+ int err = -ENOENT;
u8 ecn;
if (fq->q.flags & INET_FRAG_COMPLETE)
goto err;
+ err = -EINVAL;
offset = ntohs(fhdr->frag_off) & ~0x7;
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
if ((unsigned int)end > IPV6_MAXPLEN) {
- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_INHDRERRORS);
- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
- ((u8 *)&fhdr->frag_off -
- skb_network_header(skb)));
+ *prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+ /* note that if prob_offset is set, the skb is freed elsewhere,
+ * we do not free it here.
+ */
return -1;
}
@@ -209,7 +149,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
*/
if (end < fq->q.len ||
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
- goto err;
+ goto discard_fq;
fq->q.flags |= INET_FRAG_LAST_IN;
fq->q.len = end;
} else {
@@ -220,84 +160,51 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
/* RFC2460 says always send parameter problem in
* this case. -DaveM
*/
- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_INHDRERRORS);
- icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
- offsetof(struct ipv6hdr, payload_len));
+ *prob_offset = offsetof(struct ipv6hdr, payload_len);
return -1;
}
if (end > fq->q.len) {
/* Some bits beyond end -> corruption. */
if (fq->q.flags & INET_FRAG_LAST_IN)
- goto err;
+ goto discard_fq;
fq->q.len = end;
}
}
if (end == offset)
- goto err;
+ goto discard_fq;
+ err = -ENOMEM;
/* Point into the IP datagram 'data' part. */
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
- goto err;
-
- if (pskb_trim_rcsum(skb, end - offset))
- goto err;
-
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = fq->q.fragments_tail;
- if (!prev || FRAG6_CB(prev)->offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = fq->q.fragments; next != NULL; next = next->next) {
- if (FRAG6_CB(next)->offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* RFC5722, Section 4, amended by Errata ID : 3089
- * When reassembling an IPv6 datagram, if
- * one or more its constituent fragments is determined to be an
- * overlapping fragment, the entire datagram (and any constituent
- * fragments) MUST be silently discarded.
- */
-
- /* Check for overlap with preceding fragment. */
- if (prev &&
- (FRAG6_CB(prev)->offset + prev->len) > offset)
goto discard_fq;
- /* Look for overlap with succeeding segment. */
- if (next && FRAG6_CB(next)->offset < end)
+ err = pskb_trim_rcsum(skb, end - offset);
+ if (err)
goto discard_fq;
- FRAG6_CB(skb)->offset = offset;
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
+ /* Makes sure the compiler won't do silly aliasing games */
+ barrier();
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- fq->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- fq->q.fragments = skb;
+ prev_tail = fq->q.fragments_tail;
+ err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+ if (err)
+ goto insert_error;
- dev = skb->dev;
- if (dev) {
+ if (dev)
fq->iif = dev->ifindex;
- skb->dev = NULL;
- }
+
fq->q.stamp = skb->tstamp;
fq->q.meat += skb->len;
fq->ecn |= ecn;
add_frag_mem_limit(fq->q.net, skb->truesize);
+ fragsize = -skb_network_offset(skb) + skb->len;
+ if (fragsize > fq->q.max_size)
+ fq->q.max_size = fragsize;
+
/* The first fragment.
* nhoffset is obtained from the first fragment, of course.
*/
@@ -308,44 +215,48 @@ found:
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len) {
- int res;
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- res = ip6_frag_reasm(fq, prev, dev);
+ err = ip6_frag_reasm(fq, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
- return res;
+ return err;
}
skb_dst_drop(skb);
- return -1;
+ return -EINPROGRESS;
+insert_error:
+ if (err == IPFRAG_DUP) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_REASM_OVERLAPS);
discard_fq:
inet_frag_kill(&fq->q);
-err:
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
+err:
kfree_skb(skb);
- return -1;
+ return err;
}
/*
* Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
*
* It is called with locked fq, and caller must check that
* queue is eligible for reassembly i.e. it is not COMPLETE,
* the last and the first frames arrived and all the bits are here.
*/
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
- struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
- struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len;
unsigned int nhoff;
- int sum_truesize;
+ void *reasm_data;
+ int payload_len;
u8 ecn;
inet_frag_kill(&fq->q);
@@ -354,113 +265,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (unlikely(ecn == 0xff))
goto out_fail;
- /* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
-
- if (!fp)
- goto out_oom;
-
- fp->next = head->next;
- if (!fp->next)
- fq->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, fq->q.fragments);
- head->next = fq->q.fragments->next;
-
- consume_skb(fq->q.fragments);
- fq->q.fragments = head;
- }
-
- WARN_ON(head == NULL);
- WARN_ON(FRAG6_CB(head)->offset != 0);
+ reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
+ if (!reasm_data)
+ goto out_oom;
- /* Unfragmented part is taken from the first segment. */
- payload_len = ((head->data - skb_network_header(head)) -
+ payload_len = ((skb->data - skb_network_header(skb)) -
sizeof(struct ipv6hdr) + fq->q.len -
sizeof(struct frag_hdr));
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
- /* Head of list must not be cloned. */
- if (skb_unclone(head, GFP_ATOMIC))
- goto out_oom;
-
- /* If the first fragment is fragmented itself, we split
- * it to two chunks: the first with data and paged part
- * and the second, holding only fragments. */
- if (skb_has_frag_list(head)) {
- struct sk_buff *clone;
- int i, plen = 0;
-
- clone = alloc_skb(0, GFP_ATOMIC);
- if (!clone)
- goto out_oom;
- clone->next = head->next;
- head->next = clone;
- skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
- skb_frag_list_init(head);
- for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
- plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
- clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
- clone->csum = 0;
- clone->ip_summed = head->ip_summed;
- add_frag_mem_limit(fq->q.net, clone->truesize);
- }
-
/* We have to remove fragment header from datagram and to relocate
* header in order to calculate ICV correctly. */
nhoff = fq->nhoffset;
- skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
- memmove(head->head + sizeof(struct frag_hdr), head->head,
- (head->data - head->head) - sizeof(struct frag_hdr));
- if (skb_mac_header_was_set(head))
- head->mac_header += sizeof(struct frag_hdr);
- head->network_header += sizeof(struct frag_hdr);
-
- skb_reset_transport_header(head);
- skb_push(head, head->data - skb_network_header(head));
-
- sum_truesize = head->truesize;
- for (fp = head->next; fp;) {
- bool headstolen;
- int delta;
- struct sk_buff *next = fp->next;
-
- sum_truesize += fp->truesize;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
-
- if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
- kfree_skb_partial(fp, headstolen);
- } else {
- if (!skb_shinfo(head)->frag_list)
- skb_shinfo(head)->frag_list = fp;
- head->data_len += fp->len;
- head->len += fp->len;
- head->truesize += fp->truesize;
- }
- fp = next;
- }
- sub_frag_mem_limit(fq->q.net, sum_truesize);
+ skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+ memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+ (skb->data - skb->head) - sizeof(struct frag_hdr));
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header += sizeof(struct frag_hdr);
+ skb->network_header += sizeof(struct frag_hdr);
+
+ skb_reset_transport_header(skb);
- head->next = NULL;
- head->dev = dev;
- head->tstamp = fq->q.stamp;
- ipv6_hdr(head)->payload_len = htons(payload_len);
- ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
- IP6CB(head)->nhoff = nhoff;
- IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+
+ skb->dev = dev;
+ ipv6_hdr(skb)->payload_len = htons(payload_len);
+ ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+ IP6CB(skb)->nhoff = nhoff;
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ IP6CB(skb)->frag_max_size = fq->q.max_size;
/* Yes, and fold redundant checksum back. 8) */
- skb_postpush_rcsum(head, skb_network_header(head),
- skb_network_header_len(head));
+ skb_postpush_rcsum(skb, skb_network_header(skb),
+ skb_network_header_len(skb));
rcu_read_lock();
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -468,6 +306,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
fq->q.fragments = NULL;
fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
+ fq->q.last_run_head = NULL;
return 1;
out_oversize:
@@ -479,6 +318,7 @@ out_fail:
rcu_read_lock();
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
rcu_read_unlock();
+ inet_frag_kill(&fq->q);
return -1;
}
@@ -517,22 +357,26 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return 1;
}
- if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
- fhdr->frag_off & htons(IP6_MF))
- goto fail_hdr;
-
iif = skb->dev ? skb->dev->ifindex : 0;
fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
+ u32 prob_offset = 0;
int ret;
spin_lock(&fq->q.lock);
fq->iif = iif;
- ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+ ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
+ &prob_offset);
spin_unlock(&fq->q.lock);
inet_frag_put(&fq->q);
+ if (prob_offset) {
+ __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_INHDRERRORS);
+ /* icmpv6_param_prob() calls kfree_skb(skb) */
+ icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
+ }
return ret;
}
@@ -700,42 +544,19 @@ static struct pernet_operations ip6_frags_ops = {
.exit = ipv6_frags_exit_net,
};
-static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
-{
- return jhash2(data,
- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
-{
- const struct inet_frag_queue *fq = data;
-
- return jhash2((const u32 *)&fq->key.v6,
- sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
-}
-
-static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
-{
- const struct frag_v6_compare_key *key = arg->key;
- const struct inet_frag_queue *fq = ptr;
-
- return !!memcmp(&fq->key, key, sizeof(*key));
-}
-
-const struct rhashtable_params ip6_rhash_params = {
+static const struct rhashtable_params ip6_rhash_params = {
.head_offset = offsetof(struct inet_frag_queue, node),
- .hashfn = ip6_key_hashfn,
- .obj_hashfn = ip6_obj_hashfn,
- .obj_cmpfn = ip6_obj_cmpfn,
+ .hashfn = ip6frag_key_hashfn,
+ .obj_hashfn = ip6frag_obj_hashfn,
+ .obj_cmpfn = ip6frag_obj_cmpfn,
.automatic_shrinking = true,
};
-EXPORT_SYMBOL(ip6_rhash_params);
int __init ipv6_frag_init(void)
{
int ret;
- ip6_frags.constructor = ip6_frag_init;
+ ip6_frags.constructor = ip6frag_init;
ip6_frags.destructor = NULL;
ip6_frags.qsize = sizeof(struct frag_queue);
ip6_frags.frag_expire = ip6_frag_expire;
@@ -771,8 +592,8 @@ err_protocol:
void ipv6_frag_exit(void)
{
- inet_frags_fini(&ip6_frags);
ip6_frags_sysctl_unregister();
unregister_pernet_subsys(&ip6_frags_ops);
inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+ inet_frags_fini(&ip6_frags);
}
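
In the rewritten receive path above, ip6_frag_queue() no longer sends the ICMP error itself; it records the offending header offset through *prob_offset, and ipv6_frag_rcv() emits the parameter-problem message only after fq->q.lock has been released and the queue reference dropped, so the error path (which frees the skb) never runs under the per-queue spinlock. The general shape of the pattern, with demo_queue_fragment() standing in for the queueing step:

    u32 prob_offset = 0;
    int ret;

    spin_lock(&fq->q.lock);
    ret = demo_queue_fragment(fq, skb, &prob_offset);   /* records, never reports */
    spin_unlock(&fq->q.lock);
    inet_frag_put(&fq->q);

    if (prob_offset) {
        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);   /* frees skb */
    }
    return ret;
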
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 27c93baed708..2c4743f2d50e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3069,6 +3069,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
+ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
nhn++;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c9c6a5e829ab..16eba7b5f1a9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb)
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
+ /* iptunnel_pull_header() may unclone the skb and reallocate its head,
+ * so the old iph pointer is no longer valid
+ */
+ iph = (const struct iphdr *)skb_mac_header(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
@@ -1065,7 +1069,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
- if (tdev) {
+ if (tdev && !netif_is_l3_master(tdev)) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
@@ -1852,7 +1856,6 @@ static int __net_init sit_init_net(struct net *net)
err_reg_dev:
ipip6_dev_free(sitn->fb_tunnel_dev);
- free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
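
The ipip6_rcv() hunk above illustrates a general rule: iptunnel_pull_header() may unclone or otherwise reallocate the skb head, so any header pointer computed before that call (here the outer iph) is stale afterwards and must be re-derived from the skb. Sketch of the rule, with demo_pull_encap() standing in for whatever helper may touch skb->head:

    const struct iphdr *iph = ip_hdr(skb);          /* outer header, pre-pull */

    if (demo_pull_encap(skb))                       /* may unclone/expand skb->head */
        goto drop;

    /* skb->head may have moved: recompute instead of reusing the old pointer */
    iph = (const struct iphdr *)skb_mac_header(skb);
    err = IP_ECN_decapsulate(iph, skb);
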
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0a69d39880f2..4953466cf98f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1056,11 +1056,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
- newnp->mcast_oif = tcp_v6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
+ newnp->rcv_flowinfo = 0;
if (np->repflow)
- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+ newnp->flow_label = 0;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4db5f541bca6..6a397e110b46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -294,7 +294,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
- &udp_table, skb);
+ &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
@@ -479,7 +479,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
- inet6_iif(skb), udptable, skb);
+ inet6_iif(skb), udptable, NULL);
if (!sk) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 3a2701d42f47..07b7b2540579 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -391,6 +391,10 @@ static void __exit xfrm6_tunnel_fini(void)
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+ /* A reader may still hold a reference to an xfrm6_tunnel_spi obtained
+ * under RCU, so wait for outstanding RCU callbacks before destroying
+ * the cache.
+ */
+ rcu_barrier();
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
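
The rcu_barrier() added above encodes a standard teardown rule: objects from xfrm6_tunnel_spi_kmem are released via RCU callbacks, so module exit must wait for all outstanding callbacks to run before the slab cache is destroyed, otherwise a deferred kmem_cache_free() could hit a cache that no longer exists. A minimal sketch of the rule (demo_* names are illustrative):

    static struct kmem_cache *demo_cachep;

    static void __exit demo_exit(void)
    {
        demo_unregister_all();      /* stop creating new objects */
        rcu_barrier();              /* flush call_rcu()/kfree_rcu() frees into the cache */
        kmem_cache_destroy(demo_cachep);
    }
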
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c2dfc32eb9f2..02e10deef5b4 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2431,6 +2431,13 @@ out:
return err;
}
+static void afiucv_iucv_exit(void)
+{
+ device_unregister(af_iucv_dev);
+ driver_unregister(&af_iucv_driver);
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+}
+
static int __init afiucv_init(void)
{
int err;
@@ -2464,11 +2471,18 @@ static int __init afiucv_init(void)
err = afiucv_iucv_init();
if (err)
goto out_sock;
- } else
- register_netdevice_notifier(&afiucv_netdev_notifier);
+ }
+
+ err = register_netdevice_notifier(&afiucv_netdev_notifier);
+ if (err)
+ goto out_notifier;
+
dev_add_pack(&iucv_packet_type);
return 0;
+out_notifier:
+ if (pr_iucv)
+ afiucv_iucv_exit();
out_sock:
sock_unregister(PF_IUCV);
out_proto:
@@ -2482,12 +2496,11 @@ out:
static void __exit afiucv_exit(void)
{
if (pr_iucv) {
- device_unregister(af_iucv_dev);
- driver_unregister(&af_iucv_driver);
- pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+ afiucv_iucv_exit();
symbol_put(iucv_if);
- } else
- unregister_netdevice_notifier(&afiucv_netdev_notifier);
+ }
+
+ unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 553d0ad4a2fa..2f3cd09ee0df 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2058,14 +2058,14 @@ static int __init kcm_init(void)
if (err)
goto fail;
- err = sock_register(&kcm_family_ops);
- if (err)
- goto sock_register_fail;
-
err = register_pernet_device(&kcm_net_ops);
if (err)
goto net_ops_fail;
+ err = sock_register(&kcm_family_ops);
+ if (err)
+ goto sock_register_fail;
+
err = kcm_proc_init();
if (err)
goto proc_init_fail;
@@ -2073,12 +2073,12 @@ static int __init kcm_init(void)
return 0;
proc_init_fail:
- unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
sock_unregister(PF_KCM);
sock_register_fail:
+ unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
proto_unregister(&kcm_proto);
fail:
@@ -2094,8 +2094,8 @@ fail:
static void __exit kcm_exit(void)
{
kcm_proc_exit();
- unregister_pernet_device(&kcm_net_ops);
sock_unregister(PF_KCM);
+ unregister_pernet_device(&kcm_net_ops);
proto_unregister(&kcm_proto);
destroy_workqueue(kcm_wq);
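
The kcm changes reorder initialisation so that the pernet state exists before the PF_KCM socket family becomes visible, and both the error unwind in kcm_init() and kcm_exit() now tear down in strict reverse order of setup; otherwise a socket could be created, or still exist, while its pernet data is gone. A compact sketch of the mirrored register/unwind pattern (demo_* names are illustrative):

    static int __init demo_init(void)
    {
        int err;

        err = demo_register_pernet();   /* state needed by sockets */
        if (err)
            goto fail;
        err = demo_register_family();   /* makes sockets creatable */
        if (err)
            goto undo_pernet;
        return 0;

    undo_pernet:
        demo_unregister_pernet();
    fail:
        return err;
    }
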
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 3ba903ff2bb0..d2ec620319d7 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1969,8 +1969,10 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
if (rq->sadb_x_ipsecrequest_mode == 0)
return -EINVAL;
+ if (!xfrm_id_proto_valid(rq->sadb_x_ipsecrequest_proto))
+ return -EINVAL;
- t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
+ t->id.proto = rq->sadb_x_ipsecrequest_proto;
if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
return -EINVAL;
t->mode = mode;
@@ -2463,8 +2465,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
goto out;
}
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
goto out;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@ -2717,8 +2721,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
return err;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 4ae758bcb2cf..7c3da29fad8e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1351,6 +1351,9 @@ again:
hlist_del_init(&session->hlist);
+ if (test_and_set_bit(0, &session->dead))
+ goto again;
+
if (session->ref != NULL)
(*session->ref)(session);
@@ -1799,6 +1802,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
*/
int l2tp_session_delete(struct l2tp_session *session)
{
+ if (test_and_set_bit(0, &session->dead))
+ return 0;
+
if (session->ref)
(*session->ref)(session);
__l2tp_session_unhash(session);
@@ -1947,7 +1953,8 @@ static __net_exit void l2tp_exit_net(struct net *net)
}
rcu_read_unlock_bh();
- flush_workqueue(l2tp_wq);
+ if (l2tp_wq)
+ flush_workqueue(l2tp_wq);
rcu_barrier();
}
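
The new session->dead flag makes L2TP session teardown idempotent: both the tunnel close-all walker and l2tp_session_delete() try to claim the session with test_and_set_bit(), and only the first caller proceeds to unhash and release it, so concurrent deletion paths can no longer tear down the same session twice. The claim-once pattern in isolation (demo names are illustrative; the flag is typed unsigned long here to match the bitops API):

    struct demo_session {
        unsigned long dead;         /* bit 0: teardown already claimed */
    };

    static int demo_session_delete(struct demo_session *s)
    {
        if (test_and_set_bit(0, &s->dead))
            return 0;               /* another path is already tearing down */

        /* unhash, close the socket, drop the final reference ... */
        return 0;
    }
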
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 7cc49715606e..7c2037184b6c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -84,6 +84,7 @@ struct l2tp_session_cfg {
struct l2tp_session {
int magic; /* should be
* L2TP_SESSION_MAGIC */
+ long dead;
struct l2tp_tunnel *tunnel; /* back pointer to tunnel
* context */
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 03a696d3bcd9..4a88c4eb2301 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -116,6 +116,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct iphdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -174,24 +175,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
-
- read_lock_bh(&l2tp_ip_lock);
- sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
- inet_iif(skb), tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip_lock);
- goto discard;
- }
+ iph = (struct iphdr *)skb_network_header(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip_lock);
+ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
+ tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip_lock);
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8d412b9b0214..423cb095ad37 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -128,6 +128,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
+ struct ipv6hdr *iph;
int length;
if (!pskb_may_pull(skb, 4))
@@ -187,24 +188,17 @@ pass_up:
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
- tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel) {
- sk = tunnel->sock;
- sock_hold(sk);
- } else {
- struct ipv6hdr *iph = ipv6_hdr(skb);
-
- read_lock_bh(&l2tp_ip6_lock);
- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
- inet6_iif(skb), tunnel_id);
- if (!sk) {
- read_unlock_bh(&l2tp_ip6_lock);
- goto discard;
- }
+ iph = ipv6_hdr(skb);
- sock_hold(sk);
+ read_lock_bh(&l2tp_ip6_lock);
+ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+ inet6_iif(skb), tunnel_id);
+ if (!sk) {
read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
}
+ sock_hold(sk);
+ read_unlock_bh(&l2tp_ip6_lock);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 9b214f313cc0..d919b3e6b548 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
- if (sock) {
+ if (sock)
inet_shutdown(sock, SEND_SHUTDOWN);
- /* Don't let the session go away before our socket does */
- l2tp_session_inc_refcount(session);
- }
+
+ /* Don't let the session go away before our socket does */
+ l2tp_session_inc_refcount(session);
}
/* Really kill the session socket. (Called from sock_put() if
@@ -1790,6 +1790,9 @@ static const struct proto_ops pppol2tp_ops = {
.recvmsg = pppol2tp_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index fc60d9d738b5..cdb913e7627e 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
lapb = __lapb_devtostruct(dev);
if (!lapb)
goto out;
+ lapb_put(lapb);
lapb_stop_t1timer(lapb);
lapb_stop_t2timer(lapb);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 789e66b0187a..2a859f967c8a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -111,22 +111,26 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
*
* Send data via reliable llc2 connection.
* Returns 0 upon success, non-zero if action did not succeed.
+ *
+ * This function always consumes a reference to the skb.
*/
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
struct llc_sock* llc = llc_sk(sk);
- int rc = 0;
if (unlikely(llc_data_accept_state(llc->state) ||
llc->remote_busy_flag ||
llc->p_flag)) {
long timeout = sock_sndtimeo(sk, noblock);
+ int rc;
rc = llc_ui_wait_for_busy_core(sk, timeout);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
}
- if (unlikely(!rc))
- rc = llc_build_and_send_pkt(sk, skb);
- return rc;
+ return llc_build_and_send_pkt(sk, skb);
}
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
@@ -896,7 +900,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
int flags = msg->msg_flags;
int noblock = flags & MSG_DONTWAIT;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
size_t size = 0;
int rc = -EINVAL, copied = 0, hdrlen;
@@ -905,10 +909,10 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
lock_sock(sk);
if (addr) {
if (msg->msg_namelen < sizeof(*addr))
- goto release;
+ goto out;
} else {
if (llc_ui_addr_null(&llc->addr))
- goto release;
+ goto out;
addr = &llc->addr;
}
/* must bind connection to sap if user hasn't done it. */
@@ -916,7 +920,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
/* bind to sap with null dev, exclusive. */
rc = llc_ui_autobind(sock, addr);
if (rc)
- goto release;
+ goto out;
}
hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
size = hdrlen + len;
@@ -925,12 +929,12 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
copied = size - hdrlen;
rc = -EINVAL;
if (copied < 0)
- goto release;
+ goto out;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (!skb)
- goto release;
+ goto out;
skb->dev = llc->dev;
skb->protocol = llc_proto_type(addr->sllc_arphrd);
skb_reserve(skb, hdrlen);
@@ -940,29 +944,31 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_test) {
llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
if (addr->sllc_xid) {
llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
addr->sllc_sap);
+ skb = NULL;
goto out;
}
rc = -ENOPROTOOPT;
if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
goto out;
rc = llc_ui_send_data(sk, skb, noblock);
+ skb = NULL;
out:
- if (rc) {
- kfree_skb(skb);
-release:
+ kfree_skb(skb);
+ if (rc)
dprintk("%s: failed sending from %02X to %02X: %d\n",
__func__, llc->laddr.lsap, llc->daddr.lsap, rc);
- }
release_sock(sk);
return rc ? : copied;
}
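
The af_llc changes above establish a single skb ownership rule for the send path: llc_ui_send_data() and the llc_build_and_send_*() helpers always consume the skb (queueing it, or freeing it on error), so llc_ui_sendmsg() sets its local pointer to NULL after each hand-off and the common exit simply calls kfree_skb(), which is a no-op for NULL; every skb is thus freed exactly once on every path. The convention in miniature (demo names are illustrative):

    static int demo_sendmsg(struct sock *sk, size_t size)
    {
        struct sk_buff *skb = NULL;
        int rc = -ENOMEM;

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb)
            goto out;

        rc = demo_build_and_send(sk, skb);   /* always consumes skb */
        skb = NULL;                          /* we no longer own it */
    out:
        kfree_skb(skb);                      /* no-op when skb is NULL */
        return rc;
    }
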
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index 4b60f68cb492..8354ae40ec85 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
+ skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
- rc = llc_conn_send_pdu(sk, skb);
+ skb_get(skb);
+ llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index b9290a183a2f..1bdbd134bd7a 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -30,7 +30,7 @@
#endif
static int llc_find_offset(int state, int ev_type);
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
+static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
@@ -55,6 +55,8 @@ int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
* (executing its actions and changing state), the upper layer will be
* indicated or confirmed, if needed. Returns 0 for success, 1 for
* failure. The socket lock has to be held before calling this function.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
{
@@ -62,12 +64,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
struct llc_sock *llc = llc_sk(skb->sk);
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
- /*
- * We have to hold the skb, because llc_conn_service will kfree it in
- * the sending path and we need to look at the skb->cb, where we encode
- * llc_conn_state_ev.
- */
- skb_get(skb);
ev->ind_prim = ev->cfm_prim = 0;
/*
* Send event to state machine
@@ -75,21 +71,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
rc = llc_conn_service(skb->sk, skb);
if (unlikely(rc != 0)) {
printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
- goto out_kfree_skb;
- }
-
- if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
- /* indicate or confirm not required */
- if (!skb->next)
- goto out_kfree_skb;
goto out_skb_put;
}
- if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
- skb_get(skb);
-
switch (ev->ind_prim) {
case LLC_DATA_PRIM:
+ skb_get(skb);
llc_save_primitive(sk, skb, LLC_DATA_PRIM);
if (unlikely(sock_queue_rcv_skb(sk, skb))) {
/*
@@ -106,6 +93,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
* skb->sk pointing to the newly created struct sock in
* llc_conn_handler. -acme
*/
+ skb_get(skb);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_state_change(sk);
break;
@@ -121,7 +109,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
sk->sk_state_change(sk);
}
}
- kfree_skb(skb);
sock_put(sk);
break;
case LLC_RESET_PRIM:
@@ -130,14 +117,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
* RESET is not being notified to upper layers for now
*/
printk(KERN_INFO "%s: received a reset ind!\n", __func__);
- kfree_skb(skb);
break;
default:
- if (ev->ind_prim) {
+ if (ev->ind_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->ind_prim);
- kfree_skb(skb);
- }
/* No indication */
break;
}
@@ -179,25 +163,22 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
printk(KERN_INFO "%s: received a reset conf!\n", __func__);
break;
default:
- if (ev->cfm_prim) {
+ if (ev->cfm_prim)
printk(KERN_INFO "%s: received unknown %d prim!\n",
__func__, ev->cfm_prim);
- break;
- }
- goto out_skb_put; /* No confirmation */
+ /* No confirmation */
+ break;
}
-out_kfree_skb:
- kfree_skb(skb);
out_skb_put:
kfree_skb(skb);
return rc;
}
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
- return llc_conn_send_pdus(sk, skb);
+ llc_conn_send_pdus(sk);
}
/**
@@ -255,7 +236,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
@@ -296,7 +277,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
- llc_conn_send_pdus(sk, NULL);
+ llc_conn_send_pdus(sk);
out:;
}
@@ -340,16 +321,12 @@ out:
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
- * @hold_skb: the skb held by caller, or NULL if does not care
*
- * Sends queued pdus to MAC layer for transmission. When @hold_skb is
- * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
- * successfully, or 1 for failure.
+ * Sends queued pdus to MAC layer for transmission.
*/
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
- int ret = 0;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -361,20 +338,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
- dev_queue_xmit(skb2);
- } else {
- bool is_target = skb == hold_skb;
- int rc;
-
- if (is_target)
- skb_get(skb);
- rc = dev_queue_xmit(skb);
- if (is_target)
- ret = rc;
+ skb = skb2;
}
+ dev_queue_xmit(skb);
}
-
- return ret;
}
/**
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index e896a2c53b12..f1e442a39db8 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -127,9 +127,7 @@ void llc_sap_close(struct llc_sap *sap)
list_del_rcu(&sap->node);
spin_unlock_bh(&llc_sap_list_lock);
- synchronize_rcu();
-
- kfree(sap);
+ kfree_rcu(sap, rcu);
}
static struct packet_type llc_packet_type __read_mostly = {
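
Replacing synchronize_rcu() plus kfree() with kfree_rcu() lets llc_sap_close() return without blocking for a grace period while still guaranteeing that RCU readers walking the SAP list never see freed memory; it only requires a struct rcu_head embedded in the object (added to struct llc_sap in the corresponding header update). The pattern in isolation (demo struct is illustrative):

    struct demo_sap {
        struct list_head node;      /* on an RCU-protected list */
        struct rcu_head  rcu;
    };

    static void demo_sap_close(struct demo_sap *sap)
    {
        list_del_rcu(&sap->node);
        kfree_rcu(sap, rcu);        /* freed only after current readers finish */
    }
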
diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
index 6daf391b3e84..fc4d2bd8816f 100644
--- a/net/llc/llc_if.c
+++ b/net/llc/llc_if.c
@@ -38,6 +38,8 @@
* closed and -EBUSY when sending data is not permitted in this state or
* LLC has sent an I pdu with p bit set to 1 and is waiting for its
* response.
+ *
+ * This function always consumes a reference to the skb.
*/
int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
{
@@ -46,20 +48,22 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
struct llc_sock *llc = llc_sk(sk);
if (unlikely(llc->state == LLC_CONN_STATE_ADM))
- goto out;
+ goto out_free;
rc = -EBUSY;
if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
llc->p_flag)) {
llc->failed_data_req = 1;
- goto out;
+ goto out_free;
}
ev = llc_conn_ev(skb);
ev->type = LLC_CONN_EV_TYPE_PRIM;
ev->prim = LLC_DATA_PRIM;
ev->prim_type = LLC_PRIM_TYPE_REQ;
skb->dev = llc->dev;
- rc = llc_conn_state_process(sk, skb);
-out:
+ return llc_conn_state_process(sk, skb);
+
+out_free:
+ kfree_skb(skb);
return rc;
}
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 94425e421213..9e4b6bcf6920 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
if (likely(!rc))
rc = dev_queue_xmit(skb);
+ else
+ kfree_skb(skb);
return rc;
}
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index a94bd56bcac6..7ae4cc684d3a 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
- if (likely(!rc))
+ if (likely(!rc)) {
+ skb_get(skb);
rc = dev_queue_xmit(skb);
+ }
return rc;
}
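
The llc_s_ac.c hunks take an extra reference with skb_get() before handing the skb to dev_queue_xmit(): the transmit path consumes one reference, while the SAP state machine that invoked the action still drops its own reference afterwards (see the llc_sap_state_process() change further down), so without the extra hold the skb would be freed while still in flight. The reference rule in isolation (demo name is illustrative):

    rc = demo_build_mac_header(skb);
    if (likely(!rc)) {
        skb_get(skb);               /* one reference for dev_queue_xmit() */
        rc = dev_queue_xmit(skb);   /* consumes that reference */
    }
    return rc;                      /* the caller's own reference is dropped later */
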
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 5404d0d195cc..d51ff9df9c95 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -197,29 +197,22 @@ out:
* After executing actions of the event, upper layer will be indicated
* if needed (on receiving a UI frame). sk can be null for the
* datalink_proto case.
+ *
+ * This function always consumes a reference to the skb.
*/
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
- /*
- * We have to hold the skb, because llc_sap_next_state
- * will kfree it in the sending path and we need to
- * look at the skb->cb, where we encode llc_sap_state_ev.
- */
- skb_get(skb);
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
- if (ev->ind_cfm_flag == LLC_IND) {
- if (skb->sk->sk_state == TCP_LISTEN)
- kfree_skb(skb);
- else {
- llc_save_primitive(skb->sk, skb, ev->prim);
- /* queue skb to the user. */
- if (sock_queue_rcv_skb(skb->sk, skb))
- kfree_skb(skb);
- }
+ if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
+ llc_save_primitive(skb->sk, skb, ev->prim);
+
+ /* queue skb to the user. */
+ if (sock_queue_rcv_skb(skb->sk, skb) == 0)
+ return;
}
kfree_skb(skb);
}
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 204a8351efff..c29170e767a8 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
- !pdu->dsap ? 0 : 1; /* NULL DSAP value */
+ !pdu->dsap; /* NULL DSAP value */
}
static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
return LLC_PDU_IS_CMD(pdu) && /* command PDU */
LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */
LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
- !pdu->dsap ? 0 : 1; /* NULL DSAP */
+ !pdu->dsap; /* NULL DSAP */
}
static int llc_station_ac_send_xid_r(struct sk_buff *skb)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 954315e1661d..88dd5d218fe3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1048,50 +1048,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return 0;
}
-/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
-struct iapp_layer2_update {
- u8 da[ETH_ALEN]; /* broadcast */
- u8 sa[ETH_ALEN]; /* STA addr */
- __be16 len; /* 6 */
- u8 dsap; /* 0 */
- u8 ssap; /* 0 */
- u8 control;
- u8 xid_info[3];
-} __packed;
-
-static void ieee80211_send_layer2_update(struct sta_info *sta)
-{
- struct iapp_layer2_update *msg;
- struct sk_buff *skb;
-
- /* Send Level 2 Update Frame to update forwarding tables in layer 2
- * bridge devices */
-
- skb = dev_alloc_skb(sizeof(*msg));
- if (!skb)
- return;
- msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg));
-
- /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
- * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
-
- eth_broadcast_addr(msg->da);
- memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
- msg->len = htons(6);
- msg->dsap = 0;
- msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
- msg->control = 0xaf; /* XID response lsb.1111F101.
- * F=0 (no poll command; unsolicited frame) */
- msg->xid_info[0] = 0x81; /* XID format identifier */
- msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
- msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
-
- skb->dev = sta->sdata->dev;
- skb->protocol = eth_type_trans(skb, sta->sdata->dev);
- memset(skb->cb, 0, sizeof(skb->cb));
- netif_rx_ni(skb);
-}
-
static int sta_apply_auth_flags(struct ieee80211_local *local,
struct sta_info *sta,
u32 mask, u32 set)
@@ -1401,7 +1357,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
struct ieee80211_sub_if_data *sdata;
int err;
- int layer2_update;
if (params->vlan) {
sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -1418,6 +1373,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (is_multicast_ether_addr(mac))
return -EINVAL;
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
if (!sta)
return -ENOMEM;
@@ -1425,10 +1385,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
- if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated)
- return -EINVAL;
-
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);
@@ -1444,18 +1400,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
test_sta_flag(sta, WLAN_STA_ASSOC))
rate_control_rate_init(sta);
- layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- sdata->vif.type == NL80211_IFTYPE_AP;
-
err = sta_info_insert_rcu(sta);
if (err) {
rcu_read_unlock();
return err;
}
- if (layer2_update)
- ieee80211_send_layer2_update(sta);
-
rcu_read_unlock();
return 0;
@@ -1564,7 +1514,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
atomic_inc(&sta->sdata->bss->num_mcast_sta);
}
- ieee80211_send_layer2_update(sta);
+ if (sta->sta_state == IEEE80211_STA_AUTHORIZED)
+ cfg80211_send_layer2_update(sta->sdata->dev,
+ sta->sta.addr);
}
err = sta_apply_parameters(local, sta, params);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index bcec1240f41d..9769db9818d2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ struct txq_info *txqi;
int len;
+ if (!sdata->vif.txq)
+ return 0;
+
+ txqi = to_txq_info(sdata->vif.txq);
+
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
@@ -657,7 +662,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
- if (sdata->local->ops->wake_tx_queue)
+ if (sdata->local->ops->wake_tx_queue &&
+ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN)
DEBUGFS_ADD(aqm);
}
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index bb886e7db47f..f783d1377d9a 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (WARN_ONCE(params->cw_min == 0 ||
- params->cw_min > params->cw_max,
- "%s: invalid CW_min/CW_max: %d/%d\n",
- sdata->name, params->cw_min, params->cw_max))
+ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+ /*
+ * If we can't configure hardware anyway, don't warn. We may
+ * never have initialized the CW parameters.
+ */
+ WARN_ONCE(local->ops->conf_tx,
+ "%s: invalid CW_min/CW_max: %d/%d\n",
+ sdata->name, params->cw_min, params->cw_max);
return -EINVAL;
+ }
trace_drv_conf_tx(local, sdata, ac, params);
if (local->ops->conf_tx)
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 49c8a9c9b91f..1a169de60192 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1163,6 +1163,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
+ if (local->in_reconfig)
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8a690ebd7374..0b0de3030e0d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1403,7 +1403,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (WARN_ON(!chanctx_conf)) {
+ if (WARN_ON_ONCE(!chanctx_conf)) {
rcu_read_unlock();
return NULL;
}
@@ -2123,6 +2123,9 @@ void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
const u8 *addr);
void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
void ieee80211_tdls_chsw_work(struct work_struct *wk);
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+ const u8 *peer, u16 reason);
+const char *ieee80211_get_reason_code_string(u16 reason_code);
extern const struct ethtool_ops ieee80211_ethtool_ops;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5768560cbfc3..ad03331ee785 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1937,6 +1937,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
+ if (sdata->vif.txq)
+ ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
synchronize_rcu();
if (sdata->dev) {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b2a27263d6ff..5c347d3a92c9 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -885,6 +885,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
/* flush STAs and mpaths on this iface */
sta_info_flush(sdata);
+ ieee80211_free_keys(sdata, true);
mesh_path_flush_by_iface(sdata);
/* stop the beacon */
@@ -1135,7 +1136,8 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
- tmp_csa_settings = rcu_dereference(ifmsh->csa);
+ tmp_csa_settings = rcu_dereference_protected(ifmsh->csa,
+ lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(ifmsh->csa, NULL);
if (tmp_csa_settings)
kfree_rcu(tmp_csa_settings, rcu_head);
@@ -1157,6 +1159,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
+ lockdep_assert_held(&sdata->wdev.mtx);
+
tmp_csa_settings = kmalloc(sizeof(*tmp_csa_settings),
GFP_ATOMIC);
if (!tmp_csa_settings)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index b0acb2961e80..f7eaa1051b5b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -326,6 +326,9 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
u32 tx_time, estimated_retx;
u64 result;
+ if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+ return MAX_METRIC;
+
/* Try to get rate based on HW/SW RC algorithm.
* Rate is returned in units of Kbps, correct this
* to comply with airtime calculation units
@@ -1128,7 +1131,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
}
}
- if (!(mpath->flags & MESH_PATH_RESOLVING))
+ if (!(mpath->flags & MESH_PATH_RESOLVING) &&
+ mesh_path_sel_is_hwmp(sdata))
mesh_queue_preq(mpath, PREQ_Q_F_START);
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 197753ad50b4..8c17d498df30 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
/* Use last four bytes of hw addr as hash index */
- return jhash_1word(*(u32 *)(addr+2), seed);
+ return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}
static const struct rhashtable_params mesh_rht_params = {
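The hash change above swaps a cast-based 32-bit load of addr + 2 for __get_unaligned_cpu32(), since the last four bytes of a 6-byte hardware address are not guaranteed to be 4-byte aligned. A minimal userspace sketch of the same idea, assuming nothing beyond standard C (get_unaligned_u32() is an illustrative stand-in, not the kernel helper):

/* Userspace sketch, not the kernel helper: a portable unaligned 32-bit
 * load done via memcpy, the same idea as __get_unaligned_cpu32(). A
 * plain (u32 *) cast of addr + 2 would be a misaligned access on
 * strict-alignment architectures.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;

        memcpy(&v, p, sizeof(v));       /* compiler emits a safe load */
        return v;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x44, 0x11, 0x3a, 0xb7 };

        /* last four bytes of the hw addr, as hashed above */
        printf("0x%08" PRIx32 "\n", get_unaligned_u32(mac + 2));
        return 0;
}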
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6e0aa296f134..048389b5aa0f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1072,9 +1072,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
- /* XXX: shouldn't really modify cfg80211-owned data! */
- ifmgd->associated->channel = sdata->csa_chandef.chan;
-
ifmgd->csa_waiting_bcn = true;
ieee80211_sta_reset_beacon_monitor(sdata);
@@ -1876,6 +1873,16 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
}
}
+ /* WMM specification requires all 4 ACIs. */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (params[ac].cw_min == 0) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (missing AC %d), using defaults\n",
+ ac);
+ return false;
+ }
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
mlme_dbg(sdata,
"WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
@@ -2427,7 +2434,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
rcu_read_lock();
ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
- if (WARN_ON_ONCE(ssid == NULL))
+ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
ssid_len = 0;
else
ssid_len = ssid[1];
@@ -2748,7 +2756,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
#define case_WLAN(type) \
case WLAN_REASON_##type: return #type
-static const char *ieee80211_get_reason_code_string(u16 reason_code)
+const char *ieee80211_get_reason_code_string(u16 reason_code)
{
switch (reason_code) {
case_WLAN(UNSPECIFIED);
@@ -2813,6 +2821,11 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
if (len < 24 + 2)
return;
+ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+ return;
+ }
+
if (ifmgd->associated &&
ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
const u8 *bssid = ifmgd->associated->bssid;
@@ -2862,8 +2875,14 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
- mgmt->sa, reason_code);
+ if (!ether_addr_equal(mgmt->bssid, mgmt->sa)) {
+ ieee80211_tdls_handle_disconnect(sdata, mgmt->sa, reason_code);
+ return;
+ }
+
+ sdata_info(sdata, "disassociated from %pM (Reason: %u=%s)\n",
+ mgmt->sa, reason_code,
+ ieee80211_get_reason_code_string(reason_code));
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
@@ -4673,7 +4692,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- if (!ssidie) {
+ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
rcu_read_unlock();
kfree(assoc_data);
return -EINVAL;
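The ieee80211_sta_wmm_params() hunk above rejects an AP's WMM IE when any access category reports cw_min == 0 and falls back to defaults rather than handing bogus values to drv_conf_tx(). A small userspace sketch of that validation, combined with the cw_min > cw_max check from the driver-ops hunk earlier; the struct and the notion of "defaults" are illustrative, not mac80211's types:

/* Userspace sketch of the WMM validation above; illustrative types. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ACS 4

struct wmm_ac {
        unsigned int cw_min;
        unsigned int cw_max;
};

static bool wmm_params_valid(const struct wmm_ac params[NUM_ACS])
{
        for (int ac = 0; ac < NUM_ACS; ac++) {
                /* missing ACI or nonsensical window: keep defaults */
                if (params[ac].cw_min == 0 ||
                    params[ac].cw_min > params[ac].cw_max)
                        return false;
        }
        return true;
}

int main(void)
{
        const struct wmm_ac bad[NUM_ACS] = {
                { 15, 1023 }, { 0, 0 }, { 7, 15 }, { 3, 7 }
        };

        printf("use AP params: %d\n", wmm_params_valid(bad)); /* 0 */
        return 0;
}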
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 30fbabf4bcbc..e1b0e26c1f17 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -128,7 +128,7 @@
#define CCK_GROUP \
[MINSTREL_CCK_GROUP] = { \
- .streams = 0, \
+ .streams = 1, \
.flags = 0, \
.duration = { \
CCK_DURATION_LIST(false), \
@@ -547,7 +547,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
/* (re)Initialize group rate indexes */
for(j = 0; j < MAX_THR_RATES; j++)
- tmp_group_tp_rate[j] = group;
+ tmp_group_tp_rate[j] = MCS_GROUP_RATES * group;
for (i = 0; i < MCS_GROUP_RATES; i++) {
if (!(mg->supported & BIT(i)))
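The minstrel_ht fix above seeds the temporary best-rate array with MCS_GROUP_RATES * group rather than the bare group number, because rates live in one flat table indexed as group * MCS_GROUP_RATES + index-within-group. A trivial sketch of that index math (the constant here is illustrative; the real value is defined in rc80211_minstrel_ht.h):

/* Userspace sketch of the flat-table index math. */
#include <stdio.h>

#define MCS_GROUP_RATES 10

static unsigned int rate_index(unsigned int group, unsigned int idx)
{
        return group * MCS_GROUP_RATES + idx;
}

int main(void)
{
        unsigned int group = 3;

        /* seeding with the bare group number pointed into group 0 */
        printf("first rate of group %u: index %u, not %u\n",
               group, rate_index(group, 0), group);
        return 0;
}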
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 23f6c8baae95..a6f265262f15 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3205,9 +3205,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
/* process for all: mesh, mlme, ibss */
break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ if (is_multicast_ether_addr(mgmt->da) &&
+ !is_broadcast_ether_addr(mgmt->da))
+ return RX_DROP_MONITOR;
+
+ /* process only for station/IBSS */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ return RX_DROP_MONITOR;
+ break;
case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
if (is_multicast_ether_addr(mgmt->da) &&
!is_broadcast_ether_addr(mgmt->da))
@@ -3568,6 +3577,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
case NL80211_IFTYPE_STATION:
if (!bssid && !sdata->u.mgd.use_4addr)
return false;
+ if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
+ return false;
if (multicast)
return true;
return ether_addr_equal(sdata->vif.addr, hdr->addr1);
@@ -3830,7 +3841,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->sta_mtx);
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
if (sdata != sta->sdata &&
(!sta->sdata->bss || sta->sdata->bss != sdata->bss))
continue;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 892c392ff8fc..4f7061c3b770 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -3,6 +3,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018-2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -945,6 +946,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
might_sleep();
lockdep_assert_held(&local->sta_mtx);
+ while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+ ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+ WARN_ON_ONCE(ret);
+ }
+
/* now keys can no longer be reached */
ieee80211_free_sta_keys(local, sta);
@@ -1896,6 +1902,10 @@ int sta_info_move_state(struct sta_info *sta,
ieee80211_check_fast_xmit(sta);
ieee80211_check_fast_rx(sta);
}
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ cfg80211_send_layer2_update(sta->sdata->dev,
+ sta->sta.addr);
break;
default:
break;
@@ -2305,7 +2315,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
- if (time_after(stats->last_rx, sta->status_stats.last_ack))
+ if (!sta->status_stats.last_ack ||
+ time_after(stats->last_rx, sta->status_stats.last_ack))
return stats->last_rx;
return sta->status_stats.last_ack;
}
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index c64ae68ae4f8..863f92c08701 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -2001,3 +2001,26 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk)
}
rtnl_unlock();
}
+
+void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
+ const u8 *peer, u16 reason)
+{
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(&sdata->vif, peer);
+ if (!sta || !sta->tdls) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ tdls_dbg(sdata, "disconnected from TDLS peer %pM (Reason: %u=%s)\n",
+ peer, reason,
+ ieee80211_get_reason_code_string(reason));
+
+ ieee80211_tdls_oper_request(&sdata->vif, peer,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
+ GFP_ATOMIC);
+}
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index b3622823bad2..ebd66e8f46b3 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -266,9 +266,21 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
- if (rx_ctx->ctx.state != TKIP_STATE_NOT_INIT &&
- (iv32 < rx_ctx->iv32 ||
- (iv32 == rx_ctx->iv32 && iv16 <= rx_ctx->iv16)))
+ /* Reject replays if the received TSC is smaller than or equal to the
+ * last received value in a valid message, but with an exception for
+ * the case where a new key has been set and no valid frame using that
+ * key has yet been received and the local RSC was initialized to 0. This
+ * exception allows the very first frame sent by the transmitter to be
+ * accepted even if that transmitter were to use TSC 0 (IEEE 802.11
+ * describes the TSC as being initialized to 1 whenever a new key is taken into
+ * use).
+ */
+ if (iv32 < rx_ctx->iv32 ||
+ (iv32 == rx_ctx->iv32 &&
+ (iv16 < rx_ctx->iv16 ||
+ (iv16 == rx_ctx->iv16 &&
+ (rx_ctx->iv32 || rx_ctx->iv16 ||
+ rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
return TKIP_DECRYPT_REPLAY;
if (only_iv) {
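A userspace sketch of the replay rule introduced in ieee80211_tkip_decrypt_data() above: reject any TSC less than or equal to the last accepted one, except while the receive context is still all-zero (new key, no valid frame yet), so a transmitter that starts at TSC 0 is not rejected forever. Only the comparison mirrors the kernel logic; the struct is illustrative:

/* Userspace sketch of the TKIP TSC replay check. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tkip_rx_ctx {
        uint32_t iv32;
        uint16_t iv16;
        bool     initialized;   /* stand-in for ctx.state != TKIP_STATE_NOT_INIT */
};

static bool tkip_is_replay(const struct tkip_rx_ctx *rx,
                           uint32_t iv32, uint16_t iv16)
{
        if (iv32 < rx->iv32)
                return true;
        if (iv32 == rx->iv32) {
                if (iv16 < rx->iv16)
                        return true;
                if (iv16 == rx->iv16 &&
                    (rx->iv32 || rx->iv16 || rx->initialized))
                        return true;    /* equal TSC only allowed on a fresh key */
        }
        return false;
}

int main(void)
{
        struct tkip_rx_ctx fresh = { 0, 0, false };
        struct tkip_rx_ctx used  = { 0, 0, true };

        assert(!tkip_is_replay(&fresh, 0, 0));  /* first frame with TSC 0 passes */
        assert(tkip_is_replay(&used, 0, 0));    /* otherwise it is a replay */
        return 0;
}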
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f8de166b788a..850264fac378 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3412,8 +3412,26 @@ begin:
tx.skb = skb;
tx.sdata = vif_to_sdata(info->control.vif);
- if (txq->sta)
+ if (txq->sta) {
tx.sta = container_of(txq->sta, struct sta_info, sta);
+ /*
+ * Drop unicast frames to unauthorised stations unless they are
+ * EAPOL frames from the local station.
+ */
+ if (unlikely(ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+ tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
+ (!(info->control.flags &
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
+ !ether_addr_equal(tx.sdata->vif.addr,
+ hdr->addr2)))) {
+ I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
+ ieee80211_free_txskb(&local->hw, skb);
+ goto begin;
+ }
+ }
/*
* The key can be removed while the packet was queued, so need to call
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ca7de02e0a6e..52f9742c438a 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -943,16 +943,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem_parse_failed = true;
break;
case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation))
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
elems->vht_operation = (void *)pos;
- else
- elem_parse_failed = true;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed = true;
break;
case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0)
+ if (elen > 0) {
elems->opmode_notif = pos;
- else
- elem_parse_failed = true;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed = true;
break;
case WLAN_EID_MESH_ID:
elems->mesh_id = pos;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index caa5986cb2e4..c0529c4b60f8 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1169,7 +1169,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
- u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
+ u8 aad[GMAC_AAD_LEN], *mic, ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
@@ -1200,13 +1200,18 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
memcpy(nonce, hdr->addr2, ETH_ALEN);
memcpy(nonce + ETH_ALEN, ipn, 6);
+ mic = kmalloc(GMAC_MIC_LEN, GFP_ATOMIC);
+ if (!mic)
+ return RX_DROP_UNUSABLE;
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
+ kfree(mic);
return RX_DROP_UNUSABLE;
}
+ kfree(mic);
}
memcpy(key->u.aes_gmac.rx_pn, ipn, 6);
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 2e8e7e5fb4a6..7536cdd2da28 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -66,9 +66,9 @@ mtype_destroy(struct ip_set *set)
if (SET_WITH_TIMEOUT(set))
del_timer_sync(&map->gc);
- ip_set_free(map->members);
if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
+ ip_set_free(map->members);
ip_set_free(map);
set->data = NULL;
@@ -81,7 +81,7 @@ mtype_flush(struct ip_set *set)
if (set->extensions & IPSET_EXT_DESTROY)
mtype_ext_cleanup(set);
- memset(map->members, 0, map->memsize);
+ bitmap_zero(map->members, map->elements);
}
static int
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 4783efff0bde..a4c104a4977f 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -40,7 +40,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
/* Type structure */
struct bitmap_ip {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -222,7 +222,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
u32 first_ip, u32 last_ip,
u32 elements, u32 hosts, u8 netmask)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@@ -315,7 +315,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 9a065f672d3a..8e58e7e34981 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -46,7 +46,7 @@ enum {
/* Type structure */
struct bitmap_ipmac {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u32 first_ip; /* host byte order, included in range */
u32 last_ip; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -299,7 +299,7 @@ static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
u32 first_ip, u32 last_ip, u32 elements)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_ip = first_ip;
@@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (!map)
return -ENOMEM;
- map->memsize = bitmap_bytes(0, elements - 1);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
kfree(map);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 7f0c733358a4..6771b362a123 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -34,7 +34,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
/* Type structure */
struct bitmap_port {
- void *members; /* the set members */
+ unsigned long *members; /* the set members */
u16 first_port; /* host byte order, included in range */
u16 last_port; /* host byte order, included in range */
u32 elements; /* number of max elements in the set */
@@ -207,7 +207,7 @@ static bool
init_map_port(struct ip_set *set, struct bitmap_port *map,
u16 first_port, u16 last_port)
{
- map->members = ip_set_alloc(map->memsize);
+ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
if (!map->members)
return false;
map->first_port = first_port;
@@ -250,7 +250,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
return -ENOMEM;
map->elements = elements;
- map->memsize = bitmap_bytes(0, map->elements);
+ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
kfree(map);
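The three bitmap set types above switch from ip_set_alloc(map->memsize) to bitmap_zalloc(elements, ...) and compute memsize as BITS_TO_LONGS(elements) * sizeof(unsigned long). A minimal userspace sketch of that size calculation, with the macros mirroring their kernel definitions:

/* Userspace sketch of the bitmap size calculation. */
#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))
#define BITS_PER_LONG        (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits) DIV_ROUND_UP(nbits, BITS_PER_LONG)

int main(void)
{
        const unsigned int elements[] = { 1, 64, 65, 65536 };

        for (unsigned int i = 0; i < 4; i++)
                printf("%6u bits -> %zu bytes\n", elements[i],
                       BITS_TO_LONGS(elements[i]) * sizeof(unsigned long));
        return 0;
}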
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index a748b0c2c981..f64660e9ff87 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1634,6 +1634,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set;
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
+ u32 lineno;
if (unlikely(protocol_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
@@ -1650,7 +1651,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
return -IPSET_ERR_PROTOCOL;
rcu_read_lock_bh();
- ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
rcu_read_unlock_bh();
/* Userspace can't trigger element to be re-added */
if (ret == -EAGAIN)
@@ -1942,8 +1943,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
}
req_version->version = IPSET_PROTOCOL;
- ret = copy_to_user(user, req_version,
- sizeof(struct ip_set_req_version));
+ if (copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version)))
+ ret = -EFAULT;
goto done;
}
case IP_SET_OP_GET_BYNAME: {
@@ -2000,7 +2002,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
} /* end of switch(op) */
copy:
- ret = copy_to_user(user, data, copylen);
+ if (copy_to_user(user, data, copylen))
+ ret = -EFAULT;
done:
vfree(data);
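The ip_set_sockfn_get() hunks above stop returning the raw result of copy_to_user(): it yields the number of bytes left uncopied, not an errno, so the old code could hand a positive byte count back to the caller. A hedged userspace sketch of the corrected pattern; copy_to_user_mock() is an invented stand-in used only to simulate a partial copy:

/* Userspace sketch of converting a partial copy into -EFAULT. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static unsigned long copy_to_user_mock(void *to, const void *from,
                                       unsigned long n, unsigned long left)
{
        memcpy(to, from, n - left);
        return left;                    /* bytes left uncopied */
}

int main(void)
{
        char dst[8], src[8] = "request";
        int ret = 0;

        if (copy_to_user_mock(dst, src, sizeof(src), 3))
                ret = -EFAULT;          /* not ret = <byte count> */

        printf("ret = %d\n", ret);      /* a negative errno, not 3 */
        return 0;
}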
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index fd186b011a99..8475e8692ff0 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1643,7 +1643,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
if (!cp) {
int v;
- if (!sysctl_schedule_icmp(ipvs))
+ if (ipip || !sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 8037b25ddb76..33125fc009cf 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -97,7 +97,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
- static int old_secure_tcp = 0;
int availmem;
int nomem;
int to_change = -1;
@@ -178,35 +177,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
spin_lock(&ipvs->securetcp_lock);
switch (ipvs->sysctl_secure_tcp) {
case 0:
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
break;
case 1:
if (nomem) {
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
ipvs->sysctl_secure_tcp = 2;
} else {
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
}
break;
case 2:
if (nomem) {
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
} else {
- if (old_secure_tcp >= 2)
+ if (ipvs->old_secure_tcp >= 2)
to_change = 0;
ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
- if (old_secure_tcp < 2)
+ if (ipvs->old_secure_tcp < 2)
to_change = 1;
break;
}
- old_secure_tcp = ipvs->sysctl_secure_tcp;
+ ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(ipvs,
ipvs->sysctl_secure_tcp > 1);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index df1d5618b008..1bdae8f188e1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
@@ -301,6 +302,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+/* Generate an almost-unique pseudo-id for a given conntrack.
+ *
+ * This intentionally doesn't re-use any of the seeds used for hash
+ * table location; we assume the id gets exposed to userspace.
+ *
+ * The following nf_conn items do not change throughout the lifetime
+ * of the nf_conn:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+ static __read_mostly siphash_key_t ct_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+ a = (unsigned long)ct;
+ b = (unsigned long)ct->master;
+ c = (unsigned long)nf_ct_net(ct);
+ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+ &ct_id_seed);
+#ifdef CONFIG_64BIT
+ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+ return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
static void
clean_from_lists(struct nf_conn *ct)
{
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e3ed20060878..562b54524249 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,7 +323,7 @@ static int find_pattern(const char *data, size_t dlen,
i++;
}
- pr_debug("Skipped up to `%c'!\n", skip);
+ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
*numoff = i;
*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9e30fd0ab227..5e28702c801f 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/siphash.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
@@ -441,7 +442,9 @@ static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb,
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
- if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+ __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+ if (nla_put_be32(skb, CTA_ID, id))
goto nla_put_failure;
return 0;
@@ -1166,8 +1169,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
ct = nf_ct_tuplehash_to_ctrack(h);
if (cda[CTA_ID]) {
- u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
- if (id != (u32)(unsigned long)ct) {
+ __be32 id = nla_get_be32(cda[CTA_ID]);
+
+ if (id != (__force __be32)nf_ct_get_id(ct)) {
nf_ct_put(ct);
return -ENOENT;
}
@@ -2472,6 +2476,25 @@ nla_put_failure:
static const union nf_inet_addr any_addr;
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+ static __read_mostly siphash_key_t exp_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+ a = (unsigned long)exp;
+ b = (unsigned long)exp->helper;
+ c = (unsigned long)exp->master;
+ d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+ return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+ return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
@@ -2519,7 +2542,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
}
#endif
if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
- nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+ nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
goto nla_put_failure;
@@ -2818,7 +2841,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
- if (ntohl(id) != (u32)(unsigned long)exp) {
+
+ if (id != nf_expect_get_id(exp)) {
nf_ct_expect_put(exp);
return -ENOENT;
}
@@ -3364,6 +3388,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
list_for_each_entry(net, net_exit_list, exit_list)
ctnetlink_net_exit(net);
+
+ /* wait until other CPUs are done with the ctnl_notifiers */
+ synchronize_rcu();
}
static struct pernet_operations ctnetlink_net_ops = {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 2278d9ab723b..9837a61cb3e3 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -490,7 +490,7 @@ static int nfnetlink_bind(struct net *net, int group)
ss = nfnetlink_get_subsys(type << 8);
rcu_read_unlock();
if (!ss)
- request_module("nfnetlink-subsys-%d", type);
+ request_module_nowait("nfnetlink-subsys-%d", type);
return 0;
}
#endif
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 3f499126727c..8396dc8ee247 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -711,6 +711,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
[NFCTH_NAME] = { .type = NLA_NUL_STRING,
.len = NF_CT_HELPER_NAME_LEN-1 },
[NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+ [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+ [NFCTH_STATUS] = { .type = NLA_U32, },
};
static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 763ebc3e0b2b..f93047f974e1 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -62,6 +62,13 @@ nla_put_failure:
return -1;
}
+static int nft_fwd_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+}
+
static struct nft_expr_type nft_fwd_netdev_type;
static const struct nft_expr_ops nft_fwd_netdev_ops = {
.type = &nft_fwd_netdev_type,
@@ -69,6 +76,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
.eval = nft_fwd_netdev_eval,
.init = nft_fwd_netdev_init,
.dump = nft_fwd_netdev_dump,
+ .validate = nft_fwd_validate,
};
static struct nft_expr_type nft_fwd_netdev_type __read_mostly = {
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 93820e0d8814..4ee8acded0a4 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -191,10 +191,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
else if (d > 0)
parent = parent->rb_right;
else {
- if (!nft_set_elem_active(&rbe->ext, genmask)) {
- parent = parent->rb_left;
- continue;
- }
if (nft_rbtree_interval_end(rbe) &&
!nft_rbtree_interval_end(this)) {
parent = parent->rb_left;
@@ -203,6 +199,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ parent = parent->rb_left;
+ continue;
}
nft_set_elem_change_active(net, set, &rbe->ext);
return rbe;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 751fec729ffb..e065140d0c93 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1728,7 +1728,7 @@ static int __init xt_init(void)
seqcount_init(&per_cpu(xt_recseq, i));
}
- xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
+ xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
if (!xt)
return -ENOMEM;
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index dffee9d47ec4..7b993f25aab9 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -25,6 +25,9 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
struct xt_bpf_info *info = par->matchinfo;
struct sock_fprog_kern program;
+ if (info->bpf_program_num_elem > XT_BPF_MAX_NUM_INSTR)
+ return -EINVAL;
+
program.len = info->bpf_program_num_elem;
program.filter = info->bpf_program;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a1a29cdc58fc..140a9ae262ef 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -735,6 +735,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 2);
}
+#define HASHLIMIT_MAX_SIZE 1048576
+
static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
struct xt_hashlimit_htable **hinfo,
struct hashlimit_cfg2 *cfg,
@@ -745,6 +747,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
if (cfg->gc_interval == 0 || cfg->expire == 0)
return -EINVAL;
+ if (cfg->size > HASHLIMIT_MAX_SIZE) {
+ cfg->size = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
+ }
+ if (cfg->max > HASHLIMIT_MAX_SIZE) {
+ cfg->max = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
+ }
if (par->family == NFPROTO_IPV4) {
if (cfg->srcmask > 32 || cfg->dstmask > 32)
return -EINVAL;
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index bb33598e4530..ec247d8370e8 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -96,8 +96,7 @@ match_outdev:
static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
-
- br_netfilter_enable();
+ static bool brnf_probed __read_mostly;
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
@@ -113,6 +112,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
return -EINVAL;
}
+
+ if (!brnf_probed) {
+ brnf_probed = true;
+ request_module("br_netfilter");
+ }
+
return 0;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 025487436438..205865292ba3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1003,7 +1003,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->netlink_bind && groups) {
int group;
- for (group = 0; group < nlk->ngroups; group++) {
+ /* nl_groups is a u32, so cap the maximum groups we can bind */
+ for (group = 0; group < BITS_PER_TYPE(u32); group++) {
if (!test_bit(group, &groups))
continue;
err = nlk->netlink_bind(net, group + 1);
@@ -1022,7 +1023,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_insert(sk, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
- netlink_undo_bind(nlk->ngroups, groups, sk);
+ netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
return err;
}
}
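The netlink_bind() change above caps the group loop at BITS_PER_TYPE(u32) because the subscription mask supplied by userspace is a 32-bit nl_groups word; walking nlk->ngroups (which can exceed 32, and even the width of the on-stack long holding the mask) would test bits userspace can never set. A small sketch of that bound, with an illustrative mask:

/* Userspace sketch: only bits 0..31 of nl_groups are meaningful. */
#include <stdio.h>

#define BITS_PER_TYPE(type) (8 * sizeof(type))

int main(void)
{
        const unsigned long groups = 0x80000001UL;  /* from a u32 nl_groups */

        for (unsigned int g = 0; g < BITS_PER_TYPE(unsigned int); g++)
                if (groups & (1UL << g))
                        printf("bind group %u\n", g + 1);
        return 0;
}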
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 046ae1caecea..e5888983bec4 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -870,7 +870,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
unsigned short frametype, flags, window, timeout;
int ret;
- skb->sk = NULL; /* Initially we don't know who it's for */
+ skb_orphan(skb);
/*
* skb->data points to the netrom frame start
@@ -968,7 +968,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
window = skb->data[20];
+ sock_hold(make);
skb->sk = make;
+ skb->destructor = sock_efree;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 5a58f9f38095..291f24fef19a 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -193,13 +193,20 @@ exit:
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
struct sk_buff *skb)
{
- u8 gate = hdev->pipes[pipe].gate;
u8 status = NFC_HCI_ANY_OK;
struct hci_create_pipe_resp *create_info;
struct hci_delete_pipe_noti *delete_info;
struct hci_all_pipe_cleared_noti *cleared_info;
+ u8 gate;
- pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+ pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+ gate = hdev->pipes[pipe].gate;
switch (cmd) {
case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
@@ -387,8 +394,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
struct sk_buff *skb)
{
int r = 0;
- u8 gate = hdev->pipes[pipe].gate;
+ u8 gate;
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+ goto exit;
+ }
+ gate = hdev->pipes[pipe].gate;
if (gate == NFC_HCI_INVALID_GATE) {
pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
goto exit;
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index e31dea17473d..dd59fde1dac8 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -118,9 +118,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->service_name = kmemdup(llcp_addr.service_name,
llcp_sock->service_name_len,
GFP_KERNEL);
-
+ if (!llcp_sock->service_name) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ kfree(llcp_sock->service_name);
+ llcp_sock->service_name = NULL;
ret = -EADDRINUSE;
goto put_dev;
}
@@ -1011,10 +1016,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- if (sock->type == SOCK_RAW)
+ if (sock->type == SOCK_RAW) {
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
sock->ops = &llcp_rawsock_ops;
- else
+ } else {
sock->ops = &llcp_sock_ops;
+ }
sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
if (sk == NULL)
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index dbd24254412a..d20383779710 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -119,7 +119,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
if (!conn_info) {
rc = -EPROTO;
- goto free_exit;
+ goto exit;
}
__skb_queue_head_init(&frags_q);
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index c468eabd6943..90268b642907 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -348,7 +348,7 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
nu->rx_packet_len = -1;
nu->rx_skb = nci_skb_alloc(nu->ndev,
NCI_MAX_PACKET_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!nu->rx_skb)
return -ENOMEM;
}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index dbf74afe82fb..e79a49fe61e8 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -62,7 +62,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
.len = NFC_FIRMWARE_NAME_MAXSIZE },
+ [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+ [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+ [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
[NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
};
@@ -973,7 +976,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
int rc;
u32 idx;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_TARGET_INDEX])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -1022,7 +1026,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg = NULL;
u32 idx;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_FIRMWARE_NAME])
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
@@ -1101,7 +1106,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
local = nfc_llcp_find_local(dev);
if (!local) {
- nfc_put_device(dev);
rc = -ENODEV;
goto exit;
}
@@ -1161,7 +1165,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
local = nfc_llcp_find_local(dev);
if (!local) {
- nfc_put_device(dev);
rc = -ENODEV;
goto exit;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 05d9f42fc309..7135aff3946d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -150,8 +150,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be16 diff[] = { ~(hdr->h_proto), ethertype };
- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
- ~skb->csum);
+ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
hdr->h_proto = ethertype;
@@ -239,8 +238,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~(stack->label_stack_entry), lse };
- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
- ~skb->csum);
+ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
}
stack->label_stack_entry = lse;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index f135814c34ad..beb2897d8ddf 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/ipv6_frag.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <linux/netfilter/nf_nat.h>
@@ -708,6 +709,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
}
err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
+ if (err == NF_ACCEPT &&
+ ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ maniptype = NF_NAT_MANIP_DST;
+ else
+ maniptype = NF_NAT_MANIP_SRC;
+
+ err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
+ maniptype);
+ }
+
/* Mark NAT done if successful and update the flow key. */
if (err == NF_ACCEPT)
ovs_nat_update_key(key, skb, maniptype);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 453f806afe6e..c28f0e2a7c3c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -738,9 +738,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
{
size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
- /* OVS_FLOW_ATTR_UFID */
+ /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
+ * see ovs_nla_put_identifier()
+ */
if (sfid && ovs_identifier_is_ufid(sfid))
len += nla_total_size(sfid->ufid_len);
+ else
+ len += nla_total_size(ovs_key_attr_size());
/* OVS_FLOW_ATTR_KEY */
if (!sfid || should_fill_key(sfid, ufid_flags))
@@ -916,7 +920,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
info->snd_portid, info->snd_seq, 0,
cmd, ufid_flags);
- BUG_ON(retval < 0);
+ if (WARN_ON_ONCE(retval < 0)) {
+ kfree_skb(skb);
+ skb = ERR_PTR(retval);
+ }
return skb;
}
@@ -1343,7 +1350,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
OVS_FLOW_CMD_DEL,
ufid_flags);
rcu_read_unlock();
- BUG_ON(err < 0);
+ if (WARN_ON_ONCE(err < 0)) {
+ kfree_skb(reply);
+ goto out_free;
+ }
ovs_notify(&dp_flow_genl_family, reply, info);
} else {
@@ -1351,6 +1361,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
}
}
+out_free:
ovs_flow_free(flow, true);
return 0;
unlock:
@@ -2218,7 +2229,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
- [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 3bd4d5d0c346..50ea76180afa 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1853,14 +1853,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
struct sw_flow_actions *acts;
int new_acts_size;
- int req_size = NLA_ALIGN(attr_len);
+ size_t req_size = NLA_ALIGN(attr_len);
int next_offset = offsetof(struct sw_flow_actions, actions) +
(*sfa)->actions_len;
if (req_size <= (ksize(*sfa) - next_offset))
goto out;
- new_acts_size = ksize(*sfa) * 2;
+ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index e7da29021b38..c23392482580 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -44,7 +44,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev)
}
/* Called with rcu_read_lock_bh. */
-static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t
+internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int len, err;
@@ -63,7 +64,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
netdev->stats.tx_errors++;
}
- return 0;
+ return NETDEV_TX_OK;
}
static int internal_dev_open(struct net_device *netdev)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 14df2fcf6138..fb643945e424 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -587,7 +587,8 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
msec = 1;
div = ecmd.base.speed / 1000;
}
- }
+ } else
+ return DEFAULT_PRB_RETIRE_TOV;
mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
@@ -1331,15 +1332,21 @@ static void packet_sock_destruct(struct sock *sk)
static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
- u32 rxhash;
+ u32 *history = po->rollover->history;
+ u32 victim, rxhash;
int i, count = 0;
rxhash = skb_get_hash(skb);
for (i = 0; i < ROLLOVER_HLEN; i++)
- if (po->rollover->history[i] == rxhash)
+ if (READ_ONCE(history[i]) == rxhash)
count++;
- po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+ victim = prandom_u32() % ROLLOVER_HLEN;
+
+ /* Avoid dirtying the cache line if possible */
+ if (READ_ONCE(history[victim]) != rxhash)
+ WRITE_ONCE(history[victim], rxhash);
+
return count > (ROLLOVER_HLEN >> 1);
}
@@ -2399,6 +2406,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
+
+ if (!packet_read_pending(&po->tx_ring))
+ complete(&po->skb_completion);
}
sock_wfree(skb);
@@ -2629,7 +2639,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct net_device *dev;
struct virtio_net_hdr *vnet_hdr = NULL;
struct sockcm_cookie sockc;
@@ -2638,19 +2648,26 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
void *ph;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+ unsigned char *addr = NULL;
int tp_len, size_max;
- unsigned char *addr;
void *data;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
int hlen, tlen, copylen = 0;
+ long timeo = 0;
mutex_lock(&po->pg_vec_lock);
+ /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
+ * we need to confirm it under the protection of pg_vec_lock.
+ */
+ if (unlikely(!po->tx_ring.pg_vec)) {
+ err = -EBUSY;
+ goto out;
+ }
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2660,10 +2677,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_put;
+ if (po->sk.sk_socket->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_put;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -2688,12 +2708,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
size_max = dev->mtu + reserve + VLAN_HLEN;
+ reinit_completion(&po->skb_completion);
+
do {
ph = packet_current_frame(po, &po->tx_ring,
TP_STATUS_SEND_REQUEST);
if (unlikely(ph == NULL)) {
- if (need_wait && need_resched())
- schedule();
+ if (need_wait && skb) {
+ timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+ timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
+ if (timeo <= 0) {
+ err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
+ goto out_put;
+ }
+ }
+ /* check for additional frames */
continue;
}
@@ -2834,7 +2863,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
- unsigned char *addr;
+ unsigned char *addr = NULL;
int err, reserve = 0;
struct sockcm_cookie sockc;
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2851,7 +2880,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2859,10 +2887,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_unlock;
+ if (sock->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_unlock;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -3245,6 +3276,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
sock_init_data(sock, sk);
po = pkt_sk(sk);
+ init_completion(&po->skb_completion);
sk->sk_family = PF_PACKET;
po->num = proto;
po->xmit = dev_queue_xmit;
@@ -3278,7 +3310,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
}
mutex_lock(&net->packet.sklist_lock);
- sk_add_node_rcu(sk, &net->packet.sklist);
+ sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
@@ -3378,20 +3410,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
+ int copy_len;
+
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
__sockaddr_check_size(sizeof(struct sockaddr_pkt));
msg->msg_namelen = sizeof(struct sockaddr_pkt);
+ copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
+ copy_len = msg->msg_namelen;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+ memset(msg->msg_name +
+ offsetof(struct sockaddr_ll, sll_addr),
+ 0, sizeof(sll->sll_addr));
+ msg->msg_namelen = sizeof(struct sockaddr_ll);
+ }
}
- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
- msg->msg_namelen);
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (pkt_sk(sk)->auxdata) {
@@ -4229,7 +4270,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
struct pgv *pg_vec;
int i;
- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;
@@ -4620,14 +4661,29 @@ static void __exit packet_exit(void)
static int __init packet_init(void)
{
- int rc = proto_register(&packet_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&packet_proto, 0);
+ if (rc)
goto out;
+ rc = sock_register(&packet_family_ops);
+ if (rc)
+ goto out_proto;
+ rc = register_pernet_subsys(&packet_net_ops);
+ if (rc)
+ goto out_sock;
+ rc = register_netdevice_notifier(&packet_netdev_notifier);
+ if (rc)
+ goto out_pernet;
+
+ return 0;
- sock_register(&packet_family_ops);
- register_pernet_subsys(&packet_net_ops);
- register_netdevice_notifier(&packet_netdev_notifier);
+out_pernet:
+ unregister_pernet_subsys(&packet_net_ops);
+out_sock:
+ sock_unregister(PF_PACKET);
+out_proto:
+ proto_unregister(&packet_proto);
out:
return rc;
}
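packet_init() above now checks every registration and unwinds the ones that already succeeded, in reverse order, instead of ignoring the return values. A compact userspace sketch of that goto-unwind pattern; the register/unregister helpers are invented stand-ins, with the third one simulating a failure so the teardown path runs:

/* Userspace sketch of goto-based error unwinding. */
#include <stdio.h>

static int register_proto(void)   { return 0; }
static int register_family(void)  { return 0; }
static int register_pernet(void)  { return -1; }  /* simulated failure */

static void unregister_family(void) { puts("unregister family"); }
static void unregister_proto(void)  { puts("unregister proto"); }

static int packet_init_sketch(void)
{
        int rc;

        rc = register_proto();
        if (rc)
                goto out;
        rc = register_family();
        if (rc)
                goto out_proto;
        rc = register_pernet();
        if (rc)
                goto out_family;
        return 0;

out_family:
        unregister_family();
out_proto:
        unregister_proto();
out:
        return rc;
}

int main(void)
{
        printf("rc = %d\n", packet_init_sketch());
        return 0;
}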
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 1309e2a7baad..bbf8dd35df0d 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -125,6 +125,7 @@ struct packet_sock {
unsigned int tp_hdrlen;
unsigned int tp_reserve;
unsigned int tp_tstamp;
+ struct completion skb_completion;
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
struct packet_type prot_hook ____cacheline_aligned_in_smp;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 7b670a9a375e..41547c6e496a 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -126,6 +126,7 @@ static void __qrtr_node_release(struct kref *kref)
list_del(&node->item);
mutex_unlock(&qrtr_node_lock);
+ cancel_work_sync(&node->work);
skb_queue_purge(&node->rx_queue);
kfree(node);
}
@@ -620,20 +621,21 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
- enqueue_fn = qrtr_bcast_enqueue;
- if (addr->sq_port != QRTR_PORT_CTRL) {
+ if (addr->sq_port != QRTR_PORT_CTRL &&
+ qrtr_local_nid != QRTR_NODE_BCAST) {
release_sock(sk);
return -ENOTCONN;
}
+ enqueue_fn = qrtr_bcast_enqueue;
} else if (addr->sq_node == ipc->us.sq_node) {
enqueue_fn = qrtr_local_enqueue;
} else {
- enqueue_fn = qrtr_node_enqueue;
node = qrtr_node_lookup(addr->sq_node);
if (!node) {
release_sock(sk);
return -ECONNRESET;
}
+ enqueue_fn = qrtr_node_enqueue;
}
plen = (len + 3) & ~3;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 0efb3d2b338d..a118686cc1ec 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -138,6 +138,9 @@ static void rds_ib_add_one(struct ib_device *device)
atomic_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+ INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_sge, RDS_IB_MAX_SGE);
@@ -189,9 +192,6 @@ static void rds_ib_add_one(struct ib_device *device)
device->name,
rds_ibdev->use_fastreg ? "FRMR" : "FMR");
- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
- INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 4fe8f4fec4ee..da84d6b2f72c 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
else
pool = rds_ibdev->mr_1m_pool;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ /* Switch pools if one of the pool is reaching upper limit */
+ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ pool = rds_ibdev->mr_1m_pool;
+ else
+ pool = rds_ibdev->mr_8k_pool;
+ }
+
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 977f69886c00..78d5d68cbbce 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -416,12 +416,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
wait_clean_list_grace();
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
- if (ibmr_ret)
+ if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+ clean_nodes = clean_nodes->next;
+ }
/* more than one entry in llist nodes */
- if (clean_nodes->next)
- llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
+ if (clean_nodes)
+ llist_add_batch(clean_nodes, clean_tail,
+ &pool->clean_list);
}
@@ -442,9 +444,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 7e78dca1f252..aaf4b3d10203 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
static const char *const rds_ib_stat_names[] = {
"ib_connect_raced",
"ib_listen_closed_stale",
- "s_ib_evt_handler_call",
+ "ib_evt_handler_call",
"ib_tasklet_call",
"ib_tx_cq_event",
"ib_tx_ring_full",
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index d36effbf7614..2daba5316caa 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -527,7 +527,7 @@ static void rds_tcp_kill_sock(struct net *net)
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
- if (net != c_net || !tc->t_sock)
+ if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 884027f62783..87c35844d7d9 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -940,10 +940,13 @@ static void rfkill_sync_work(struct work_struct *work)
int __must_check rfkill_register(struct rfkill *rfkill)
{
static unsigned long rfkill_no;
- struct device *dev = &rfkill->dev;
+ struct device *dev;
int error;
- BUG_ON(!rfkill);
+ if (!rfkill)
+ return -EINVAL;
+
+ dev = &rfkill->dev;
mutex_lock(&rfkill_global_mutex);
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7ca57741b2fb..7849f286bb93 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
- char buffer[100];
- int len, faclen = 0;
+ int maxfaclen = 0;
+ int len, faclen;
+ int reserve;
- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+ len = ROSE_MIN_LEN;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
- faclen = rose_create_facilities(buffer, rose);
- len += faclen;
+ maxfaclen = 256;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
break;
}
- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+ if (!skb)
return;
/*
* Space for AX.25 header and PID.
*/
- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+ skb_reserve(skb, reserve);
- dptr = skb_put(skb, skb_tailroom(skb));
+ dptr = skb_put(skb, len);
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
- memcpy(dptr, buffer, faclen);
+ faclen = rose_create_facilities(dptr, rose);
+ skb_put(skb, faclen);
dptr += faclen;
break;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2d59c9be40e1..222c566cf25d 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -405,6 +405,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
switch (rx->sk.sk_state) {
case RXRPC_UNBOUND:
+ case RXRPC_CLIENT_UNBOUND:
rx->srx.srx_family = AF_RXRPC;
rx->srx.srx_service = 0;
rx->srx.transport_type = SOCK_DGRAM;
@@ -429,10 +430,9 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
}
rx->local = local;
- rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
+ rx->sk.sk_state = RXRPC_CLIENT_BOUND;
/* Fall through */
- case RXRPC_CLIENT_UNBOUND:
case RXRPC_CLIENT_BOUND:
if (!m->msg_name &&
test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index f60e35576526..d6a64771463f 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -401,6 +401,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
+ RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
};
/*
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 1ed18d8c9c9f..88bcd146142f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -464,7 +464,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- if (conn)
+ if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
rxrpc_disconnect_call(call);
for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
@@ -539,6 +539,7 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+ rxrpc_put_connection(call->conn);
rxrpc_put_peer(call->peer);
kfree(call->rxtx_buffer);
kfree(call->rxtx_annotations);
@@ -560,7 +561,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
- ASSERTCMP(call->conn, ==, NULL);
/* Clean up the Rx/Tx buffer */
for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 0fce919bf47d..dd41733f182a 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -736,9 +736,9 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
struct rxrpc_channel *chan = &conn->channels[channel];
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
- call->conn = NULL;
spin_lock(&conn->channel_lock);
+ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
/* Calls that have never actually been assigned a channel can simply be
* discarded. If the conn didn't get used either, it will follow
@@ -828,7 +828,6 @@ out:
spin_unlock(&rxrpc_client_conn_cache_lock);
out_2:
spin_unlock(&conn->channel_lock);
- rxrpc_put_connection(conn);
_leave("");
return;
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index e1e83af47866..e7c89b978587 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -211,9 +211,8 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
__rxrpc_disconnect_call(conn, call);
spin_unlock(&conn->channel_lock);
- call->conn = NULL;
+ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
conn->idle_timestamp = jiffies;
- rxrpc_put_connection(conn);
}
/*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index a4380e182e6c..f0ccc6a04c7a 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -582,8 +582,7 @@ ack:
immediate_ack, true,
rxrpc_propose_ack_input_data);
- if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
- rxrpc_notify_socket(call);
+ rxrpc_notify_socket(call);
_leave(" [queued]");
}
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 59d328603312..d568c96f5262 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -96,7 +96,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
*/
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
{
- struct rxrpc_connection *conn = NULL;
+ struct rxrpc_connection *conn;
struct rxrpc_ack_buffer *pkt;
struct msghdr msg;
struct kvec iov[2];
@@ -106,18 +106,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
int ret;
u8 reason;
- spin_lock_bh(&call->lock);
- if (call->conn)
- conn = rxrpc_get_connection_maybe(call->conn);
- spin_unlock_bh(&call->lock);
- if (!conn)
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt) {
- rxrpc_put_connection(conn);
+ if (!pkt)
return -ENOMEM;
- }
+
+ conn = call->conn;
msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
@@ -204,7 +200,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
}
out:
- rxrpc_put_connection(conn);
kfree(pkt);
return ret;
}
@@ -214,20 +209,18 @@ out:
*/
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
- struct rxrpc_connection *conn = NULL;
+ struct rxrpc_connection *conn;
struct rxrpc_abort_buffer pkt;
struct msghdr msg;
struct kvec iov[1];
rxrpc_serial_t serial;
int ret;
- spin_lock_bh(&call->lock);
- if (call->conn)
- conn = rxrpc_get_connection_maybe(call->conn);
- spin_unlock_bh(&call->lock);
- if (!conn)
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
+ conn = call->conn;
+
msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
@@ -255,7 +248,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
ret = kernel_sendmsg(conn->params.local->socket,
&msg, iov, 1, sizeof(pkt));
- rxrpc_put_connection(conn);
return ret;
}
@@ -400,6 +392,9 @@ send_fragmentable:
}
break;
#endif
+
+ default:
+ BUG();
}
up_write(&conn->params.local->defrag_sem);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bf13b8470c9a..80950a4384aa 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -148,6 +148,9 @@ void rxrpc_error_report(struct sock *sk)
struct rxrpc_peer *peer;
struct sk_buff *skb;
+ if (unlikely(!local))
+ return;
+
_enter("%p{%d}", sk, local->debug_id);
skb = sock_dequeue_err_skb(sk);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 67adb4ecded2..5b8f8b382a2e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -948,10 +948,15 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
static int tcf_action_add(struct net *net, struct nlattr *nla,
struct nlmsghdr *n, u32 portid, int ovr)
{
- int ret = 0;
+ int loop, ret;
LIST_HEAD(actions);
- ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ for (loop = 0; loop < 10; loop++) {
+ ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ if (ret != -EAGAIN)
+ break;
+ }
+
if (ret)
return ret;
@@ -989,10 +994,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
*/
if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
-replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
- if (ret == -EAGAIN)
- goto replay;
break;
case RTM_DELACTION:
ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index d2932dc4c83d..36e4dcdac8dc 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -477,6 +477,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
int ret = 0;
int err;
+ if (!nla)
+ return -EINVAL;
+
err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
if (err < 0)
return err;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fc3650b06192..9b4c7c42c932 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -332,7 +332,11 @@ static int __init mirred_init_module(void)
return err;
pr_info("Mirror/redirect action on\n");
- return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+ if (err)
+ unregister_netdevice_notifier(&mirred_device_notifier);
+
+ return err;
}
static void __exit mirred_cleanup_module(void)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index cf9b2fe8eac6..9bebf4dc1c8e 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -54,13 +54,14 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (tb[TCA_PEDIT_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_PEDIT_PARMS]);
+ if (!parm->nkeys)
+ return -EINVAL;
+
ksize = parm->nkeys * sizeof(struct tc_pedit_key);
if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
return -EINVAL;
if (!tcf_hash_check(tn, parm->index, a, bind)) {
- if (!parm->nkeys)
- return -EINVAL;
ret = tcf_hash_create(tn, parm->index, est, a,
&act_pedit_ops, bind, false);
if (ret)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index eee299bb6bcf..18904313bd4e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -141,6 +141,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (!atomic_read(&head->ht.nelems))
return -1;
+ flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
fl_clear_masked_range(&skb_key, &head->mask);
info = skb_tunnel_info(skb);
@@ -364,6 +365,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
};
static void fl_set_key_val(struct nlattr **tb,
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 61ddfbad2aae..fe29c576e494 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -111,6 +111,7 @@ static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
[TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
[TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
+ [TCA_MATCHALL_FLAGS] = { .type = NLA_U32 },
};
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 455fc8f83d0a..f20373588a99 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -542,8 +542,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
fp = &b->ht[h];
for (pfp = rtnl_dereference(*fp); pfp;
fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
- if (pfp == f) {
- *fp = f->next;
+ if (pfp == fold) {
+ rcu_assign_pointer(*fp, fold->next);
break;
}
}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 322438fb3ffc..8673f9817f91 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -455,10 +455,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
[TCA_RSVP_CLASSID] = { .type = NLA_U32 },
- [TCA_RSVP_DST] = { .type = NLA_BINARY,
- .len = RSVP_DST_LEN * sizeof(u32) },
- [TCA_RSVP_SRC] = { .type = NLA_BINARY,
- .len = RSVP_DST_LEN * sizeof(u32) },
+ [TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) },
+ [TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) },
[TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
};
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index db80a6440f37..ab66e2b38e66 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -301,12 +301,32 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
cp->fall_through = p->fall_through;
cp->tp = tp;
+ if (tb[TCA_TCINDEX_HASH])
+ cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+
+ if (tb[TCA_TCINDEX_MASK])
+ cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+
+ if (tb[TCA_TCINDEX_SHIFT])
+ cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+
+ if (!cp->hash) {
+ /* Hash not specified, use perfect hash if the upper limit
+ * of the hashing index is below the threshold.
+ */
+ if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+ cp->hash = (cp->mask >> cp->shift) + 1;
+ else
+ cp->hash = DEFAULT_HASH_SIZE;
+ }
+
if (p->perfect) {
int i;
if (tcindex_alloc_perfect_hash(cp) < 0)
goto errout;
- for (i = 0; i < cp->hash; i++)
+ cp->alloc_hash = cp->hash;
+ for (i = 0; i < min(cp->hash, p->hash); i++)
cp->perfect[i].res = p->perfect[i].res;
balloc = 1;
}
@@ -321,15 +341,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (old_r)
cr.res = r->res;
- if (tb[TCA_TCINDEX_HASH])
- cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-
- if (tb[TCA_TCINDEX_MASK])
- cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-
- if (tb[TCA_TCINDEX_SHIFT])
- cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
err = -EBUSY;
/* Hash already allocated, make sure that we still meet the
@@ -347,16 +358,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (tb[TCA_TCINDEX_FALL_THROUGH])
cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
- if (!cp->hash) {
- /* Hash not specified, use perfect hash if the upper limit
- * of the hashing index is below the threshold.
- */
- if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
- cp->hash = (cp->mask >> cp->shift) + 1;
- else
- cp->hash = DEFAULT_HASH_SIZE;
- }
-
if (!cp->perfect && !cp->h)
cp->alloc_hash = cp->hash;
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index fbb7ebfc58c6..d4d6f9c91e8c 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -242,6 +242,9 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
if (em->ops->change) {
+ err = -EINVAL;
+ if (em_hdr->flags & TCF_EM_SIMPLE)
+ goto errout;
err = em->ops->change(net, data, data_len, em);
if (err < 0)
goto errout;
@@ -267,12 +270,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
em->data = (unsigned long) v;
}
+ em->datalen = data_len;
}
}
em->matchid = em_hdr->matchid;
em->flags = em_hdr->flags;
- em->datalen = data_len;
em->net = net;
err = 0;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beb554aa8cfb..7a434a5c43fe 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1129,6 +1129,26 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
[TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
};
+static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], struct nlattr *opt)
+{
+ int err;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CBQ_WRROPT]) {
+ const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+
+ if (wrr->priority > TC_CBQ_MAXPRIO)
+ err = -EINVAL;
+ }
+ return err;
+}
+
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1136,7 +1156,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
struct tc_ratespec *r;
int err;
- err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
+ err = cbq_opt_parse(tb, opt);
if (err < 0)
return err;
@@ -1468,10 +1488,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
struct cbq_class *parent;
struct qdisc_rate_table *rtab = NULL;
- if (opt == NULL)
- return -EINVAL;
-
- err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
+ err = cbq_opt_parse(tb, opt);
if (err < 0)
return err;
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 5bfa79ee657c..17a0838f8074 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
struct Qdisc *sch = ctx;
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
- if (skb)
+ if (skb) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
-
- prefetch(&skb->end); /* we'll need skb_shinfo() */
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ }
return skb;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index b56d57984439..551cf193649e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -346,6 +346,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
goto errout;
err = -EINVAL;
+ if (!tb[TCA_DSMARK_INDICES])
+ goto errout;
indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
if (hweight32(indices) != 1)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b57b4de73038..9f53d4ec0e37 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -697,6 +697,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
+ [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
};
@@ -736,7 +737,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_FQ_QUANTUM]) {
u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
- if (quantum > 0)
+ if (quantum > 0 && quantum <= (1 << 20))
q->quantum = quantum;
else
err = -EINVAL;
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a5ea0e9b6be4..29b7465c9d8a 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -57,7 +57,7 @@ struct fq_codel_sched_data {
struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
u32 *backlogs; /* backlog table [flows_cnt] */
u32 flows_cnt; /* number of flows */
- u32 perturbation; /* hash perturbation */
+ siphash_key_t perturbation; /* hash perturbation */
u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
u32 drop_batch_size;
u32 memory_limit;
@@ -75,7 +75,7 @@ struct fq_codel_sched_data {
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, q->perturbation);
+ u32 hash = skb_get_hash_perturb(skb, &q->perturbation);
return reciprocal_scale(hash, q->flows_cnt);
}
@@ -482,7 +482,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
q->memory_limit = 32 << 20; /* 32 MBytes */
q->drop_batch_size = 64;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
codel_params_init(&q->cparams);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9016c8baf2aa..88ce8edf1261 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -699,7 +699,11 @@ static void qdisc_rcu_free(struct rcu_head *head)
void qdisc_destroy(struct Qdisc *qdisc)
{
- const struct Qdisc_ops *ops = qdisc->ops;
+ const struct Qdisc_ops *ops;
+
+ if (!qdisc)
+ return;
+ ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index f4b2d69973c3..1367fe94d630 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -4,11 +4,11 @@
* Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
*/
-#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/siphash.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
@@ -125,7 +125,7 @@ struct wdrr_bucket {
struct hhf_sched_data {
struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
- u32 perturbation; /* hash perturbation */
+ siphash_key_t perturbation; /* hash perturbation */
u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
u32 drop_overlimit; /* number of times max qdisc packet
* limit was hit
@@ -263,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
}
/* Get hashed flow-id of the skb. */
- hash = skb_get_hash_perturb(skb, q->perturbation);
+ hash = skb_get_hash_perturb(skb, &q->perturbation);
/* Check if this packet belongs to an already established HH flow. */
flow_pos = hash & HHF_BIT_MASK;
@@ -543,7 +543,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
- if (non_hh_quantum > INT_MAX)
+ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
return -EINVAL;
sch_tree_lock(sch);
@@ -593,7 +593,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
sch->limit = 1000;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
INIT_LIST_HEAD(&q->new_buckets);
INIT_LIST_HEAD(&q->old_buckets);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 20b7f1646f69..c3c4c09ab289 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -195,7 +195,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
+ &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 922683418e53..1451fa180e6c 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -362,8 +362,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
+ sch->cpu_bstats, &sch->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL,
&sch->qstats, sch->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 66b6e807b4ec..4a8680b1f861 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -332,7 +332,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl_q->bstats) < 0 ||
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e9812e21dbc9..e06491314d59 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -437,8 +437,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct netem_skb_cb *cb;
struct sk_buff *skb2;
struct sk_buff *segs = NULL;
- unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
- int nb = 0;
+ unsigned int prev_len = qdisc_pkt_len(skb);
int count = 1;
int rc = NET_XMIT_SUCCESS;
int rc_drop = NET_XMIT_DROP;
@@ -475,7 +474,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
* skb will be queued.
*/
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
- struct Qdisc *rootq = qdisc_root(sch);
+ struct Qdisc *rootq = qdisc_root_bh(sch);
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;
@@ -495,6 +494,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
segs = netem_segment(skb, sch, to_free);
if (!segs)
return rc_drop;
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
} else {
segs = skb;
}
@@ -510,6 +510,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb)) {
qdisc_drop(skb, sch, to_free);
+ skb = NULL;
goto finish_segs;
}
@@ -585,6 +586,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
finish_segs:
if (segs) {
+ unsigned int len, last_len;
+ int nb;
+
+ len = skb ? skb->len : 0;
+ nb = skb ? 1 : 0;
+
while (segs) {
skb2 = segs->next;
segs->next = NULL;
@@ -600,9 +607,10 @@ finish_segs:
}
segs = skb2;
}
- sch->q.qlen += nb;
- if (nb > 1)
- qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ /* Parent qdiscs accounted for 1 skb of size @prev_len */
+ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+ } else if (!skb) {
+ return NET_XMIT_DROP;
}
return NET_XMIT_SUCCESS;
}
@@ -711,7 +719,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
int i;
size_t s;
- if (n > NETEM_DIST_MAX)
+ if (!n || n > NETEM_DIST_MAX)
return -EINVAL;
s = sizeof(struct disttable) + n * sizeof(s16);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8f575899adfa..2332984ba422 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -232,8 +232,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (new == NULL)
- new = &noop_qdisc;
+ if (!new) {
+ new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, arg));
+ if (!new)
+ new = &noop_qdisc;
+ else
+ qdisc_hash_add(new);
+ }
*old = qdisc_replace(sch, new, &q->queues[band]);
return 0;
@@ -286,7 +292,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl_q->bstats) < 0 ||
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 20a350bd1b1d..bc176bd48c02 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -22,7 +22,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
@@ -48,7 +48,7 @@ struct sfb_bucket {
* (Section 4.4 of SFB reference : moving hash functions)
*/
struct sfb_bins {
- u32 perturbation; /* jhash perturbation */
+ siphash_key_t perturbation; /* siphash key */
struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
@@ -219,7 +219,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
- q->bins[slot].perturbation = prandom_u32();
+ get_random_bytes(&q->bins[slot].perturbation,
+ sizeof(q->bins[slot].perturbation));
}
static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* If using external classifiers, get result and record it. */
if (!sfb_classify(skb, fl, &ret, &salt))
goto other_drop;
- sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+ sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
} else {
- sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+ sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
}
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Inelastic flow */
if (q->double_buffering) {
sfbhash = skb_get_hash_perturb(skb,
- q->bins[slot].perturbation);
+ &q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d8c2b6baaad2..a8d82cb7f073 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -18,7 +18,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
@@ -120,7 +120,7 @@ struct sfq_sched_data {
u8 headdrop;
u8 maxdepth; /* limit of packets per flow */
- u32 perturbation;
+ siphash_key_t perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -158,7 +158,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
static unsigned int sfq_hash(const struct sfq_sched_data *q,
const struct sk_buff *skb)
{
- return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+ return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(unsigned long arg)
struct Qdisc *sch = (struct Qdisc *)arg;
struct sfq_sched_data *q = qdisc_priv(sch);
spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ siphash_key_t nkey;
+ get_random_bytes(&nkey, sizeof(nkey));
spin_lock(root_lock);
- q->perturbation = prandom_u32();
+ q->perturbation = nkey;
if (!q->filter_list && q->tail)
sfq_rehash(sch);
spin_unlock(root_lock);
@@ -681,7 +683,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
del_timer(&q->perturb_timer);
if (q->perturb_period) {
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
}
sch_tree_unlock(sch);
kfree(p);
@@ -737,7 +739,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->quantum = psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
- q->perturbation = prandom_u32();
+ get_random_bytes(&q->perturbation, sizeof(q->perturbation));
if (opt) {
int err = sfq_change(sch, opt);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 7e127cde1ccc..16e120b84118 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -81,6 +81,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
asoc->base.sk = (struct sock *)sk;
+ asoc->base.net = sock_net(sk);
sctp_endpoint_hold(asoc->ep);
sock_hold(asoc->base.sk);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 1f03065686fe..8164d0275137 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -125,10 +125,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
- /* Remember who we are attached to. */
- ep->base.sk = sk;
- sock_hold(ep->base.sk);
-
/* Create the lists of associations. */
INIT_LIST_HEAD(&ep->asocs);
@@ -165,6 +161,11 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
ep->auth_chunk_list = auth_chunks;
ep->prsctp_enable = net->sctp.prsctp_enable;
+ /* Remember who we are attached to. */
+ ep->base.sk = sk;
+ ep->base.net = sock_net(sk);
+ sock_hold(ep->base.sk);
+
return ep;
nomem_hmacs:
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 68b84d3a7cac..969fb1623e4e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -812,7 +812,7 @@ static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
return err;
asoc = t->asoc;
- if (!net_eq(sock_net(asoc->base.sk), x->net))
+ if (!net_eq(asoc->base.net, x->net))
goto out;
if (x->ep) {
if (x->ep != asoc->ep)
@@ -835,7 +835,7 @@ static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
const union sctp_addr *paddr = &t->ipaddr;
- const struct net *net = sock_net(t->asoc->base.sk);
+ const struct net *net = t->asoc->base.net;
u16 lport = htons(t->asoc->base.bind_addr.port);
u32 addr;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 31f461f955ec..34ab7f92f064 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -235,7 +235,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct dst_entry *dst = NULL;
- struct flowi6 *fl6 = &fl->u.ip6;
+ struct flowi _fl;
+ struct flowi6 *fl6 = &_fl.u.ip6;
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
@@ -245,7 +246,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
__u8 matchlen = 0;
sctp_scope_t scope;
- memset(fl6, 0, sizeof(struct flowi6));
+ memset(&_fl, 0, sizeof(_fl));
fl6->daddr = daddr->v6.sin6_addr;
fl6->fl6_dport = daddr->v6.sin6_port;
fl6->flowi6_proto = IPPROTO_SCTP;
@@ -269,8 +270,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, fl6, final_p);
- if (!asoc || saddr)
+ if (!asoc || saddr) {
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
+ }
bp = &asoc->base.bind_addr;
scope = sctp_scope(daddr);
@@ -293,6 +297,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if ((laddr->a.sa.sa_family == AF_INET6) &&
(sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
rcu_read_unlock();
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
goto out;
}
}
@@ -331,6 +337,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
if (!IS_ERR_OR_NULL(dst))
dst_release(dst);
dst = bdst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}
@@ -344,6 +352,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
dst_release(dst);
dst = bdst;
matchlen = bmatchlen;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
}
rcu_read_unlock();
@@ -352,14 +362,12 @@ out:
struct rt6_info *rt;
rt = (struct rt6_info *)dst;
- t->dst = dst;
t->dst_cookie = rt6_get_cookie(rt);
pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
- &fl6->saddr);
+ &fl->u.ip6.saddr);
} else {
t->dst = NULL;
-
pr_debug("no route\n");
}
}
@@ -973,7 +981,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
- .connect = inet_dgram_connect,
+ .connect = sctp_inet_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = sctp_getname,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 8ea8217db960..c5a2a538279b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -257,6 +257,7 @@ static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = 0;
addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -279,6 +280,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr,
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = port;
addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Initialize an address parameter from a sctp_addr and return the length
@@ -303,6 +305,7 @@ static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
saddr->v4.sin_family = AF_INET;
saddr->v4.sin_port = port;
saddr->v4.sin_addr.s_addr = fl4->saddr;
+ memset(saddr->v4.sin_zero, 0, sizeof(saddr->v4.sin_zero));
}
/* Compare two addresses exactly. */
@@ -325,6 +328,7 @@ static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
addr->v4.sin_family = AF_INET;
addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
addr->v4.sin_port = port;
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
}
/* Is this a wildcard address? */
@@ -426,14 +430,15 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
{
struct sctp_association *asoc = t->asoc;
struct rtable *rt;
- struct flowi4 *fl4 = &fl->u.ip4;
+ struct flowi _fl;
+ struct flowi4 *fl4 = &_fl.u.ip4;
struct sctp_bind_addr *bp;
struct sctp_sockaddr_entry *laddr;
struct dst_entry *dst = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
- memset(fl4, 0x0, sizeof(struct flowi4));
+ memset(&_fl, 0x0, sizeof(_fl));
fl4->daddr = daddr->v4.sin_addr.s_addr;
fl4->fl4_dport = daddr->v4.sin_port;
fl4->flowi4_proto = IPPROTO_SCTP;
@@ -451,8 +456,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
&fl4->saddr);
rt = ip_route_output_key(sock_net(sk), fl4);
- if (!IS_ERR(rt))
+ if (!IS_ERR(rt)) {
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ }
/* If there is no association or if a source address is passed, no
* more validation is required.
@@ -515,27 +523,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
false);
if (!odev || odev->ifindex != fl4->flowi4_oif) {
- if (!dst)
+ if (!dst) {
dst = &rt->dst;
- else
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
+ } else {
dst_release(&rt->dst);
+ }
continue;
}
dst_release(dst);
dst = &rt->dst;
+ t->dst = dst;
+ memcpy(fl, &_fl, sizeof(_fl));
break;
}
out_unlock:
rcu_read_unlock();
out:
- t->dst = dst;
- if (dst)
+ if (dst) {
pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
- &fl4->daddr, &fl4->saddr);
- else
+ &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+ } else {
+ t->dst = NULL;
pr_debug("no route\n");
+ }
}
/* For v4, the source address is cached in the route entry(dst). So no need
@@ -600,6 +614,7 @@ out:
static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
/* No address mapping for V4 sockets */
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
}
@@ -1013,7 +1028,7 @@ static const struct proto_ops inet_seqpacket_ops = {
.owner = THIS_MODULE,
.release = inet_release, /* Needs to be wrapped... */
.bind = inet_bind,
- .connect = inet_dgram_connect,
+ .connect = sctp_inet_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname, /* Semantics are different. */
@@ -1335,7 +1350,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
return status;
}
-static void __net_init sctp_ctrlsock_exit(struct net *net)
+static void __net_exit sctp_ctrlsock_exit(struct net *net)
{
/* Free the control endpoint. */
inet_ctl_sock_destroy(net->sctp.ctl_sock);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c345bf153bed..1133fa0830f4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -508,8 +508,8 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
*/
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
- (asoc->pf_retrans < transport->pathmaxrxt) &&
- (transport->error_count > asoc->pf_retrans)) {
+ (transport->error_count < transport->pathmaxrxt) &&
+ (transport->error_count > transport->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
SCTP_TRANSPORT_PF,
@@ -1321,8 +1321,10 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
/* Generate an INIT ACK chunk. */
new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
0);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1344,7 +1346,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
if (!new_obj) {
if (cmd->obj.chunk)
sctp_chunk_free(cmd->obj.chunk);
- goto nomem;
+ error = -ENOMEM;
+ break;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
@@ -1391,8 +1394,10 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
/* Generate a SHUTDOWN chunk. */
new_obj = sctp_make_shutdown(asoc, chunk);
- if (!new_obj)
- goto nomem;
+ if (!new_obj) {
+ error = -ENOMEM;
+ break;
+ }
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(new_obj));
break;
@@ -1721,11 +1726,17 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
break;
}
- if (error)
+ if (error) {
+ cmd = sctp_next_cmd(commands);
+ while (cmd) {
+ if (cmd->verb == SCTP_CMD_REPLY)
+ sctp_chunk_free(cmd->obj.chunk);
+ cmd = sctp_next_cmd(commands);
+ }
break;
+ }
}
-out:
/* If this is in response to a received chunk, wait until
* we are done with the packet to open the queue so that we don't
* send multiple packets in response to a single request.
@@ -1740,8 +1751,5 @@ out:
sp->data_ready_signalled = 0;
return error;
-nomem:
- error = -ENOMEM;
- goto out;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index bfd068679710..1a3c75347f48 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -177,6 +177,16 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
return 1;
}
+/* Check for format error in an ABORT chunk */
+static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
+{
+ struct sctp_errhdr *err;
+
+ sctp_walk_errors(err, chunk->chunk_hdr);
+
+ return (void *)err == (void *)chunk->chunk_end;
+}
+
/**********************************************************
* These are the state functions for handling chunk events.
**********************************************************/
@@ -2159,6 +2169,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
@@ -2201,6 +2214,9 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
@@ -2466,6 +2482,9 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
@@ -2482,15 +2501,9 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
- if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
-
- sctp_errhdr_t *err;
- sctp_walk_errors(err, chunk->chunk_hdr);
- if ((void *)err != (void *)chunk->chunk_end)
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
- }
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 93e60068800b..95f39dde1e08 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -173,29 +173,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
skb_orphan(chunk->skb);
}
+#define traverse_and_process() \
+do { \
+ msg = chunk->msg; \
+ if (msg == prev_msg) \
+ continue; \
+ list_for_each_entry(c, &msg->chunks, frag_list) { \
+ if ((clear && asoc->base.sk == c->skb->sk) || \
+ (!clear && asoc->base.sk != c->skb->sk)) \
+ cb(c); \
+ } \
+ prev_msg = msg; \
+} while (0)
+
static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ bool clear,
void (*cb)(struct sctp_chunk *))
{
+ struct sctp_datamsg *msg, *prev_msg = NULL;
struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_chunk *chunk, *c;
struct sctp_transport *t;
- struct sctp_chunk *chunk;
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->retransmit, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->sacked, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->abandoned, transmitted_list)
- cb(chunk);
+ traverse_and_process();
list_for_each_entry(chunk, &q->out_chunk_list, list)
- cb(chunk);
+ traverse_and_process();
}
/* Verify that this is a valid address. */
@@ -1074,7 +1089,7 @@ out:
*/
static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
- int addrs_size,
+ int addrs_size, int flags,
sctp_assoc_t *assoc_id)
{
struct net *net = sock_net(sk);
@@ -1092,7 +1107,6 @@ static int __sctp_connect(struct sock *sk,
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
- unsigned int f_flags = 0;
sp = sctp_sk(sk);
ep = sp->ep;
@@ -1240,13 +1254,7 @@ static int __sctp_connect(struct sock *sk,
sp->pf->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
- /* in-kernel sockets don't generally have a file allocated to them
- * if all they do is call sock_create_kern().
- */
- if (sk->sk_socket->file)
- f_flags = sk->sk_socket->file->f_flags;
-
- timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if (assoc_id)
*assoc_id = asoc->assoc_id;
@@ -1341,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
{
struct sockaddr *kaddrs;
gfp_t gfp = GFP_KERNEL;
- int err = 0;
+ int err = 0, flags = 0;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
__func__, sk, addrs, addrs_size);
@@ -1361,11 +1369,18 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
- err = -EFAULT;
- } else {
- err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+ kfree(kaddrs);
+ return -EFAULT;
}
+ /* in-kernel sockets don't generally have a file allocated to them
+ * if all they do is call sock_create_kern().
+ */
+ if (sk->sk_socket->file)
+ flags = sk->sk_socket->file->f_flags;
+
+ err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
+
kfree(kaddrs);
return err;
@@ -3979,31 +3994,36 @@ out_nounlock:
* len: the size of the address.
*/
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
- int addr_len)
+ int addr_len, int flags)
{
- int err = 0;
struct sctp_af *af;
+ int err = -EINVAL;
lock_sock(sk);
-
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
/* Validate addr_len before calling common connect/connectx routine. */
af = sctp_get_af_specific(addr->sa_family);
- if (!af || addr_len < af->sockaddr_len) {
- err = -EINVAL;
- } else {
- /* Pass correct addr len to common routine (so it knows there
- * is only one address being passed.
- */
- err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
- }
+ if (af && addr_len >= af->sockaddr_len)
+ err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
release_sock(sk);
return err;
}
+int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ if (addr_len < sizeof(uaddr->sa_family))
+ return -EINVAL;
+
+ if (uaddr->sa_family == AF_UNSPEC)
+ return -EOPNOTSUPP;
+
+ return sctp_connect(sock->sk, uaddr, addr_len, flags);
+}
+
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
@@ -7729,7 +7749,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
newinet->inet_dport = htons(asoc->peer.port);
newinet->pmtudisc = inet->pmtudisc;
- newinet->inet_id = asoc->next_tsn ^ jiffies;
+ newinet->inet_id = prandom_u32();
newinet->uc_ttl = inet->uc_ttl;
newinet->mc_loop = 1;
@@ -7873,9 +7893,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
- sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+ sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
- sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+ sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
@@ -7896,7 +7916,6 @@ struct proto sctp_prot = {
.name = "SCTP",
.owner = THIS_MODULE,
.close = sctp_close,
- .connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
@@ -7911,7 +7930,7 @@ struct proto sctp_prot = {
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
@@ -7935,7 +7954,6 @@ struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
.close = sctp_close,
- .connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
@@ -7950,7 +7968,7 @@ struct proto sctpv6_prot = {
.backlog_rcv = sctp_backlog_rcv,
.hash = sctp_hash,
.unhash = sctp_unhash,
- .get_port = sctp_get_port,
+ .no_autobind = true,
.obj_size = sizeof(struct sctp6_sock),
.sysctl_mem = sysctl_sctp_mem,
.sysctl_rmem = sysctl_sctp_rmem,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 03d71cd97ec0..5c78942550e9 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -205,7 +205,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
/* When a data chunk is sent, reset the heartbeat interval. */
expires = jiffies + sctp_transport_timeout(transport);
- if (time_before(transport->hb_timer.expires, expires) &&
+ if ((time_before(transport->hb_timer.expires, expires) ||
+ !timer_pending(&transport->hb_timer)) &&
!mod_timer(&transport->hb_timer,
expires + prandom_u32_max(transport->rto)))
sctp_transport_hold(transport);
diff --git a/net/socket.c b/net/socket.c
index d9e2989c10c4..65afc8ec68d4 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2550,15 +2550,6 @@ out_fs:
core_initcall(sock_init); /* early initcall */
-static int __init jit_init(void)
-{
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
- bpf_jit_enable = 1;
-#endif
- return 0;
-}
-pure_initcall(jit_init);
-
#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
@@ -3169,6 +3160,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCSARP:
case SIOCGARP:
case SIOCDARP:
+ case SIOCOUTQNSD:
case SIOCATMARK:
return sock_do_ioctl(net, sock, cmd, arg);
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 1d74d653e6c0..ad0dcb69395d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -63,6 +63,7 @@
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/crypto.h>
+#include <linux/atomic.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index b4b68c6e3f8b..d7775ca2fbb9 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1180,6 +1180,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
dprintk("RPC: No creds found!\n");
goto out;
} else {
+ struct timespec64 boot;
/* steal creds */
rsci.cred = ud->creds;
@@ -1200,6 +1201,9 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
&expiry, GFP_KERNEL);
if (status)
goto out;
+
+ getboottime64(&boot);
+ expiry -= boot.tv_sec;
}
rsci.h.expiry_time = expiry;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index cab50ece6f3d..24e42919a480 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,8 +54,6 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
-static void cache_fresh_locked(struct cache_head *head, time_t expiry,
- struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
struct cache_detail *detail);
@@ -100,7 +98,6 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init(&tmp->cache_list);
detail->entries --;
- cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 244eac1bd648..de18a463ac96 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2718,6 +2718,7 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
xprt = xprt_iter_xprt(&clnt->cl_xpi);
if (xps == NULL || xprt == NULL) {
rcu_read_unlock();
+ xprt_switch_put(xps);
return -EAGAIN;
}
resvport = xprt->resvport;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 600eacce653a..0ef65822fdd3 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -99,65 +99,79 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
-static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
-{
- struct list_head *q = &queue->tasks[queue->priority];
- struct rpc_task *task;
-
- if (!list_empty(q)) {
- task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
- if (task->tk_owner == queue->owner)
- list_move_tail(&task->u.tk_wait.list, q);
- }
-}
-
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
if (queue->priority != priority) {
- /* Fairness: rotate the list when changing priority */
- rpc_rotate_queue_owner(queue);
queue->priority = priority;
+ queue->nr = 1U << priority;
}
}
-static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
-{
- queue->owner = pid;
- queue->nr = RPC_BATCH_COUNT;
-}
-
static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
- rpc_set_waitqueue_owner(queue, 0);
}
/*
- * Add new request to a priority queue.
+ * Add a request to a queue list
*/
-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
- struct rpc_task *task,
- unsigned char queue_priority)
+static void
+__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
- struct list_head *q;
struct rpc_task *t;
- INIT_LIST_HEAD(&task->u.tk_wait.links);
- if (unlikely(queue_priority > queue->maxpriority))
- queue_priority = queue->maxpriority;
- if (queue_priority > queue->priority)
- rpc_set_waitqueue_priority(queue, queue_priority);
- q = &queue->tasks[queue_priority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
- list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
+ list_add_tail(&task->u.tk_wait.links,
+ &t->u.tk_wait.links);
+ /* Cache the queue head in task->u.tk_wait.list */
+ task->u.tk_wait.list.next = q;
+ task->u.tk_wait.list.prev = NULL;
return;
}
}
+ INIT_LIST_HEAD(&task->u.tk_wait.links);
list_add_tail(&task->u.tk_wait.list, q);
}
/*
+ * Remove request from a queue list
+ */
+static void
+__rpc_list_dequeue_task(struct rpc_task *task)
+{
+ struct list_head *q;
+ struct rpc_task *t;
+
+ if (task->u.tk_wait.list.prev == NULL) {
+ list_del(&task->u.tk_wait.links);
+ return;
+ }
+ if (!list_empty(&task->u.tk_wait.links)) {
+ t = list_first_entry(&task->u.tk_wait.links,
+ struct rpc_task,
+ u.tk_wait.links);
+ /* Assume __rpc_list_enqueue_task() cached the queue head */
+ q = t->u.tk_wait.list.next;
+ list_add_tail(&t->u.tk_wait.list, q);
+ list_del(&task->u.tk_wait.links);
+ }
+ list_del(&task->u.tk_wait.list);
+}
+
+/*
+ * Add new request to a priority queue.
+ */
+static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned char queue_priority)
+{
+ if (unlikely(queue_priority > queue->maxpriority))
+ queue_priority = queue->maxpriority;
+ __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
+}
+
+/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
@@ -194,13 +208,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
- struct rpc_task *t;
-
- if (!list_empty(&task->u.tk_wait.links)) {
- t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
- list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
- list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
- }
+ __rpc_list_dequeue_task(task);
}
/*
@@ -212,7 +220,8 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
- list_del(&task->u.tk_wait.list);
+ else
+ list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
@@ -481,17 +490,9 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
- if (!list_empty(q)) {
- task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
- if (queue->owner == task->tk_owner) {
- if (--queue->nr)
- goto out;
- list_move_tail(&task->u.tk_wait.list, q);
- }
- /*
- * Check if we need to switch queues.
- */
- goto new_owner;
+ if (!list_empty(q) && --queue->nr) {
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ goto out;
}
/*
@@ -503,7 +504,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
else
q = q - 1;
if (!list_empty(q)) {
- task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
@@ -513,8 +514,6 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
-new_owner:
- rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
return task;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 280fb3178708..f3f05148922a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -124,7 +124,7 @@ static struct ctl_table xs_tunables_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xprt_min_resvport_limit,
- .extra2 = &xprt_max_resvport
+ .extra2 = &xprt_max_resvport_limit
},
{
.procname = "max_resvport",
@@ -132,7 +132,7 @@ static struct ctl_table xs_tunables_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &xprt_min_resvport,
+ .extra1 = &xprt_min_resvport_limit,
.extra2 = &xprt_max_resvport_limit
},
{
@@ -1737,11 +1737,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
}
-static unsigned short xs_get_random_port(void)
+static int xs_get_random_port(void)
{
- unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
- unsigned short rand = (unsigned short) prandom_u32() % range;
- return rand + xprt_min_resvport;
+ unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
+ unsigned short range;
+ unsigned short rand;
+
+ if (max < min)
+ return -EADDRINUSE;
+ range = max - min + 1;
+ rand = (unsigned short) prandom_u32() % range;
+ return rand + min;
}
/**
@@ -1798,9 +1804,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
transport->srcport = xs_sock_getport(sock);
}
-static unsigned short xs_get_srcport(struct sock_xprt *transport)
+static int xs_get_srcport(struct sock_xprt *transport)
{
- unsigned short port = transport->srcport;
+ int port = transport->srcport;
if (port == 0 && transport->xprt.resvport)
port = xs_get_random_port();
@@ -1821,7 +1827,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
struct sockaddr_storage myaddr;
int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport);
+ int port = xs_get_srcport(transport);
unsigned short last;
/*
@@ -1839,8 +1845,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
* transport->xprt.resvport == 1) xs_get_srcport above will
* ensure that port is non-zero and we will bind as needed.
*/
- if (port == 0)
- return 0;
+ if (port <= 0)
+ return port;
memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
do {
@@ -3223,12 +3229,8 @@ static int param_set_uint_minmax(const char *val,
static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
- if (kp->arg == &xprt_min_resvport)
- return param_set_uint_minmax(val, kp,
- RPC_MIN_RESVPORT,
- xprt_max_resvport);
return param_set_uint_minmax(val, kp,
- xprt_min_resvport,
+ RPC_MIN_RESVPORT,
RPC_MAX_RESVPORT);
}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 236b043a4156..799900c0f2c9 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -71,9 +71,6 @@ static int __net_init tipc_init_net(struct net *net)
goto out_nametbl;
INIT_LIST_HEAD(&tn->dist_queue);
- err = tipc_topsrv_start(net);
- if (err)
- goto out_subscr;
err = tipc_bcast_init(net);
if (err)
@@ -82,8 +79,6 @@ static int __net_init tipc_init_net(struct net *net)
return 0;
out_bclink:
- tipc_bcast_stop(net);
-out_subscr:
tipc_nametbl_stop(net);
out_nametbl:
tipc_sk_rht_destroy(net);
@@ -93,7 +88,6 @@ out_sk_rht:
static void __net_exit tipc_exit_net(struct net *net)
{
- tipc_topsrv_stop(net);
tipc_net_stop(net);
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
@@ -107,6 +101,11 @@ static struct pernet_operations tipc_net_ops = {
.size = sizeof(struct tipc_net),
};
+static struct pernet_operations tipc_topsrv_net_ops = {
+ .init = tipc_topsrv_init_net,
+ .exit = tipc_topsrv_exit_net,
+};
+
static int __init tipc_init(void)
{
int err;
@@ -117,54 +116,62 @@ static int __init tipc_init(void)
sysctl_tipc_rmem[1] = RCVBUF_DEF;
sysctl_tipc_rmem[2] = RCVBUF_MAX;
- err = tipc_netlink_start();
+ err = tipc_register_sysctl();
if (err)
- goto out_netlink;
+ goto out_sysctl;
- err = tipc_netlink_compat_start();
+ err = register_pernet_device(&tipc_net_ops);
if (err)
- goto out_netlink_compat;
+ goto out_pernet;
err = tipc_socket_init();
if (err)
goto out_socket;
- err = tipc_register_sysctl();
+ err = register_pernet_device(&tipc_topsrv_net_ops);
if (err)
- goto out_sysctl;
-
- err = register_pernet_subsys(&tipc_net_ops);
- if (err)
- goto out_pernet;
+ goto out_pernet_topsrv;
err = tipc_bearer_setup();
if (err)
goto out_bearer;
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_netlink_compat_start();
+ if (err)
+ goto out_netlink_compat;
+
pr_info("Started in single node mode\n");
return 0;
+
+out_netlink_compat:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_bearer_cleanup();
out_bearer:
- unregister_pernet_subsys(&tipc_net_ops);
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+out_pernet_topsrv:
+ tipc_socket_stop();
+out_socket:
+ unregister_pernet_device(&tipc_net_ops);
out_pernet:
tipc_unregister_sysctl();
out_sysctl:
- tipc_socket_stop();
-out_socket:
- tipc_netlink_compat_stop();
-out_netlink_compat:
- tipc_netlink_stop();
-out_netlink:
pr_err("Unable to start in single node mode\n");
return err;
}
static void __exit tipc_exit(void)
{
- tipc_bearer_cleanup();
- unregister_pernet_subsys(&tipc_net_ops);
- tipc_netlink_stop();
tipc_netlink_compat_stop();
+ tipc_netlink_stop();
+ tipc_bearer_cleanup();
+ unregister_pernet_device(&tipc_topsrv_net_ops);
tipc_socket_stop();
+ unregister_pernet_device(&tipc_net_ops);
tipc_unregister_sysctl();
pr_info("Deactivated\n");
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 4e8647aef01c..c7406c1fdc14 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1063,7 +1063,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
- return false;
+ return true;
};
}
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 23f8899e0f8c..7ebcaff8c1c4 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -224,7 +224,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
publ->key);
}
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
/**
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d947b8210399..cdc49e680be4 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
int rep_type;
int rep_size;
int req_type;
+ int req_size;
struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
@@ -252,7 +253,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
int err;
struct sk_buff *arg;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -262,8 +264,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
- if (cmd->header)
- (*cmd->header)(msg);
+ if (cmd->header) {
+ err = (*cmd->header)(msg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return err;
+ }
+ }
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
@@ -339,7 +347,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
{
int err;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
err = __tipc_nl_compat_doit(cmd, msg);
@@ -388,7 +397,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_bearer_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
if (!string_is_valid(b->name, len))
return -EINVAL;
@@ -425,7 +439,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
if (!string_is_valid(name, len))
return -EINVAL;
@@ -517,7 +535,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
name = (char *)TLV_DATA(msg->req);
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(name, len))
return -EINVAL;
@@ -757,7 +779,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_link_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(lc->name, len))
return -EINVAL;
@@ -790,7 +817,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (!link)
return -EMSGSIZE;
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(name, len))
return -EINVAL;
@@ -943,6 +974,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
+ if (!hdr) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
nest = nla_nest_start(args, TIPC_NLA_SOCK);
if (!nest) {
@@ -1239,8 +1274,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
goto send;
}
- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (!len || !TLV_OK(msg.req, len)) {
+ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index db8fbc076e1a..fe7b0ad1d6f3 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -688,10 +688,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr = NULL;
struct tipc_link *l = le->link;
- struct tipc_media_addr *maddr;
- struct sk_buff_head xmitq;
int old_bearer_id = bearer_id;
+ struct sk_buff_head xmitq;
if (!l)
return;
@@ -713,7 +713,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
tipc_node_write_unlock(n);
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
tipc_sk_rcv(n->net, &le->inputq);
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index d62affeb2a38..5e6dc7276abe 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -358,7 +358,7 @@ static void *tipc_subscrb_connect_cb(int conid)
return (void *)tipc_subscrb_create(conid);
}
-int tipc_topsrv_start(struct net *net)
+static int tipc_topsrv_start(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
const char name[] = "topology_server";
@@ -396,7 +396,7 @@ int tipc_topsrv_start(struct net *net)
return tipc_server_start(topsrv);
}
-void tipc_topsrv_stop(struct net *net)
+static void tipc_topsrv_stop(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_server *topsrv = tn->topsrv;
@@ -405,3 +405,13 @@ void tipc_topsrv_stop(struct net *net)
kfree(topsrv->saddr);
kfree(topsrv);
}
+
+int __net_init tipc_topsrv_init_net(struct net *net)
+{
+ return tipc_topsrv_start(net);
+}
+
+void __net_exit tipc_topsrv_exit_net(struct net *net)
+{
+ tipc_topsrv_stop(net);
+}
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ffdc214c117a..814dfcef33c2 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -75,7 +75,8 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
struct tipc_name_seq *out);
u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
-int tipc_topsrv_start(struct net *net);
-void tipc_topsrv_stop(struct net *net);
+
+int __net_init tipc_topsrv_init_net(struct net *net);
+void __net_exit tipc_topsrv_exit_net(struct net *net);
#endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 1a779b1e8510..40f6d82083d7 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -37,6 +37,8 @@
#include <linux/sysctl.h>
+static int zero;
+static int one = 1;
static struct ctl_table_header *tipc_ctl_hdr;
static struct ctl_table tipc_table[] = {
@@ -45,14 +47,16 @@ static struct ctl_table tipc_table[] = {
.data = &sysctl_tipc_rmem,
.maxlen = sizeof(sysctl_tipc_rmem),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
},
{
.procname = "named_timeout",
.data = &sysctl_tipc_named_timeout,
.maxlen = sizeof(sysctl_tipc_named_timeout),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
{}
};
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 133e72654e77..05033ab05b8f 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -174,7 +174,6 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
goto tx_error;
}
- skb->dev = rt->dst.dev;
ttl = ip4_dst_hoplimit(&rt->dst);
udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0, src->port,
@@ -193,10 +192,9 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
if (err)
goto tx_error;
ttl = ip6_dst_hoplimit(ndst);
- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
- ndst->dev, &src->ipv6,
- &dst->ipv6, 0, ttl, 0, src->port,
- dst->port, false);
+ err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
+ src->port, dst->port, false);
#endif
}
return err;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index cecf51a5aec4..32ae82a5596d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -224,6 +224,8 @@ static inline void unix_release_addr(struct unix_address *addr)
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
+ *hashp = 0;
+
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
if (!sunaddr || sunaddr->sun_family != AF_UNIX)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7566395e526d..18f377306884 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -97,6 +97,7 @@
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
+#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
@@ -501,9 +502,13 @@ out:
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- static u32 port = LAST_RESERVED_PORT + 1;
+ static u32 port = 0;
struct sockaddr_vm new_addr;
+ if (!port)
+ port = LAST_RESERVED_PORT + 1 +
+ prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
if (addr->svm_port == VMADDR_PORT_ANY) {
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index f66a6010ae07..0bd5a60f3bde 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -600,28 +600,27 @@ static int __init virtio_vsock_init(void)
if (!virtio_vsock_workqueue)
return -ENOMEM;
- ret = register_virtio_driver(&virtio_vsock_driver);
+ ret = vsock_core_init(&virtio_transport.transport);
if (ret)
goto out_wq;
- ret = vsock_core_init(&virtio_transport.transport);
+ ret = register_virtio_driver(&virtio_vsock_driver);
if (ret)
- goto out_vdr;
+ goto out_vci;
return 0;
-out_vdr:
- unregister_virtio_driver(&virtio_vsock_driver);
+out_vci:
+ vsock_core_exit();
out_wq:
destroy_workqueue(virtio_vsock_workqueue);
return ret;
-
}
static void __exit virtio_vsock_exit(void)
{
- vsock_core_exit();
unregister_virtio_driver(&virtio_vsock_driver);
+ vsock_core_exit();
destroy_workqueue(virtio_vsock_workqueue);
}
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 9c07c76c504d..aa9d1c7780c3 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -601,6 +601,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
*/
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
+ const struct virtio_transport *t;
+ struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.type = le16_to_cpu(pkt->hdr.type),
@@ -611,15 +613,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
return 0;
- pkt = virtio_transport_alloc_pkt(&info, 0,
- le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port),
- le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
- if (!pkt)
+ reply = virtio_transport_alloc_pkt(&info, 0,
+ le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port),
+ le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ if (!reply)
return -ENOMEM;
- return virtio_transport_get_ops()->send_pkt(pkt);
+ t = virtio_transport_get_ops();
+ if (!t) {
+ virtio_transport_free_pkt(reply);
+ return -ENOTCONN;
+ }
+
+ return t->send_pkt(reply);
}
static void virtio_transport_wait_close(struct sock *sk, long timeout)
@@ -717,12 +725,19 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
void virtio_transport_release(struct vsock_sock *vsk)
{
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct virtio_vsock_pkt *pkt, *tmp;
struct sock *sk = &vsk->sk;
bool remove_sock = true;
lock_sock(sk);
if (sk->sk_type == SOCK_STREAM)
remove_sock = virtio_transport_close(vsk);
+
+ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
+ list_del(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
release_sock(sk);
if (remove_sock)
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 7fbf4dd07277..74554bdd9a5b 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -478,7 +478,7 @@ use_default_name:
&rdev->rfkill_ops, rdev);
if (!rdev->rfkill) {
- kfree(rdev);
+ wiphy_free(&rdev->wiphy);
return NULL;
}
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index e9e91298c70d..3cedf2c2b60b 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -6,9 +6,13 @@
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct device *pdev = wiphy_dev(wdev->wiphy);
- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
- sizeof(info->driver));
+ if (pdev->driver)
+ strlcpy(info->driver, pdev->driver->name,
+ sizeof(info->driver));
+ else
+ strlcpy(info->driver, "N/A", sizeof(info->driver));
strlcpy(info->version, init_utsname()->release, sizeof(info->version));
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 549d0a4083b3..0048f90944dd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -213,6 +213,36 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
return __cfg80211_rdev_from_attrs(netns, info->attrs);
}
+static int validate_beacon_head(const struct nlattr *attr)
+{
+ const u8 *data = nla_data(attr);
+ unsigned int len = nla_len(attr);
+ const struct element *elem;
+ const struct ieee80211_mgmt *mgmt = (void *)data;
+ unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+
+ if (len < fixedlen)
+ goto err;
+
+ if (ieee80211_hdrlen(mgmt->frame_control) !=
+ offsetof(struct ieee80211_mgmt, u.beacon))
+ goto err;
+
+ data += fixedlen;
+ len -= fixedlen;
+
+ for_each_element(elem, data, len) {
+ /* nothing */
+ }
+
+ if (for_each_element_completed(elem, data, len))
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
/* policy for the attributes */
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -265,7 +295,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_MESH_ID_LEN },
- [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+ [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_BINARY,
+ .len = ETH_ALEN },
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
@@ -302,6 +333,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
+ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
@@ -327,6 +359,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
+ [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
+ [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
@@ -375,6 +409,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MDID] = { .type = NLA_U16 },
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
+ [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
[NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
[NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
[NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
@@ -400,6 +436,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
[NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
[NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+ [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
[NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
@@ -2069,6 +2106,8 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
control_freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ memset(chandef, 0, sizeof(*chandef));
+
chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = control_freq;
@@ -2538,7 +2577,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_channel) {
int ret;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef = {};
ret = rdev_get_channel(rdev, wdev, &chandef);
if (ret == 0) {
@@ -3025,7 +3064,7 @@ static void get_key_callback(void *c, struct key_params *params)
params->cipher)))
goto nla_put_failure;
- if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
+ if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx))
goto nla_put_failure;
nla_nest_end(cookie->msg, key);
@@ -3678,6 +3717,11 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
memset(bcn, 0, sizeof(*bcn));
if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+ int ret = validate_beacon_head(attrs[NL80211_ATTR_BEACON_HEAD]);
+
+ if (ret)
+ return ret;
+
bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
if (!bcn->head_len)
@@ -5321,6 +5365,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->del_mpath)
return -EOPNOTSUPP;
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
return rdev_del_mpath(rdev, dev, dst);
}
@@ -12942,7 +12989,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -13094,7 +13141,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13136,7 +13183,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13173,7 +13220,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
if (!msg)
return;
@@ -13249,7 +13296,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
trace_cfg80211_notify_new_peer_candidate(dev, addr);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + ie_len, gfp);
if (!msg)
return;
@@ -13620,7 +13667,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return -ENOMEM;
@@ -13664,7 +13711,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -14014,6 +14061,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev->chandef = *chandef;
wdev->preset_chandef = *chandef;
+
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ !WARN_ON(!wdev->current_bss))
+ wdev->current_bss->pub.channel = chandef->chan;
+
nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_NOTIFY, 0);
}
@@ -14473,7 +14525,7 @@ void cfg80211_ft_event(struct net_device *netdev,
if (!ft_event->target_ap)
return;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
if (!msg)
return;
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 11cf83c8ad4f..8cd56eaba7d6 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -525,6 +525,10 @@ static inline int
rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
{
int ret;
+
+ if (!rdev->ops->set_wiphy_params)
+ return -EOPNOTSUPP;
+
trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
trace_rdev_return_int(&rdev->wiphy, ret);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6b7094966b4a..38ac0af730d6 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1564,7 +1564,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef = {};
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
enum nl80211_iftype iftype;
@@ -1715,21 +1715,22 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
static void handle_channel_custom(struct wiphy *wiphy,
struct ieee80211_channel *chan,
- const struct ieee80211_regdomain *regd)
+ const struct ieee80211_regdomain *regd,
+ u32 min_bw)
{
u32 bw_flags = 0;
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
u32 bw;
- for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+ for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(chan->center_freq),
regd, bw);
if (!IS_ERR(reg_rule))
break;
}
- if (IS_ERR(reg_rule)) {
+ if (IS_ERR_OR_NULL(reg_rule)) {
pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
chan->center_freq);
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
@@ -1778,8 +1779,14 @@ static void handle_band_custom(struct wiphy *wiphy,
if (!sband)
return;
+ /*
+ * We currently assume that you always want at least 20 MHz,
+ * otherwise channel 12 might get enabled if this rule is
+ * compatible to US, which permits 2402 - 2472 MHz.
+ */
for (i = 0; i < sband->n_channels; i++)
- handle_channel_custom(wiphy, &sband->channels[i], regd);
+ handle_channel_custom(wiphy, &sband->channels[i], regd,
+ MHZ_TO_KHZ(20));
}
/* Used by drivers prior to wiphy registration */
@@ -2165,7 +2172,7 @@ static void reg_process_pending_hints(void)
/* When last_request->processed becomes true this will be rescheduled */
if (lr && !lr->processed) {
- reg_process_hint(lr);
+ pr_debug("Pending regulatory request, waiting for it to be processed...\n");
return;
}
@@ -2617,8 +2624,54 @@ static void restore_regulatory_settings(bool reset_user)
schedule_work(&reg_work);
}
+static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->regulatory_flags & flag)) {
+ wdev_unlock(wdev);
+ return false;
+ }
+ wdev_unlock(wdev);
+ }
+ }
+
+ return true;
+}
+
void regulatory_hint_disconnect(void)
{
+ /* Restore of regulatory settings is not required when wiphy(s)
+ * ignore IE from connected access point but clearance of beacon hints
+ * is required when wiphy(s) supports beacon hints.
+ */
+ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) {
+ struct reg_beacon *reg_beacon, *btmp;
+
+ if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS))
+ return;
+
+ spin_lock_bh(&reg_pending_beacons_lock);
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_pending_beacons, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+ spin_unlock_bh(&reg_pending_beacons_lock);
+
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_beacon_list, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+
+ return;
+ }
+
pr_debug("All devices are disconnected, going to restore regulatory settings\n");
restore_regulatory_settings(false);
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 435f904c1be5..c60be11b5e08 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -407,6 +407,8 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
const u8 *match, int match_len,
int match_offset)
{
+ const struct element *elem;
+
/* match_offset can't be smaller than 2, unless match_len is
* zero, in which case match_offset must be zero as well.
*/
@@ -414,14 +416,10 @@ const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
(!match_len && match_offset)))
return NULL;
- while (len >= 2 && len >= ies[1] + 2) {
- if ((ies[0] == eid) &&
- (ies[1] + 2 >= match_offset + match_len) &&
- !memcmp(ies + match_offset, match, match_len))
- return ies;
-
- len -= ies[1] + 2;
- ies += ies[1] + 2;
+ for_each_element_id(elem, eid, ies, len) {
+ if (elem->datalen >= match_offset - 2 + match_len &&
+ !memcmp(elem->data + match_offset - 2, match, match_len))
+ return (void *)elem;
}
return NULL;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 42fde4a72516..f55d1c9a8950 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -653,7 +653,7 @@ __frame_add_frag(struct sk_buff *skb, struct page *page,
struct skb_shared_info *sh = skb_shinfo(skb);
int page_offset;
- page_ref_inc(page);
+ get_page(page);
page_offset = ptr - page_address(page);
skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size);
}
@@ -1051,6 +1051,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
}
cfg80211_process_rdev_events(rdev);
+ cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
}
err = rdev_change_virtual_intf(rdev, dev, ntype, flags, params);
@@ -1847,3 +1848,48 @@ bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
return false;
}
EXPORT_SYMBOL(cfg80211_is_gratuitous_arp_unsolicited_na);
+
+/* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */
+struct iapp_layer2_update {
+ u8 da[ETH_ALEN]; /* broadcast */
+ u8 sa[ETH_ALEN]; /* STA addr */
+ __be16 len; /* 6 */
+ u8 dsap; /* 0 */
+ u8 ssap; /* 0 */
+ u8 control;
+ u8 xid_info[3];
+} __packed;
+
+void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
+{
+ struct iapp_layer2_update *msg;
+ struct sk_buff *skb;
+
+ /* Send Level 2 Update Frame to update forwarding tables in layer 2
+ * bridge devices */
+
+ skb = dev_alloc_skb(sizeof(*msg));
+ if (!skb)
+ return;
+ msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg));
+
+ /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID)
+ * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
+
+ eth_broadcast_addr(msg->da);
+ ether_addr_copy(msg->sa, addr);
+ msg->len = htons(6);
+ msg->dsap = 0;
+ msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
+ msg->control = 0xaf; /* XID response lsb.1111F101.
+ * F=0 (no poll command; unsolicited frame) */
+ msg->xid_info[0] = 0x81; /* XID format identifier */
+ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */
+ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ memset(skb->cb, 0, sizeof(skb->cb));
+ netif_rx_ni(skb);
+}
+EXPORT_SYMBOL(cfg80211_send_layer2_update);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a220156cf217..b3baa34714e3 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -799,7 +799,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef = {};
int ret;
switch (wdev->iftype) {
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6250b1cfcde5..4bf0296a7c43 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -659,7 +659,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
return NULL;
}
-static int iw_handler_get_iwstats(struct net_device * dev,
+/* noinline to avoid a bogus warning with -O3 */
+static noinline int iw_handler_get_iwstats(struct net_device * dev,
struct iw_request_info * info,
union iwreq_data * wrqu,
char * extra)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 995163830a61..9e7846b7d953 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -224,6 +224,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int ret = 0;
/* call only for station! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -241,7 +242,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
if (ie) {
data->flags = 1;
data->length = ie[1];
- memcpy(ssid, ie + 2, data->length);
+ if (data->length > IW_ESSID_MAX_SIZE)
+ ret = -EINVAL;
+ else
+ memcpy(ssid, ie + 2, data->length);
}
rcu_read_unlock();
} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -251,7 +255,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
}
wdev_unlock(wdev);
- return 0;
+ return ret;
}
int cfg80211_mgd_wext_siwap(struct net_device *dev,
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 770ababb8f92..a8ca79810dcc 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
}
len = *skb->data;
- needed = 1 + (len >> 4) + (len & 0x0f);
+ needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
if (!pskb_may_pull(skb, needed)) {
/* packet is too short to hold the addresses it claims
@@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr,
sk_for_each(s, &x25_list)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
- !strcmp(addr->x25_addr,
+ !strcmp(x25_sk(s)->source_addr.x25_addr,
null_x25_address.x25_addr)) &&
s->sk_state == TCP_LISTEN) {
/*
@@ -684,11 +684,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
- len = strlen(addr->sx25_addr.x25_addr);
- for (i = 0; i < len; i++) {
- if (!isdigit(addr->sx25_addr.x25_addr[i])) {
- rc = -EINVAL;
- goto out;
+ /* check for the null_x25_address */
+ if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+ len = strlen(addr->sx25_addr.x25_addr);
+ for (i = 0; i < len; i++) {
+ if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+ rc = -EINVAL;
+ goto out;
+ }
}
}
@@ -760,6 +764,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
if (sk->sk_state == TCP_ESTABLISHED)
goto out;
+ rc = -EALREADY; /* Do nothing if call is already in progress */
+ if (sk->sk_state == TCP_SYN_SENT)
+ goto out;
+
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
@@ -806,7 +814,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
/* Now the loop */
rc = -EINPROGRESS;
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
- goto out_put_neigh;
+ goto out;
rc = x25_wait_for_connection_establishment(sk);
if (rc)
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index bda1a13628a8..c09336b5a028 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -9,6 +9,8 @@ config XFRM_ALGO
tristate
select XFRM
select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
config XFRM_USER
tristate "Transformation user configuration interface"
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 155b1591b17a..69d061d4ed4f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -336,7 +336,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
+ write_lock_bh(&policy->lock);
policy->walk.dead = 1;
+ write_unlock_bh(&policy->lock);
atomic_inc(&policy->genid);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 884f2136b34b..3734ad56b456 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2168,7 +2168,7 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+ xfrm_state_flush(net, 0, false);
flush_work(&xfrm_state_gc_work);
WARN_ON(!list_empty(&net->xfrm.state_all));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f6f91c3b2de0..feb24ca530f2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -109,7 +109,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
return 0;
uctx = nla_data(rt);
- if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
+ if (uctx->len > nla_len(rt) ||
+ uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
return -EINVAL;
return 0;
@@ -151,6 +152,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
switch (p->family) {
case AF_INET:
+ break;
+
+ case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+ break;
+#else
+ err = -EAFNOSUPPORT;
+ goto out;
+#endif
+
+ default:
+ goto out;
+ }
+
+ switch (p->sel.family) {
+ case AF_UNSPEC:
+ break;
+
+ case AF_INET:
if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
goto out;
@@ -1344,7 +1364,7 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
ret = verify_policy_dir(p->dir);
if (ret)
return ret;
- if (p->index && ((p->index & XFRM_POLICY_MAX) != p->dir))
+ if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
return -EINVAL;
return 0;
@@ -1433,20 +1453,8 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
return -EINVAL;
}
- switch (ut[i].id.proto) {
- case IPPROTO_AH:
- case IPPROTO_ESP:
- case IPPROTO_COMP:
-#if IS_ENABLED(CONFIG_IPV6)
- case IPPROTO_ROUTING:
- case IPPROTO_DSTOPTS:
-#endif
- case IPSEC_PROTO_ANY:
- break;
- default:
+ if (!xfrm_id_proto_valid(ut[i].id.proto))
return -EINVAL;
- }
-
}
return 0;
@@ -2170,6 +2178,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
err = verify_newpolicy_info(&ua->policy);
if (err)
goto free_state;
+ err = verify_sec_ctx_len(attrs);
+ if (err)
+ goto free_state;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 97913e109b14..99e5a2f63e76 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -369,7 +369,7 @@ void read_trace_pipe(void)
static char buf[4096];
ssize_t sz;
- sz = read(trace_fd, buf, sizeof(buf));
+ sz = read(trace_fd, buf, sizeof(buf) - 1);
if (sz > 0) {
buf[sz] = 0;
puts(buf);
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 9a130d31ecf2..6fbb5eb9daf3 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -33,9 +33,9 @@ static void print_ksym(__u64 addr)
return;
sym = ksym_search(addr);
printf("%s;", sym->name);
- if (!strcmp(sym->name, "sys_read"))
+ if (!strstr(sym->name, "sys_read"))
sys_read_seen = true;
- else if (!strcmp(sym->name, "sys_write"))
+ else if (!strstr(sym->name, "sys_write"))
sys_write_seen = true;
}
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index bb9988914a56..32234481ad7d 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -370,7 +370,7 @@ static uint32_t amt_host_if_call(struct amt_host_if *acmd,
unsigned int expected_sz)
{
uint32_t in_buf_sz;
- uint32_t out_buf_sz;
+ ssize_t out_buf_sz;
ssize_t written;
uint32_t status;
struct amt_host_if_resp_header *msg_hdr;
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
index 205e4cde4601..065a7e296ee3 100644
--- a/samples/pktgen/functions.sh
+++ b/samples/pktgen/functions.sh
@@ -5,6 +5,8 @@
# Author: Jesper Dangaaard Brouer
# License: GPL
+set -o errexit
+
## -- General shell logging cmds --
function err() {
local exitcode=$1
@@ -58,6 +60,7 @@ function pg_set() {
function proc_cmd() {
local result
local proc_file=$1
+ local status=0
# after shift, the remaining args are contained in $@
shift
local proc_ctrl=${PROC_DIR}/$proc_file
@@ -73,13 +76,13 @@ function proc_cmd() {
echo "cmd: $@ > $proc_ctrl"
fi
# Quoting of "$@" is important for space expansion
- echo "$@" > "$proc_ctrl"
- local status=$?
+ echo "$@" > "$proc_ctrl" || status=$?
- result=$(grep "Result: OK:" $proc_ctrl)
- # Due to pgctrl, cannot use exit code $? from grep
- if [[ "$result" == "" ]]; then
- grep "Result:" $proc_ctrl >&2
+ if [[ "$proc_file" != "pgctrl" ]]; then
+ result=$(grep "Result: OK:" $proc_ctrl) || true
+ if [[ "$result" == "" ]]; then
+ grep "Result:" $proc_ctrl >&2
+ fi
fi
if (( $status != 0 )); then
err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
@@ -105,6 +108,8 @@ function pgset() {
fi
}
+[[ $EUID -eq 0 ]] && trap 'pg_ctrl "reset"' EXIT
+
## -- General shell tricks --
function root_check_run_with_sudo() {
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 7f430778f418..558dea61db11 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -166,9 +166,7 @@ cc-ldoption = $(call try-run,\
# ld-option
# Usage: LDFLAGS += $(call ld-option, -X)
-ld-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
- $(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
+ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
# ar-option
# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index d08b6fbdfa85..1532038f6005 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -70,5 +70,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format)
KBUILD_CFLAGS += $(call cc-disable-warning, sign-compare)
KBUILD_CFLAGS += $(call cc-disable-warning, format-zero-length)
KBUILD_CFLAGS += $(call cc-disable-warning, uninitialized)
+KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
endif
endif
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 16923ba4b5b1..8cb7971b3f25 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -74,7 +74,7 @@ modpost = scripts/mod/modpost \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
$(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 12a6940741fe..b8f616545277 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -45,7 +45,7 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$x = "[0-9a-f]"; # hex character
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
- if ($arch eq 'aarch64') {
+ if ($arch =~ '^(aarch|arm)64$') {
#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
} elsif ($arch eq 'arm') {
diff --git a/scripts/coccinelle/api/stream_open.cocci b/scripts/coccinelle/api/stream_open.cocci
new file mode 100644
index 000000000000..350145da7669
--- /dev/null
+++ b/scripts/coccinelle/api/stream_open.cocci
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Author: Kirill Smelkov (kirr@nexedi.com)
+//
+// Search for stream-like files that are using nonseekable_open and convert
+// them to stream_open. A stream-like file is a file that does not use ppos in
+// its read and write. Rationale for the conversion is to avoid deadlock in
+// between read and write.
+
+virtual report
+virtual patch
+virtual explain // explain decisions in the patch (SPFLAGS="-D explain")
+
+// stream-like reader & writer - ones that do not depend on f_pos.
+@ stream_reader @
+identifier readstream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+ ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
+ {
+ ... when != ppos
+ }
+
+@ stream_writer @
+identifier writestream, ppos;
+identifier f, buf, len;
+type loff_t;
+@@
+ ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
+ {
+ ... when != ppos
+ }
+
+
+// a function that blocks
+@ blocks @
+identifier block_f;
+identifier wait_event =~ "^wait_event_.*";
+@@
+ block_f(...) {
+ ... when exists
+ wait_event(...)
+ ... when exists
+ }
+
+// stream_reader that can block inside.
+//
+// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait())
+// XXX currently reader_blocks supports only direct and 1-level indirect cases.
+@ reader_blocks_direct @
+identifier stream_reader.readstream;
+identifier wait_event =~ "^wait_event_.*";
+@@
+ readstream(...)
+ {
+ ... when exists
+ wait_event(...)
+ ... when exists
+ }
+
+@ reader_blocks_1 @
+identifier stream_reader.readstream;
+identifier blocks.block_f;
+@@
+ readstream(...)
+ {
+ ... when exists
+ block_f(...)
+ ... when exists
+ }
+
+@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
+identifier stream_reader.readstream;
+@@
+ readstream(...) {
+ ...
+ }
+
+
+// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
+//
+// XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c)
+@ fops0 @
+identifier fops;
+@@
+ struct file_operations fops = {
+ ...
+ };
+
+@ has_read @
+identifier fops0.fops;
+identifier read_f;
+@@
+ struct file_operations fops = {
+ .read = read_f,
+ };
+
+@ has_read_iter @
+identifier fops0.fops;
+identifier read_iter_f;
+@@
+ struct file_operations fops = {
+ .read_iter = read_iter_f,
+ };
+
+@ has_write @
+identifier fops0.fops;
+identifier write_f;
+@@
+ struct file_operations fops = {
+ .write = write_f,
+ };
+
+@ has_write_iter @
+identifier fops0.fops;
+identifier write_iter_f;
+@@
+ struct file_operations fops = {
+ .write_iter = write_iter_f,
+ };
+
+@ has_llseek @
+identifier fops0.fops;
+identifier llseek_f;
+@@
+ struct file_operations fops = {
+ .llseek = llseek_f,
+ };
+
+@ has_no_llseek @
+identifier fops0.fops;
+@@
+ struct file_operations fops = {
+ .llseek = no_llseek,
+ };
+
+@ has_mmap @
+identifier fops0.fops;
+identifier mmap_f;
+@@
+ struct file_operations fops = {
+ .mmap = mmap_f,
+ };
+
+@ has_copy_file_range @
+identifier fops0.fops;
+identifier copy_file_range_f;
+@@
+ struct file_operations fops = {
+ .copy_file_range = copy_file_range_f,
+ };
+
+@ has_remap_file_range @
+identifier fops0.fops;
+identifier remap_file_range_f;
+@@
+ struct file_operations fops = {
+ .remap_file_range = remap_file_range_f,
+ };
+
+@ has_splice_read @
+identifier fops0.fops;
+identifier splice_read_f;
+@@
+ struct file_operations fops = {
+ .splice_read = splice_read_f,
+ };
+
+@ has_splice_write @
+identifier fops0.fops;
+identifier splice_write_f;
+@@
+ struct file_operations fops = {
+ .splice_write = splice_write_f,
+ };
+
+
+// file_operations that is candidate for stream_open conversion - it does not
+// use mmap and other methods that assume @offset access to file.
+//
+// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
+// XXX maybe_stream.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
+@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
+identifier fops0.fops;
+@@
+ struct file_operations fops = {
+ };
+
+
+// ---- conversions ----
+
+// XXX .open = nonseekable_open -> .open = stream_open
+// XXX .open = func -> openfunc -> nonseekable_open
+
+// read & write
+//
+// if both are used in the same file_operations together with an opener -
+// under that conditions we can use stream_open instead of nonseekable_open.
+@ fops_rw depends on maybe_stream @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+identifier stream_writer.writestream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .read = readstream,
+ .write = writestream,
+ };
+
+@ report_rw depends on report @
+identifier fops_rw.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report && reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+ "ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
+
+@ script:python depends on report && !reader_blocks @
+fops << fops0.fops;
+p << report_rw.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+
+@ explain_rw_deadlocked depends on explain && reader_blocks @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read & write (was deadlock) */
+ ...>
+ }
+
+
+@ explain_rw_nodeadlock depends on explain && !reader_blocks @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read & write (no direct deadlock) */
+ ...>
+ }
+
+@ patch_rw depends on patch @
+identifier fops_rw.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// read, but not write
+@ fops_r depends on maybe_stream && !has_write @
+identifier fops0.fops, openfunc;
+identifier stream_reader.readstream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .read = readstream,
+ };
+
+@ report_r depends on report @
+identifier fops_r.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_r.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_r depends on explain @
+identifier fops_r.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* read only */
+ ...>
+ }
+
+@ patch_r depends on patch @
+identifier fops_r.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// write, but not read
+@ fops_w depends on maybe_stream && !has_read @
+identifier fops0.fops, openfunc;
+identifier stream_writer.writestream;
+@@
+ struct file_operations fops = {
+ .open = openfunc,
+ .write = writestream,
+ };
+
+@ report_w depends on report @
+identifier fops_w.openfunc;
+position p1;
+@@
+ openfunc(...) {
+ <...
+ nonseekable_open@p1
+ ...>
+ }
+
+@ script:python depends on report @
+fops << fops0.fops;
+p << report_w.p1;
+@@
+coccilib.report.print_report(p[0],
+ "WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
+
+@ explain_w depends on explain @
+identifier fops_w.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ nonseekable_open /* write only */
+ ...>
+ }
+
+@ patch_w depends on patch @
+identifier fops_w.openfunc;
+@@
+ openfunc(...) {
+ <...
+- nonseekable_open
++ stream_open
+ ...>
+ }
+
+
+// no read, no write - don't change anything
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index edde8250195c..98cf6343afcd 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -65,7 +65,7 @@ parse_symbol() {
if [[ "${cache[$module,$address]+isset}" == "isset" ]]; then
local code=${cache[$module,$address]}
else
- local code=$(addr2line -i -e "$objfile" "$address")
+ local code=$(${CROSS_COMPILE}addr2line -i -e "$objfile" "$address")
cache[$module,$address]=$code
fi
@@ -77,7 +77,7 @@ parse_symbol() {
fi
# Strip out the base of the path
- code=${code//^$basepath/""}
+ code=${code#$basepath/}
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index 790fbf6cf2d7..e4e0f6a8d07b 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -38,7 +38,6 @@ LINECOMMENT "//".*\n
#include "srcpos.h"
#include "dtc-parser.tab.h"
-YYLTYPE yylloc;
extern bool treesource_error;
/* CAUTION: this will stop working if we ever use yyless() or yyunput() */
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index ba525c2f9fc2..750f7a4e3ece 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -637,7 +637,6 @@ char *yytext;
#include "srcpos.h"
#include "dtc-parser.tab.h"
-YYLTYPE yylloc;
extern bool treesource_error;
/* CAUTION: this will stop working if we ever use yyless() or yyunput() */
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 12262c0cc691..08fe09c28bd2 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -135,8 +135,12 @@ extern void print_gimple_expr(FILE *, gimple, int, int);
extern void dump_gimple_stmt(pretty_printer *, gimple, int, int);
#endif
+#ifndef __unused
#define __unused __attribute__((__unused__))
+#endif
+#ifndef __visible
#define __visible __attribute__((visibility("default")))
+#endif
#define DECL_NAME_POINTER(node) IDENTIFIER_POINTER(DECL_NAME(node))
#define DECL_NAME_LENGTH(node) IDENTIFIER_LENGTH(DECL_NAME(node))
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 004b0ac7fa72..4644f1a83b57 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -99,7 +99,8 @@ lx-symbols command."""
attrs[n]['name'].string(): attrs[n]['address']
for n in range(int(sect_attrs['nsections']))}
args = []
- for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
+ for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+ ".text", ".text.hot", ".text.unlikely"]:
address = section_name_to_address.get(section_name)
if address:
args.append(" -s {name} {addr}".format(
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 1f22a186c18c..6402b0d36291 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -161,6 +161,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
/* exclude debugging symbols */
else if (stype == 'N')
return -1;
+ /* exclude s390 kasan local symbols */
+ else if (!strncmp(sym, ".LASANPC", 8))
+ return -1;
/* include the type field in the symbol name, so that it gets
* compressed together */
@@ -495,6 +498,8 @@ static void build_initial_tok_table(void)
table[pos] = table[i];
learn_symbol(table[pos].sym, table[pos].len);
pos++;
+ } else {
+ free(table[i].sym);
}
}
table_cnt = pos;
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 27aac273205b..fa423fcd1a92 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -1238,7 +1238,7 @@ bool conf_set_all_new_symbols(enum conf_def_mode mode)
sym_calc_value(csym);
if (mode == def_random)
- has_changed = randomize_choice_values(csym);
+ has_changed |= randomize_choice_values(csym);
else {
set_all_choice_values(csym);
has_changed = true;
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index ed29bad1f03a..96420b620963 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -201,6 +201,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
{
int res, old_count;
+ /*
+ * A NULL expr is taken to be yes, but there's also a different way to
+ * represent yes. expr_is_yes() checks for either representation.
+ */
+ if (!e1 || !e2)
+ return expr_is_yes(e1) && expr_is_yes(e2);
+
if (e1->type != e2->type)
return 0;
switch (e1->type) {
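
The expr_eq() fix checks for NULL operands before dereferencing e1->type, treating a NULL expression as "yes". A hedged sketch of the same defensive pattern for a toy expression type (expr_is_yes() below is modeled on the kconfig helper, not copied from it):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct expr { int type; };            /* 0 means "yes" in this toy model */

static bool expr_is_yes(const struct expr *e)
{
	return e == NULL || e->type == 0;
}

static bool expr_eq(const struct expr *a, const struct expr *b)
{
	/* Handle NULL before touching ->type; NULL and explicit "yes" compare equal. */
	if (a == NULL || b == NULL)
		return expr_is_yes(a) && expr_is_yes(b);
	return a->type == b->type;
}

int main(void)
{
	struct expr yes = { 0 }, other = { 7 };

	printf("%d %d\n", expr_eq(NULL, &yes), expr_eq(NULL, &other));
	return 0;
}
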
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index d58de1dc5360..510049a7bd1d 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -126,7 +126,8 @@ do_resize:
case KEY_DOWN:
break;
case KEY_BACKSPACE:
- case 127:
+ case 8: /* ^H */
+ case 127: /* ^? */
if (pos) {
wattrset(dialog, dlg.inputbox.atr);
if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index d42d534a66cd..f7049e288e93 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1046,7 +1046,7 @@ static int do_match(int key, struct match_state *state, int *ans)
state->match_direction = FIND_NEXT_MATCH_UP;
*ans = get_mext_match(state->pattern,
state->match_direction);
- } else if (key == KEY_BACKSPACE || key == 127) {
+ } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
state->pattern[strlen(state->pattern)-1] = '\0';
adj_match_dir(&state->match_direction);
} else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 4b2f44c20caf..9a65035cf787 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
case KEY_F(F_EXIT):
case KEY_F(F_BACK):
break;
- case 127:
+ case 8: /* ^H */
+ case 127: /* ^? */
case KEY_BACKSPACE:
if (cursor_position > 0) {
memmove(&result[cursor_position-1],
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 29d6699d5a06..55b4c0dc2b93 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -47,49 +47,9 @@ typedef struct {
struct devtable {
const char *device_id; /* name of table, __mod_<name>__*_device_table. */
unsigned long id_size;
- void *function;
+ int (*do_entry)(const char *filename, void *symval, char *alias);
};
-#define ___cat(a,b) a ## b
-#define __cat(a,b) ___cat(a,b)
-
-/* we need some special handling for this host tool running eventually on
- * Darwin. The Mach-O section handling is a bit different than ELF section
- * handling. The differnces in detail are:
- * a) we have segments which have sections
- * b) we need a API call to get the respective section symbols */
-#if defined(__MACH__)
-#include <mach-o/getsect.h>
-
-#define INIT_SECTION(name) do { \
- unsigned long name ## _len; \
- char *__cat(pstart_,name) = getsectdata("__TEXT", \
- #name, &__cat(name,_len)); \
- char *__cat(pstop_,name) = __cat(pstart_,name) + \
- __cat(name, _len); \
- __cat(__start_,name) = (void *)__cat(pstart_,name); \
- __cat(__stop_,name) = (void *)__cat(pstop_,name); \
- } while (0)
-#define SECTION(name) __attribute__((section("__TEXT, " #name)))
-
-struct devtable **__start___devtable, **__stop___devtable;
-#else
-#define INIT_SECTION(name) /* no-op for ELF */
-#define SECTION(name) __attribute__((section(#name)))
-
-/* We construct a table of pointers in an ELF section (pointers generally
- * go unpadded by gcc). ld creates boundary syms for us. */
-extern struct devtable *__start___devtable[], *__stop___devtable[];
-#endif /* __MACH__ */
-
-#if !defined(__used)
-# if __GNUC__ == 3 && __GNUC_MINOR__ < 3
-# define __used __attribute__((__unused__))
-# else
-# define __used __attribute__((__used__))
-# endif
-#endif
-
/* Define a variable f that holds the value of field f of struct devid
* based at address m.
*/
@@ -102,16 +62,6 @@ extern struct devtable *__start___devtable[], *__stop___devtable[];
#define DEF_FIELD_ADDR(m, devid, f) \
typeof(((struct devid *)0)->f) *f = ((m) + OFF_##devid##_##f)
-/* Add a table entry. We test function type matches while we're here. */
-#define ADD_TO_DEVTABLE(device_id, type, function) \
- static struct devtable __cat(devtable,__LINE__) = { \
- device_id + 0*sizeof((function)((const char *)NULL, \
- (void *)NULL, \
- (char *)NULL)), \
- SIZE_##type, (function) }; \
- static struct devtable *SECTION(__devtable) __used \
- __cat(devtable_ptr,__LINE__) = &__cat(devtable,__LINE__)
-
#define ADD(str, sep, cond, field) \
do { \
strcat(str, sep); \
@@ -431,7 +381,6 @@ static int do_hid_entry(const char *filename,
return 1;
}
-ADD_TO_DEVTABLE("hid", hid_device_id, do_hid_entry);
/* Looks like: ieee1394:venNmoNspNverN */
static int do_ieee1394_entry(const char *filename,
@@ -456,7 +405,6 @@ static int do_ieee1394_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ieee1394", ieee1394_device_id, do_ieee1394_entry);
/* Looks like: pci:vNdNsvNsdNbcNscNiN. */
static int do_pci_entry(const char *filename,
@@ -500,7 +448,6 @@ static int do_pci_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("pci", pci_device_id, do_pci_entry);
/* looks like: "ccw:tNmNdtNdmN" */
static int do_ccw_entry(const char *filename,
@@ -524,7 +471,6 @@ static int do_ccw_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ccw", ccw_device_id, do_ccw_entry);
/* looks like: "ap:tN" */
static int do_ap_entry(const char *filename,
@@ -535,7 +481,6 @@ static int do_ap_entry(const char *filename,
sprintf(alias, "ap:t%02X*", dev_type);
return 1;
}
-ADD_TO_DEVTABLE("ap", ap_device_id, do_ap_entry);
/* looks like: "css:tN" */
static int do_css_entry(const char *filename,
@@ -546,7 +491,6 @@ static int do_css_entry(const char *filename,
sprintf(alias, "css:t%01X", type);
return 1;
}
-ADD_TO_DEVTABLE("css", css_device_id, do_css_entry);
/* Looks like: "serio:tyNprNidNexN" */
static int do_serio_entry(const char *filename,
@@ -566,7 +510,6 @@ static int do_serio_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
* "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
@@ -604,7 +547,6 @@ static int do_acpi_entry(const char *filename,
}
return 1;
}
-ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
/* looks like: "pnp:dD" */
static void do_pnp_device_entry(void *symval, unsigned long size,
@@ -725,7 +667,6 @@ static int do_pcmcia_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
static int do_vio_entry(const char *filename, void *symval,
char *alias)
@@ -745,7 +686,6 @@ static int do_vio_entry(const char *filename, void *symval,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("vio", vio_device_id, do_vio_entry);
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -818,7 +758,6 @@ static int do_input_entry(const char *filename, void *symval,
do_input(alias, *swbit, 0, INPUT_DEVICE_ID_SW_MAX);
return 1;
}
-ADD_TO_DEVTABLE("input", input_device_id, do_input_entry);
static int do_eisa_entry(const char *filename, void *symval,
char *alias)
@@ -830,7 +769,6 @@ static int do_eisa_entry(const char *filename, void *symval,
strcat(alias, "*");
return 1;
}
-ADD_TO_DEVTABLE("eisa", eisa_device_id, do_eisa_entry);
/* Looks like: parisc:tNhvNrevNsvN */
static int do_parisc_entry(const char *filename, void *symval,
@@ -850,7 +788,6 @@ static int do_parisc_entry(const char *filename, void *symval,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("parisc", parisc_device_id, do_parisc_entry);
/* Looks like: sdio:cNvNdN. */
static int do_sdio_entry(const char *filename,
@@ -867,7 +804,6 @@ static int do_sdio_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("sdio", sdio_device_id, do_sdio_entry);
/* Looks like: ssb:vNidNrevN. */
static int do_ssb_entry(const char *filename,
@@ -884,7 +820,6 @@ static int do_ssb_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ssb", ssb_device_id, do_ssb_entry);
/* Looks like: bcma:mNidNrevNclN. */
static int do_bcma_entry(const char *filename,
@@ -903,7 +838,6 @@ static int do_bcma_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("bcma", bcma_device_id, do_bcma_entry);
/* Looks like: virtio:dNvN */
static int do_virtio_entry(const char *filename, void *symval,
@@ -919,7 +853,6 @@ static int do_virtio_entry(const char *filename, void *symval,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("virtio", virtio_device_id, do_virtio_entry);
/*
* Looks like: vmbus:guid
@@ -942,7 +875,6 @@ static int do_vmbus_entry(const char *filename, void *symval,
return 1;
}
-ADD_TO_DEVTABLE("vmbus", hv_vmbus_device_id, do_vmbus_entry);
/* Looks like: i2c:S */
static int do_i2c_entry(const char *filename, void *symval,
@@ -953,7 +885,6 @@ static int do_i2c_entry(const char *filename, void *symval,
return 1;
}
-ADD_TO_DEVTABLE("i2c", i2c_device_id, do_i2c_entry);
/* Looks like: spi:S */
static int do_spi_entry(const char *filename, void *symval,
@@ -964,7 +895,6 @@ static int do_spi_entry(const char *filename, void *symval,
return 1;
}
-ADD_TO_DEVTABLE("spi", spi_device_id, do_spi_entry);
static const struct dmifield {
const char *prefix;
@@ -1019,7 +949,6 @@ static int do_dmi_entry(const char *filename, void *symval,
strcat(alias, ":");
return 1;
}
-ADD_TO_DEVTABLE("dmi", dmi_system_id, do_dmi_entry);
static int do_platform_entry(const char *filename,
void *symval, char *alias)
@@ -1028,7 +957,6 @@ static int do_platform_entry(const char *filename,
sprintf(alias, PLATFORM_MODULE_PREFIX "%s", *name);
return 1;
}
-ADD_TO_DEVTABLE("platform", platform_device_id, do_platform_entry);
static int do_mdio_entry(const char *filename,
void *symval, char *alias)
@@ -1053,7 +981,6 @@ static int do_mdio_entry(const char *filename,
return 1;
}
-ADD_TO_DEVTABLE("mdio", mdio_device_id, do_mdio_entry);
/* Looks like: zorro:iN. */
static int do_zorro_entry(const char *filename, void *symval,
@@ -1064,7 +991,6 @@ static int do_zorro_entry(const char *filename, void *symval,
ADD(alias, "i", id != ZORRO_WILDCARD, id);
return 1;
}
-ADD_TO_DEVTABLE("zorro", zorro_device_id, do_zorro_entry);
/* looks like: "pnp:dD" */
static int do_isapnp_entry(const char *filename,
@@ -1080,7 +1006,6 @@ static int do_isapnp_entry(const char *filename,
(function >> 12) & 0x0f, (function >> 8) & 0x0f);
return 1;
}
-ADD_TO_DEVTABLE("isapnp", isapnp_device_id, do_isapnp_entry);
/* Looks like: "ipack:fNvNdN". */
static int do_ipack_entry(const char *filename,
@@ -1096,7 +1021,6 @@ static int do_ipack_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("ipack", ipack_device_id, do_ipack_entry);
/*
* Append a match expression for a single masked hex digit.
@@ -1167,7 +1091,6 @@ static int do_amba_entry(const char *filename,
return 1;
}
-ADD_TO_DEVTABLE("amba", amba_id, do_amba_entry);
/*
* looks like: "mipscdmm:tN"
@@ -1183,7 +1106,6 @@ static int do_mips_cdmm_entry(const char *filename,
sprintf(alias, "mipscdmm:t%02X*", type);
return 1;
}
-ADD_TO_DEVTABLE("mipscdmm", mips_cdmm_device_id, do_mips_cdmm_entry);
/* LOOKS like cpu:type:x86,venVVVVfamFFFFmodMMMM:feature:*,FEAT,*
* All fields are numbers. It would be nicer to use strings for vendor
@@ -1208,7 +1130,6 @@ static int do_x86cpu_entry(const char *filename, void *symval,
sprintf(alias + strlen(alias), "%04X*", feature);
return 1;
}
-ADD_TO_DEVTABLE("x86cpu", x86_cpu_id, do_x86cpu_entry);
/* LOOKS like cpu:type:*:feature:*FEAT* */
static int do_cpu_entry(const char *filename, void *symval, char *alias)
@@ -1218,7 +1139,6 @@ static int do_cpu_entry(const char *filename, void *symval, char *alias)
sprintf(alias, "cpu:type:*:feature:*%04X*", feature);
return 1;
}
-ADD_TO_DEVTABLE("cpu", cpu_feature, do_cpu_entry);
/* Looks like: mei:S:uuid:N:* */
static int do_mei_entry(const char *filename, void *symval,
@@ -1237,7 +1157,6 @@ static int do_mei_entry(const char *filename, void *symval,
return 1;
}
-ADD_TO_DEVTABLE("mei", mei_cl_device_id, do_mei_entry);
/* Looks like: rapidio:vNdNavNadN */
static int do_rio_entry(const char *filename,
@@ -1257,7 +1176,6 @@ static int do_rio_entry(const char *filename,
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("rapidio", rio_device_id, do_rio_entry);
/* Looks like: ulpi:vNpN */
static int do_ulpi_entry(const char *filename, void *symval,
@@ -1270,7 +1188,6 @@ static int do_ulpi_entry(const char *filename, void *symval,
return 1;
}
-ADD_TO_DEVTABLE("ulpi", ulpi_device_id, do_ulpi_entry);
/* Looks like: hdaudio:vNrNaN */
static int do_hda_entry(const char *filename, void *symval, char *alias)
@@ -1287,7 +1204,6 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
add_wildcard(alias);
return 1;
}
-ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
/* Looks like: fsl-mc:vNdN */
static int do_fsl_mc_entry(const char *filename, void *symval,
@@ -1299,7 +1215,6 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
return 1;
}
-ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
/* Does namelen bytes of name exactly match the symbol? */
static bool sym_is(const char *name, unsigned namelen, const char *symbol)
@@ -1313,12 +1228,11 @@ static bool sym_is(const char *name, unsigned namelen, const char *symbol)
static void do_table(void *symval, unsigned long size,
unsigned long id_size,
const char *device_id,
- void *function,
+ int (*do_entry)(const char *filename, void *symval, char *alias),
struct module *mod)
{
unsigned int i;
char alias[500];
- int (*do_entry)(const char *, void *entry, char *alias) = function;
device_id_check(mod->name, device_id, size, id_size, symval);
/* Leave last one: it's the terminator. */
@@ -1332,6 +1246,44 @@ static void do_table(void *symval, unsigned long size,
}
}
+static const struct devtable devtable[] = {
+ {"hid", SIZE_hid_device_id, do_hid_entry},
+ {"ieee1394", SIZE_ieee1394_device_id, do_ieee1394_entry},
+ {"pci", SIZE_pci_device_id, do_pci_entry},
+ {"ccw", SIZE_ccw_device_id, do_ccw_entry},
+ {"ap", SIZE_ap_device_id, do_ap_entry},
+ {"css", SIZE_css_device_id, do_css_entry},
+ {"serio", SIZE_serio_device_id, do_serio_entry},
+ {"acpi", SIZE_acpi_device_id, do_acpi_entry},
+ {"pcmcia", SIZE_pcmcia_device_id, do_pcmcia_entry},
+ {"vio", SIZE_vio_device_id, do_vio_entry},
+ {"input", SIZE_input_device_id, do_input_entry},
+ {"eisa", SIZE_eisa_device_id, do_eisa_entry},
+ {"parisc", SIZE_parisc_device_id, do_parisc_entry},
+ {"sdio", SIZE_sdio_device_id, do_sdio_entry},
+ {"ssb", SIZE_ssb_device_id, do_ssb_entry},
+ {"bcma", SIZE_bcma_device_id, do_bcma_entry},
+ {"virtio", SIZE_virtio_device_id, do_virtio_entry},
+ {"vmbus", SIZE_hv_vmbus_device_id, do_vmbus_entry},
+ {"i2c", SIZE_i2c_device_id, do_i2c_entry},
+ {"spi", SIZE_spi_device_id, do_spi_entry},
+ {"dmi", SIZE_dmi_system_id, do_dmi_entry},
+ {"platform", SIZE_platform_device_id, do_platform_entry},
+ {"mdio", SIZE_mdio_device_id, do_mdio_entry},
+ {"zorro", SIZE_zorro_device_id, do_zorro_entry},
+ {"isapnp", SIZE_isapnp_device_id, do_isapnp_entry},
+ {"ipack", SIZE_ipack_device_id, do_ipack_entry},
+ {"amba", SIZE_amba_id, do_amba_entry},
+ {"mipscdmm", SIZE_mips_cdmm_device_id, do_mips_cdmm_entry},
+ {"x86cpu", SIZE_x86_cpu_id, do_x86cpu_entry},
+ {"cpu", SIZE_cpu_feature, do_cpu_entry},
+ {"mei", SIZE_mei_cl_device_id, do_mei_entry},
+ {"rapidio", SIZE_rio_device_id, do_rio_entry},
+ {"ulpi", SIZE_ulpi_device_id, do_ulpi_entry},
+ {"hdaudio", SIZE_hda_device_id, do_hda_entry},
+ {"fslmc", SIZE_fsl_mc_device_id, do_fsl_mc_entry},
+};
+
/* Create MODULE_ALIAS() statements.
* At this time, we cannot write the actual output C source yet,
* so we write into the mod->dev_table_buf buffer. */
@@ -1386,13 +1338,14 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
else if (sym_is(name, namelen, "pnp_card"))
do_pnp_card_entries(symval, sym->st_size, mod);
else {
- struct devtable **p;
- INIT_SECTION(__devtable);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(devtable); i++) {
+ const struct devtable *p = &devtable[i];
- for (p = __start___devtable; p < __stop___devtable; p++) {
- if (sym_is(name, namelen, (*p)->device_id)) {
- do_table(symval, sym->st_size, (*p)->id_size,
- (*p)->device_id, (*p)->function, mod);
+ if (sym_is(name, namelen, p->device_id)) {
+ do_table(symval, sym->st_size, p->id_size,
+ p->device_id, p->do_entry, mod);
break;
}
}
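
The file2alias.c rework above drops the linker-section trick (ADD_TO_DEVTABLE placing pointers into a custom __devtable section, plus Darwin-specific Mach-O handling) in favor of one ordinary static const array and a linear lookup, which any host compiler handles. A small hedged sketch of that shape, with made-up handler names:

#include <stdio.h>
#include <string.h>

struct devtable {
	const char *device_id;
	int (*do_entry)(const char *name);
};

static int do_pci_entry(const char *name) { printf("pci: %s\n", name); return 1; }
static int do_usb_entry(const char *name) { printf("usb: %s\n", name); return 1; }

/* One central table instead of scattering registrations across a linker section. */
static const struct devtable devtable[] = {
	{ "pci", do_pci_entry },
	{ "usb", do_usb_entry },
};

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	const char *wanted = "usb";

	for (size_t i = 0; i < ARRAY_SIZE(devtable); i++) {
		if (strcmp(devtable[i].device_id, wanted) == 0) {
			devtable[i].do_entry("dev0");
			break;
		}
	}
	return 0;
}
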
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index fdf5bbfd00cd..9abcdf2e8dfe 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1157,6 +1157,14 @@ static const struct sectioncheck *section_mismatch(
* fromsec = text section
* refsymname = *.constprop.*
*
+ * Pattern 6:
+ * Hide section mismatch warnings for ELF local symbols. The goal
+ * is to eliminate false positive modpost warnings caused by
+ * compiler-generated ELF local symbol names such as ".LANCHOR1".
+ * Autogenerated symbol names bypass modpost's "Pattern 2"
+ * whitelisting, which relies on pattern-matching against symbol
+ * names to work. (One situation where gcc can autogenerate ELF
+ * local symbols is when "-fsection-anchors" is used.)
**/
static int secref_whitelist(const struct sectioncheck *mismatch,
const char *fromsec, const char *fromsym,
@@ -1195,6 +1203,10 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
match(fromsym, optim_symbols))
return 0;
+ /* Check for pattern 6 */
+ if (strstarts(fromsym, ".L"))
+ return 0;
+
return 1;
}
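
Pattern 6 above whitelists compiler-generated ELF local symbols purely by prefix. strstarts() is a kernel string helper; a hedged userspace equivalent is just strncmp against the prefix length:

#include <stdbool.h>
#include <string.h>
#include <stdio.h>

static bool strstarts(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	const char *syms[] = { ".LANCHOR1", "start_kernel", ".L123" };

	for (size_t i = 0; i < 3; i++)
		printf("%-12s local=%d\n", syms[i], strstarts(syms[i], ".L"));
	return 0;
}
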
diff --git a/scripts/namespace.pl b/scripts/namespace.pl
index 9f3c9d47a4a5..4dddd4c01b62 100755
--- a/scripts/namespace.pl
+++ b/scripts/namespace.pl
@@ -65,13 +65,14 @@
require 5; # at least perl 5
use strict;
use File::Find;
+use File::Spec;
my $nm = ($ENV{'NM'} || "nm") . " -p";
my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
if ($#ARGV != -1) {
print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@ sub do_nm
}
($source = $basename) =~ s/\.o$//;
if (-e "$source.c" || -e "$source.S") {
- $source = "$objtree$File::Find::dir/$source";
+ $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
} else {
- $source = "$srctree$File::Find::dir/$source";
+ $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
}
if (! -e "$source.c" && ! -e "$source.S") {
# No obvious source, exclude the object if it is conglomerate
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 5423a58d1b06..9972141f11aa 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -53,6 +53,10 @@
#define R_AARCH64_ABS64 257
#endif
+#define R_ARM_PC24 1
+#define R_ARM_THM_CALL 10
+#define R_ARM_CALL 28
+
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static char gpfx; /* prefix for global symbol name (sometimes '_') */
@@ -374,6 +378,18 @@ is_mcounted_section_name(char const *const txtname)
#define RECORD_MCOUNT_64
#include "recordmcount.h"
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
+{
+ switch (ELF32_R_TYPE(w(rp->r_info))) {
+ case R_ARM_THM_CALL:
+ case R_ARM_CALL:
+ case R_ARM_PC24:
+ return 0;
+ }
+
+ return 1;
+}
+
/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
* http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
* We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -463,6 +479,7 @@ do_file(char const *const fname)
break;
case EM_ARM: reltype = R_ARM_ABS32;
altmcount = "__gnu_mcount_nc";
+ is_fake_mcount32 = arm_is_fake_mcount;
break;
case EM_AARCH64:
reltype = R_AARCH64_ABS64;
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index b9897e2be404..04151ede8043 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -326,7 +326,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
if (!mcountsym)
mcountsym = get_mcountsym(sym0, relp, str0);
- if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+ if (mcountsym && mcountsym == Elf_r_sym(relp) &&
+ !is_fake_mcount(relp)) {
uint_t const addend =
_w(_w(relp->r_offset) - recval + mcount_adjust);
mrelp->r_offset = _w(offbase
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 966dd3924ea9..aa28c3f29809 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -72,8 +72,16 @@ scm_version()
printf -- '-svn%s' "`git svn find-rev $head`"
fi
- # Check for uncommitted changes
- if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+ # Check for uncommitted changes.
+ # First, with git-status, but --no-optional-locks is only
+ # supported in git >= 2.14, so fall back to git-diff-index if
+ # it fails. Note that git-diff-index does not refresh the
+ # index, so it may give misleading results. See
+ # git-update-index(1), git-diff-index(1), and git-status(1).
+ if {
+ git --no-optional-locks status -uno --porcelain 2>/dev/null ||
+ git diff-index --name-only HEAD
+ } | grep -qvE '^(.. )?scripts/package'; then
printf '%s' -dirty
fi
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 138120698f83..96a8d7115120 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -177,7 +177,7 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
char *tag = NULL;
size_t size = unpack_u16_chunk(e, &tag);
/* if a name is specified it must match. otherwise skip tag */
- if (name && (!size || strcmp(name, tag)))
+ if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
goto fail;
} else if (name) {
/* if a name is specified and there is no name tag fail */
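
The apparmor check rejects a tag whose last byte is not '\0' before handing it to strcmp(), since strcmp on an unterminated buffer reads past the end. A minimal sketch of validating a length-prefixed string first (function and variable names are illustrative):

#include <stdio.h>
#include <string.h>

/* Return 1 if the size-byte buffer holds a NUL-terminated string equal to name. */
static int name_matches(const char *name, const char *tag, size_t size)
{
	if (size == 0 || tag[size - 1] != '\0')
		return 0;                 /* never strcmp an unterminated buffer */
	return strcmp(name, tag) == 0;
}

int main(void)
{
	char good[] = "profile";                                /* includes the trailing NUL */
	char bad[7] = { 'p', 'r', 'o', 'f', 'i', 'l', 'e' };    /* no NUL */

	printf("%d %d\n", name_matches("profile", good, sizeof(good)),
			  name_matches("profile", bad, sizeof(bad)));
	return 0;
}
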
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 03c1652c9a1f..db3bdc91c520 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -568,7 +568,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
devcg->behavior == DEVCG_DEFAULT_ALLOW) {
rc = dev_exception_add(devcg, ex);
if (rc)
- break;
+ return rc;
} else {
/*
* in the other possible cases:
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 20e66291ca99..5155c343406e 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -298,8 +298,11 @@ static int ima_calc_file_hash_atfm(struct file *file,
rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
rc = integrity_kernel_read(file, offset, rbuf[active],
rbuf_len);
- if (rc != rbuf_len)
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
goto out3;
+ }
if (rbuf[1] && offset) {
/* Using two buffers, and it is not the first
diff --git a/security/keys/key.c b/security/keys/key.c
index 7276d1a009d4..33a9c64eeed3 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -296,6 +296,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->gid = gid;
key->perm = perm;
key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
@@ -381,7 +382,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
spin_lock(&key->user->lock);
if (delta > 0 &&
- (key->user->qnbytes + delta >= maxbytes ||
+ (key->user->qnbytes + delta > maxbytes ||
key->user->qnbytes + delta < key->user->qnbytes)) {
ret = -EDQUOT;
}
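
The key quota fix changes >= to >, so a request that lands exactly on maxbytes is allowed rather than rejected one byte early; the second clause keeps the existing wrap-around check. A toy version of the test, assuming unsigned counters:

#include <stdio.h>

/* Returns 1 if adding delta bytes would exceed maxbytes or overflow the counter. */
static int over_quota(unsigned int used, unsigned int delta, unsigned int maxbytes)
{
	return used + delta > maxbytes ||      /* '>': hitting the limit exactly is fine */
	       used + delta < used;            /* unsigned wrap-around */
}

int main(void)
{
	printf("%d\n", over_quota(90, 10, 100));   /* 0: exactly at the limit */
	printf("%d\n", over_quota(90, 11, 100));   /* 1: one byte over */
	return 0;
}
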
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 797edcf1d424..2ef853bfbb8f 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -881,8 +881,8 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
- if (newowner->qnkeys + 1 >= maxkeys ||
- newowner->qnbytes + key->quotalen >= maxbytes ||
+ if (newowner->qnkeys + 1 > maxkeys ||
+ newowner->qnbytes + key->quotalen > maxbytes ||
newowner->qnbytes + key->quotalen <
newowner->qnbytes)
goto quota_overrun;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index f60baeb338e5..b47445022d5c 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -71,6 +71,9 @@ static void request_key_auth_describe(const struct key *key,
{
struct request_key_auth *rka = key->payload.data[0];
+ if (!rka)
+ return;
+
seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_positive(key))
@@ -88,6 +91,9 @@ static long request_key_auth_read(const struct key *key,
size_t datalen;
long ret;
+ if (!rka)
+ return -EKEYREVOKED;
+
datalen = rka->callout_len;
ret = datalen;
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 52f3c550abcc..f3c473791b69 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -865,7 +865,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
if (orig->ae.xp_node) {
rc = avc_xperms_populate(node, orig->ae.xp_node);
if (rc) {
- kmem_cache_free(avc_node_cachep, node);
+ avc_node_kill(node);
goto out_unlock;
}
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d293b546a2aa..772df402c495 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -467,21 +467,43 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
-static int selinux_is_sblabel_mnt(struct super_block *sb)
+static int selinux_is_genfs_special_handling(struct super_block *sb)
{
- struct superblock_security_struct *sbsec = sb->s_security;
-
- return sbsec->behavior == SECURITY_FS_USE_XATTR ||
- sbsec->behavior == SECURITY_FS_USE_TRANS ||
- sbsec->behavior == SECURITY_FS_USE_TASK ||
- sbsec->behavior == SECURITY_FS_USE_NATIVE ||
- /* Special handling. Genfs but also in-core setxattr handler */
- !strcmp(sb->s_type->name, "sysfs") ||
+ /* Special handling. Genfs but also in-core setxattr handler */
+ return !strcmp(sb->s_type->name, "sysfs") ||
!strcmp(sb->s_type->name, "pstore") ||
!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "rootfs");
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ /*
+ * IMPORTANT: Double-check logic in this function when adding a new
+ * SECURITY_FS_USE_* definition!
+ */
+ BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
+
+ switch (sbsec->behavior) {
+ case SECURITY_FS_USE_XATTR:
+ case SECURITY_FS_USE_TRANS:
+ case SECURITY_FS_USE_TASK:
+ case SECURITY_FS_USE_NATIVE:
+ return 1;
+
+ case SECURITY_FS_USE_GENFS:
+ return selinux_is_genfs_special_handling(sb);
+
+ /* Never allow relabeling on context mounts */
+ case SECURITY_FS_USE_MNTPOINT:
+ case SECURITY_FS_USE_NONE:
+ default:
+ return 0;
+ }
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -3265,12 +3287,16 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct inode_security_struct *isec = inode_security_novalidate(inode);
+ struct superblock_security_struct *sbsec = inode->i_sb->s_security;
u32 newsid;
int rc;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+
if (!value || !size)
return -EACCES;
@@ -5984,7 +6010,10 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
*/
static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
- return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
+ int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
+ ctx, ctxlen, 0);
+ /* Do not return error when suppressing label (SBLABEL_MNT not set). */
+ return rc == -EOPNOTSUPP ? 0 : rc;
}
/*
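
The selinux_is_sblabel_mnt() rewrite turns an easy-to-miss chain of || into an explicit switch over every SECURITY_FS_USE_* behavior, with a BUILD_BUG_ON that trips the build if a new behavior is added without revisiting the function. A hedged standalone sketch of that pattern, using C11 _Static_assert in place of the kernel macro and an invented enum:

#include <stdio.h>

enum fs_use { FS_USE_XATTR, FS_USE_TRANS, FS_USE_GENFS, FS_USE_NONE, FS_USE_MAX };

/* Fails at compile time if someone adds an enumerator without updating the switch. */
_Static_assert(FS_USE_MAX == 4, "new fs_use behavior: audit is_sblabel() below");

static int is_sblabel(enum fs_use behavior)
{
	switch (behavior) {
	case FS_USE_XATTR:
	case FS_USE_TRANS:
		return 1;
	case FS_USE_GENFS:
		return 0;          /* special-cased in the real code */
	case FS_USE_NONE:
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d\n", is_sblabel(FS_USE_XATTR));
	return 0;
}
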
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c483de590ba3..af9cc839856f 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -266,6 +266,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
return v;
}
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
/*
* Initialize a policy database structure.
*/
@@ -313,8 +315,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 23e5808a0970..e5d5c7fb2dac 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -474,7 +474,7 @@ char *smk_parse_smack(const char *string, int len)
if (i == 0 || i >= SMK_LONGLABEL)
return ERR_PTR(-EINVAL);
- smack = kzalloc(i + 1, GFP_KERNEL);
+ smack = kzalloc(i + 1, GFP_NOFS);
if (smack == NULL)
return ERR_PTR(-ENOMEM);
@@ -545,7 +545,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
if (skp != NULL)
goto freeout;
- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+ skp = kzalloc(sizeof(*skp), GFP_NOFS);
if (skp == NULL) {
skp = ERR_PTR(-ENOMEM);
goto freeout;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index aeb3ba70f907..589c1c2ae6db 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -268,7 +268,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
if (!(ip->i_opflags & IOP_XATTR))
return ERR_PTR(-EOPNOTSUPP);
- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
@@ -949,7 +949,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (rc != 0)
return rc;
- } else if (bprm->unsafe)
+ }
+ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
return -EPERM;
bsp->smk_task = isp->smk_task;
@@ -4037,6 +4038,8 @@ access_check:
skp = smack_ipv6host_label(&sadd);
if (skp == NULL)
skp = smack_net_ambient;
+ if (skb == NULL)
+ break;
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
ad.a.u.net->family = family;
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index a04edff8b729..ae50d59fb810 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -74,8 +74,10 @@ static int onyx_read_register(struct onyx *onyx, u8 reg, u8 *value)
return 0;
}
v = i2c_smbus_read_byte_data(onyx->i2c, reg);
- if (v < 0)
+ if (v < 0) {
+ *value = 0;
return -1;
+ }
*value = (u8)v;
onyx->cache[ONYX_REG_CONTROL-FIRSTREGISTER] = *value;
return 0;
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 555df64d46ff..7ae8e24dc1e6 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -529,7 +529,7 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
params->buffer.fragments == 0)
return -EINVAL;
@@ -575,10 +575,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
stream->metadata_set = false;
stream->next_track = false;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
} else {
return -EPERM;
}
@@ -694,8 +691,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_SETUP:
+ if (stream->direction != SND_COMPRESS_CAPTURE)
+ return -EPERM;
+ break;
+ case SNDRV_PCM_STATE_PREPARED:
+ break;
+ default:
return -EPERM;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
if (!retval)
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -706,9 +712,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
return -EPERM;
+ default:
+ break;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
snd_compr_drain_notify(stream);
@@ -796,9 +808,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
if (retval) {
@@ -818,6 +838,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
return -EPERM;
+ /* next track doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
+
/* you can signal next track if this is intended to be a gapless stream
* and current track metadata is set
*/
@@ -835,9 +859,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
+
+ /* partial drain doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
return -EPERM;
+
/* stream can be drained only when next track has been signalled */
if (stream->next_track == false)
return -EPERM;
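
The compress_offload changes replace single-state equality checks with switch statements, so each trigger (start, stop, drain, partial drain) spells out exactly which SNDRV_PCM_STATE_* values it accepts, and XRUN returns -EPIPE instead of a generic -EPERM. A toy state check in the same style (state names shortened, errno values as in the patch):

#include <errno.h>
#include <stdio.h>

enum state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING, ST_PAUSED, ST_XRUN };

static int can_drain(enum state s)
{
	switch (s) {
	case ST_OPEN:
	case ST_SETUP:
	case ST_PREPARED:
	case ST_PAUSED:
		return -EPERM;     /* nothing queued yet, or paused: refuse */
	case ST_XRUN:
		return -EPIPE;     /* distinguish an underrun from a plain bad state */
	default:
		return 0;          /* RUNNING may drain */
	}
}

int main(void)
{
	printf("%d %d %d\n", can_drain(ST_SETUP), can_drain(ST_XRUN), can_drain(ST_RUNNING));
	return 0;
}
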
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index e2f27022b363..d7dddb75e7bb 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void)
timer->hw = hrtimer_hw;
timer->hw.resolution = resolution;
timer->hw.ticks = NANO_SEC / resolution;
+ timer->max_instances = 100; /* lower the limit */
err = snd_timer_global_register(timer);
if (err < 0) {
diff --git a/sound/core/info.c b/sound/core/info.c
index 8ab72e0f5932..358a6947342d 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -724,8 +724,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
INIT_LIST_HEAD(&entry->children);
INIT_LIST_HEAD(&entry->list);
entry->parent = parent;
- if (parent)
+ if (parent) {
+ mutex_lock(&parent->access);
list_add_tail(&entry->list, &parent->children);
+ mutex_unlock(&parent->access);
+ }
return entry;
}
@@ -809,7 +812,12 @@ void snd_info_free_entry(struct snd_info_entry * entry)
list_for_each_entry_safe(p, n, &entry->children, list)
snd_info_free_entry(p);
- list_del(&entry->list);
+ p = entry->parent;
+ if (p) {
+ mutex_lock(&p->access);
+ list_del(&entry->list);
+ mutex_unlock(&p->access);
+ }
kfree(entry->name);
if (entry->private_free)
entry->private_free(entry);
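
The info.c hunks take parent->access around both the list_add_tail() and the matching list_del(), so concurrent creation and removal of child entries cannot corrupt the parent's children list. A hedged pthread sketch of the same locking discipline (the linked list and field names are illustrative, not the ALSA structures):

#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	pthread_mutex_t access;     /* guards the children list below */
	struct entry *children;
	struct entry *parent;
};

static void add_child(struct entry *parent, struct entry *child)
{
	child->parent = parent;
	pthread_mutex_lock(&parent->access);
	child->next = parent->children;      /* list manipulation only under the lock */
	parent->children = child;
	pthread_mutex_unlock(&parent->access);
}

int main(void)
{
	struct entry parent = { .access = PTHREAD_MUTEX_INITIALIZER };
	struct entry child = { 0 };

	add_child(&parent, &child);
	printf("%d\n", parent.children == &child);
	return 0;
}
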
diff --git a/sound/core/init.c b/sound/core/init.c
index 6bda8436d765..02e96c580cb7 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -408,14 +408,7 @@ int snd_card_disconnect(struct snd_card *card)
card->shutdown = 1;
spin_unlock(&card->files_lock);
- /* phase 1: disable fops (user space) operations for ALSA API */
- mutex_lock(&snd_card_mutex);
- snd_cards[card->number] = NULL;
- clear_bit(card->number, snd_cards_lock);
- mutex_unlock(&snd_card_mutex);
-
- /* phase 2: replace file->f_op with special dummy operations */
-
+ /* replace file->f_op with special dummy operations */
spin_lock(&card->files_lock);
list_for_each_entry(mfile, &card->files_list, list) {
/* it's critical part, use endless loop */
@@ -431,7 +424,7 @@ int snd_card_disconnect(struct snd_card *card)
}
spin_unlock(&card->files_lock);
- /* phase 3: notify all connected devices about disconnection */
+ /* notify all connected devices about disconnection */
/* at this point, they cannot respond to any calls except release() */
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -447,6 +440,13 @@ int snd_card_disconnect(struct snd_card *card)
device_del(&card->card_dev);
card->registered = false;
}
+
+ /* disable fops (user space) operations for ALSA API */
+ mutex_lock(&snd_card_mutex);
+ snd_cards[card->number] = NULL;
+ clear_bit(card->number, snd_cards_lock);
+ mutex_unlock(&snd_card_mutex);
+
#ifdef CONFIG_PM
wake_up(&card->power_sleep);
#endif
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
index 2045697f449d..797d838a2f9e 100644
--- a/sound/core/oss/linear.c
+++ b/sound/core/oss/linear.c
@@ -107,6 +107,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
}
}
#endif
+ if (frames > dst_channels[0].frames)
+ frames = dst_channels[0].frames;
convert(plugin, src_channels, dst_channels, frames);
return frames;
}
diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
index 7915564bd394..3788906421a7 100644
--- a/sound/core/oss/mulaw.c
+++ b/sound/core/oss/mulaw.c
@@ -269,6 +269,8 @@ static snd_pcm_sframes_t mulaw_transfer(struct snd_pcm_plugin *plugin,
}
}
#endif
+ if (frames > dst_channels[0].frames)
+ frames = dst_channels[0].frames;
data = (struct mulaw_priv *)plugin->extra_data;
data->func(plugin, src_channels, dst_channels, frames);
return frames;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index cfb8f5896787..824097571467 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -951,6 +951,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
params_channels(params) / 8;
+ err = snd_pcm_oss_period_size(substream, params, sparams);
+ if (err < 0)
+ goto failure;
+
+ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+ if (err < 0)
+ goto failure;
+
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+ runtime->oss.periods, NULL);
+ if (err < 0)
+ goto failure;
+
+ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+ if (err < 0) {
+ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+ goto failure;
+ }
+
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
snd_pcm_oss_plugin_clear(substream);
if (!direct) {
@@ -985,27 +1007,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
}
#endif
- err = snd_pcm_oss_period_size(substream, params, sparams);
- if (err < 0)
- goto failure;
-
- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
- if (err < 0)
- goto failure;
-
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
- runtime->oss.periods, NULL);
- if (err < 0)
- goto failure;
-
- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
- goto failure;
- }
-
if (runtime->oss.trigger) {
sw_params->start_threshold = 1;
} else {
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index a84a1d3d23e5..7c5d124d538c 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->next) {
if (plugin->dst_frames)
frames = plugin->dst_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if ((snd_pcm_sframes_t)frames <= 0)
return -ENXIO;
plugin = plugin->next;
err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->prev) {
if (plugin->src_frames)
frames = plugin->src_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if ((snd_pcm_sframes_t)frames <= 0)
return -ENXIO;
plugin = plugin->prev;
err = snd_pcm_plugin_alloc(plugin, frames);
@@ -196,7 +196,9 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
return 0;
}
-snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t drv_frames)
+static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t drv_frames,
+ bool check_size)
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
int stream;
@@ -209,6 +211,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_last(plug);
while (plugin && drv_frames > 0) {
+ if (check_size && drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames)
drv_frames = plugin->src_frames(plugin, drv_frames);
@@ -220,6 +224,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
plugin_next = plugin->next;
if (plugin->dst_frames)
drv_frames = plugin->dst_frames(plugin, drv_frames);
+ if (check_size && drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
plugin = plugin_next;
}
} else
@@ -227,7 +233,9 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
return drv_frames;
}
-snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pcm_uframes_t clt_frames)
+static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t clt_frames,
+ bool check_size)
{
struct snd_pcm_plugin *plugin, *plugin_prev, *plugin_next;
snd_pcm_sframes_t frames;
@@ -248,11 +256,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
if (frames < 0)
return frames;
}
+ if (check_size && frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
plugin = plugin_next;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_last(plug);
while (plugin) {
+ if (check_size && frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames) {
frames = plugin->src_frames(plugin, frames);
@@ -266,6 +278,18 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
return frames;
}
+snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t drv_frames)
+{
+ return plug_client_size(plug, drv_frames, false);
+}
+
+snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug,
+ snd_pcm_uframes_t clt_frames)
+{
+ return plug_slave_size(plug, clt_frames, false);
+}
+
static int snd_pcm_plug_formats(struct snd_mask *mask, snd_pcm_format_t format)
{
struct snd_mask formats = *mask;
@@ -620,7 +644,7 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
src_channels = dst_channels;
plugin = next;
}
- return snd_pcm_plug_client_size(plug, frames);
+ return plug_client_size(plug, frames, true);
}
snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, struct snd_pcm_plugin_channel *dst_channels_final, snd_pcm_uframes_t size)
@@ -630,7 +654,7 @@ snd_pcm_sframes_t snd_pcm_plug_read_transfer(struct snd_pcm_substream *plug, str
snd_pcm_sframes_t frames = size;
int err;
- frames = snd_pcm_plug_slave_size(plug, frames);
+ frames = plug_slave_size(plug, frames, true);
if (frames < 0)
return frames;
diff --git a/sound/core/oss/route.c b/sound/core/oss/route.c
index c8171f5783c8..72dea04197ef 100644
--- a/sound/core/oss/route.c
+++ b/sound/core/oss/route.c
@@ -57,6 +57,8 @@ static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
return -ENXIO;
if (frames == 0)
return 0;
+ if (frames > dst_channels[0].frames)
+ frames = dst_channels[0].frames;
nsrcs = plugin->src_format.channels;
ndsts = plugin->dst_format.channels;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index f57a58ac7ae0..f09ae7efc695 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1849,8 +1849,6 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
- case SNDRV_PCM_IOCTL1_INFO:
- return 0;
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
@@ -1879,11 +1877,14 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
unsigned long flags;
- if (PCM_RUNTIME_CHECK(substream))
+ if (snd_BUG_ON(!substream))
return;
- runtime = substream->runtime;
snd_pcm_stream_lock_irqsave(substream, flags);
+ if (PCM_RUNTIME_CHECK(substream))
+ goto _unlock;
+ runtime = substream->runtime;
+
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
@@ -1894,6 +1895,7 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
#endif
_end:
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ _unlock:
snd_pcm_stream_unlock_irqrestore(substream, flags);
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 3586ab41dec4..23e17a58651b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -214,11 +214,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
strlcpy(info->subname, substream->name, sizeof(info->subname));
runtime = substream->runtime;
- /* AB: FIXME!!! This is definitely nonsense */
- if (runtime) {
- info->sync = runtime->sync;
- substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
- }
+
return 0;
}
@@ -591,6 +587,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
runtime->boundary *= 2;
+ /* clear the buffer for avoiding possible kernel info leaks */
+ if (runtime->dma_area && !substream->ops->copy)
+ memset(runtime->dma_area, 0, runtime->dma_bytes);
+
snd_pcm_timer_resolution_change(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
@@ -1258,8 +1258,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_SUSPENDED:
return -EBUSY;
+ /* unresumable PCM state; return -EBUSY for skipping suspend */
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_DISCONNECTED:
+ return -EBUSY;
+ }
runtime->trigger_master = substream;
return 0;
}
@@ -1339,6 +1346,14 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
/* FIXME: the open/close code should lock this as well */
if (substream->runtime == NULL)
continue;
+
+ /*
+ * Skip BE dai link PCM's that are internal and may
+ * not have their substream ops set.
+ */
+ if (!substream->ops)
+ continue;
+
err = snd_pcm_suspend(substream);
if (err < 0 && err != -EBUSY)
return err;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 59111cadaec2..c8b2309352d7 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/nospec.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -591,6 +592,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
return -ENXIO;
if (info->stream < 0 || info->stream > 1)
return -EINVAL;
+ info->stream = array_index_nospec(info->stream, 2);
pstr = &rmidi->streams[info->stream];
if (pstr->substream_count == 0)
return -ENOENT;
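
The rawmidi change clamps info->stream with array_index_nospec() after the bounds check, so the value cannot be used out of range under speculation (a Spectre-v1 mitigation); the helper lives in <linux/nospec.h>. A plain userspace stand-in that only captures the non-speculative shape of the idea, not the real barrier:

#include <stddef.h>
#include <stdio.h>

/* Clamp idx to [0, size) after it has already been bounds-checked.
 * The real kernel helper also defeats speculation; this sketch does not. */
static size_t index_nospec(size_t idx, size_t size)
{
	return idx < size ? idx : 0;
}

int main(void)
{
	int streams[2] = { 10, 20 };
	size_t stream = 1;

	if (stream > 1)
		return 1;                       /* reject out-of-range input */
	stream = index_nospec(stream, 2);       /* then clamp before indexing */
	printf("%d\n", streams[stream]);
	return 0;
}
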
diff --git a/sound/core/seq/oss/seq_oss_ioctl.c b/sound/core/seq/oss/seq_oss_ioctl.c
index 5b8520177b0e..7d72e3d48ad5 100644
--- a/sound/core/seq/oss/seq_oss_ioctl.c
+++ b/sound/core/seq/oss/seq_oss_ioctl.c
@@ -62,7 +62,7 @@ static int snd_seq_oss_oob_user(struct seq_oss_devinfo *dp, void __user *arg)
if (copy_from_user(ev, arg, 8))
return -EFAULT;
memset(&tmpev, 0, sizeof(tmpev));
- snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.port, dp->addr.client);
+ snd_seq_oss_fill_addr(dp, &tmpev, dp->addr.client, dp->addr.port);
tmpev.time.tick = 0;
if (! snd_seq_oss_process_event(dp, (union evrec *)ev, &tmpev)) {
snd_seq_oss_dispatch(dp, &tmpev, 0, 0);
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 9debd1b8fd28..cdfb8f92d554 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -615,6 +615,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+ snd_midi_event_reset_decode(mdev->coder);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 6a7b6aceeca9..499f3e8f4949 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -174,7 +174,7 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
memset(&event, 0, sizeof(event));
/* set dummy -- to be sure */
event.type = SNDRV_SEQ_EVENT_NOTEOFF;
- snd_seq_oss_fill_addr(dp, &event, dp->addr.port, dp->addr.client);
+ snd_seq_oss_fill_addr(dp, &event, dp->addr.client, dp->addr.port);
if (snd_seq_oss_process_event(dp, rec, &event))
return 0; /* invalid event - no need to insert queue */
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb993122..c93945917235 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
- if (dev < 0 || dev >= dp->max_synthdev)
+ if (!info)
return -ENXIO;
- if (dp->synths[dev].is_midi) {
+ if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 965473d4129c..198eea5c8c2f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -564,7 +564,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
event->queue = queue;
event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
if (real_time) {
- event->time.time = snd_seq_timer_get_cur_time(q->timer);
+ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
} else {
event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
@@ -1001,7 +1001,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
{
struct snd_seq_client *client = file->private_data;
int written = 0, len;
- int err;
+ int err, handled;
struct snd_seq_event event;
if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
@@ -1014,6 +1014,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
if (!client->accept_output || client->pool == NULL)
return -ENXIO;
+ repeat:
+ handled = 0;
/* allocate the pool now if the pool is not allocated yet */
mutex_lock(&client->ioctl_mutex);
if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
@@ -1073,12 +1075,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
0, 0, &client->ioctl_mutex);
if (err < 0)
break;
+ handled++;
__skip_event:
/* Update pointers and counts */
count -= len;
buf += len;
written += len;
+
+ /* let's have a coffee break if too many events are queued */
+ if (++handled >= 200) {
+ mutex_unlock(&client->ioctl_mutex);
+ goto repeat;
+ }
}
out:
@@ -1249,7 +1258,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
/* fill the info fields */
if (client_info->name[0])
- strlcpy(client->name, client_info->name, sizeof(client->name));
+ strscpy(client->name, client_info->name, sizeof(client->name));
client->filter = client_info->filter;
client->event_lost = client_info->event_lost;
@@ -1527,7 +1536,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
/* set queue name */
if (!info->name[0])
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
- strlcpy(q->name, info->name, sizeof(q->name));
+ strscpy(q->name, info->name, sizeof(q->name));
snd_use_lock_free(&q->use_lock);
return 0;
@@ -1589,7 +1598,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
queuefree(q);
return -EPERM;
}
- strlcpy(q->name, info->name, sizeof(q->name));
+ strscpy(q->name, info->name, sizeof(q->name));
queuefree(q);
return 0;
@@ -1630,7 +1639,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
tmr = queue->timer;
status->events = queue->tickq->cells + queue->timeq->cells;
- status->time = snd_seq_timer_get_cur_time(tmr);
+ status->time = snd_seq_timer_get_cur_time(tmr, true);
status->tick = snd_seq_timer_get_cur_tick(tmr);
status->running = tmr->running;
@@ -1813,8 +1822,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
if (cptr->type == USER_CLIENT) {
info->input_pool = cptr->data.user.fifo_pool_size;
info->input_free = info->input_pool;
- if (cptr->data.user.fifo)
- info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+ info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
} else {
info->input_pool = 0;
info->input_free = 0;
@@ -1905,20 +1913,14 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
int result;
struct snd_seq_client *sender = NULL;
struct snd_seq_client_port *sport = NULL;
- struct snd_seq_subscribers *p;
result = -EINVAL;
if ((sender = snd_seq_client_use_ptr(subs->sender.client)) == NULL)
goto __end;
if ((sport = snd_seq_port_use_ptr(sender, subs->sender.port)) == NULL)
goto __end;
- p = snd_seq_port_get_subscription(&sport->c_src, &subs->dest);
- if (p) {
- result = 0;
- *subs = p->info;
- } else
- result = -ENOENT;
-
+ result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
+ subs);
__end:
if (sport)
snd_seq_port_unlock(sport);
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 9acbed1ac982..d9f5428ee995 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
return 0;
}
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+ unsigned long flags;
+ int cells;
+
+ if (!f)
+ return 0;
+
+ snd_use_lock_use(&f->use_lock);
+ spin_lock_irqsave(&f->lock, flags);
+ cells = snd_seq_unused_cells(f->pool);
+ spin_unlock_irqrestore(&f->lock, flags);
+ snd_use_lock_free(&f->use_lock);
+ return cells;
+}
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index 062c446e7867..5d38a0d7f0cd 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
/* resize pool in fifo */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
#endif
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index f04714d70bf7..9cfe4fcee9a5 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -550,10 +550,10 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
list_del_init(list);
grp->exclusive = 0;
write_unlock_irq(&grp->list_lock);
- up_write(&grp->list_mutex);
if (!empty)
unsubscribe_port(client, port, grp, &subs->info, ack);
+ up_write(&grp->list_mutex);
}
/* connect two ports */
@@ -635,20 +635,23 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
/* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
- struct snd_seq_addr *dest_addr)
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+ struct snd_seq_addr *dest_addr,
+ struct snd_seq_port_subscribe *subs)
{
- struct snd_seq_subscribers *s, *found = NULL;
+ struct snd_seq_subscribers *s;
+ int err = -ENOENT;
down_read(&src_grp->list_mutex);
list_for_each_entry(s, &src_grp->list_head, src_list) {
if (addr_match(dest_addr, &s->info.dest)) {
- found = s;
+ *subs = s->info;
+ err = 0;
break;
}
}
up_read(&src_grp->list_mutex);
- return found;
+ return err;
}
/*
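
The reworked snd_seq_port_get_subscription() copies the matched entry into a caller-supplied buffer while list_mutex is held, instead of returning a pointer that could be freed once the lock drops. A small stand-alone illustration of the copy-out-under-lock idiom follows; the record and table types are generic stand-ins, not the ALSA structures.

#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

struct record { int key; char value[32]; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct record table[] = {
	{ 1, "first" }, { 2, "second" }, { 3, "third" },
};

/* Copy the matching record out while the lock is held; 0 or -ENOENT. */
static int table_lookup(int key, struct record *out)
{
	int err = -ENOENT;
	size_t i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].key == key) {
			*out = table[i];	/* snapshot stays valid after unlock */
			err = 0;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return err;
}

int main(void)
{
	struct record r;

	if (table_lookup(2, &r) == 0)
		printf("found: %s\n", r.value);
	return 0;
}
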
diff --git a/sound/core/seq/seq_ports.h b/sound/core/seq/seq_ports.h
index 26bd71f36c41..06003b36652e 100644
--- a/sound/core/seq/seq_ports.h
+++ b/sound/core/seq/seq_ports.h
@@ -135,7 +135,8 @@ int snd_seq_port_subscribe(struct snd_seq_client_port *port,
struct snd_seq_port_subscribe *info);
/* get matched subscriber */
-struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
- struct snd_seq_addr *dest_addr);
+int snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp,
+ struct snd_seq_addr *dest_addr,
+ struct snd_seq_port_subscribe *subs);
#endif
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 1a6dc4ff44a6..ea1aa0796276 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -261,6 +261,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
unsigned long flags;
struct snd_seq_event_cell *cell;
+ snd_seq_tick_time_t cur_tick;
+ snd_seq_real_time_t cur_time;
if (q == NULL)
return;
@@ -277,17 +279,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
__again:
/* Process tick queue... */
+ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
for (;;) {
- cell = snd_seq_prioq_cell_out(q->tickq,
- &q->timer->tick.cur_tick);
+ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
if (!cell)
break;
snd_seq_dispatch_event(cell, atomic, hop);
}
/* Process time queue... */
+ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
for (;;) {
- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
if (!cell)
break;
snd_seq_dispatch_event(cell, atomic, hop);
@@ -415,6 +418,7 @@ int snd_seq_queue_check_access(int queueid, int client)
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
struct snd_seq_queue *q = queueptr(queueid);
+ unsigned long flags;
if (q == NULL)
return -EINVAL;
@@ -424,8 +428,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
return -EPERM;
}
+ spin_lock_irqsave(&q->owner_lock, flags);
q->locked = locked ? 1 : 0;
q->owner = client;
+ spin_unlock_irqrestore(&q->owner_lock, flags);
queue_access_unlock(q);
queuefree(q);
@@ -564,15 +570,17 @@ void snd_seq_queue_client_termination(int client)
unsigned long flags;
int i;
struct snd_seq_queue *q;
+ bool matched;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
continue;
spin_lock_irqsave(&q->owner_lock, flags);
- if (q->owner == client)
+ matched = (q->owner == client);
+ if (matched)
q->klocked = 1;
spin_unlock_irqrestore(&q->owner_lock, flags);
- if (q->owner == client) {
+ if (matched) {
if (q->timer->running)
snd_seq_timer_stop(q->timer);
snd_seq_timer_reset(q->timer);
@@ -764,6 +772,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
int i, bpm;
struct snd_seq_queue *q;
struct snd_seq_timer *tmr;
+ bool locked;
+ int owner;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
@@ -775,9 +785,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
else
bpm = 0;
+ spin_lock_irq(&q->owner_lock);
+ locked = q->locked;
+ owner = q->owner;
+ spin_unlock_irq(&q->owner_lock);
+
snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
+ snd_iprintf(buffer, "owned by client : %d\n", owner);
+ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
diff --git a/sound/core/seq/seq_system.c b/sound/core/seq/seq_system.c
index 8ce1d0b40dce..ce1f1e4727ab 100644
--- a/sound/core/seq/seq_system.c
+++ b/sound/core/seq/seq_system.c
@@ -123,6 +123,7 @@ int __init snd_seq_system_client_init(void)
{
struct snd_seq_port_callback pcallbacks;
struct snd_seq_port_info *port;
+ int err;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -144,7 +145,10 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
/* register announcement port */
strcpy(port->name, "Announce");
@@ -154,16 +158,24 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
announce_port = port->addr.port;
kfree(port);
return 0;
+
+ error_port:
+ snd_seq_system_client_done();
+ kfree(port);
+ return err;
}
/* unregister our internal client */
-void __exit snd_seq_system_client_done(void)
+void snd_seq_system_client_done(void)
{
int oldsysclient = sysclient;
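
snd_seq_system_client_init() now checks the return value of each port-creation call and unwinds through a single error label instead of ignoring failures. A compact sketch of that goto-based cleanup shape is below; the register_*() and client_done() helpers are placeholders, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

static int register_timer_port(void *buf)    { (void)buf; return 0;  }
static int register_announce_port(void *buf) { (void)buf; return -1; }
static void client_done(void)                { puts("cleanup: client torn down"); }

static int client_init(void)
{
	void *buf;
	int err;

	buf = malloc(64);
	if (!buf)
		return -1;

	err = register_timer_port(buf);
	if (err < 0)
		goto error;

	err = register_announce_port(buf);
	if (err < 0)
		goto error;

	free(buf);
	return 0;

error:
	client_done();	/* undo whatever was registered so far */
	free(buf);	/* temporary buffer is freed on both paths */
	return err;
}

int main(void)
{
	printf("client_init() = %d\n", client_init());
	return 0;
}
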
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index b80985fbc334..bd5e5a5d52a8 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -436,14 +436,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
}
/* return current 'real' time. use timeofday() to get better granularity. */
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+ bool adjust_ktime)
{
snd_seq_real_time_t cur_time;
unsigned long flags;
spin_lock_irqsave(&tmr->lock, flags);
cur_time = tmr->cur_time;
- if (tmr->running) {
+ if (adjust_ktime && tmr->running) {
struct timespec64 tm;
ktime_get_ts64(&tm);
@@ -460,7 +461,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
high PPQ values) */
snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
{
- return tmr->tick.cur_tick;
+ snd_seq_tick_time_t cur_tick;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ cur_tick = tmr->tick.cur_tick;
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return cur_tick;
}
@@ -479,15 +486,19 @@ void snd_seq_info_timer_read(struct snd_info_entry *entry,
q = queueptr(idx);
if (q == NULL)
continue;
- if ((tmr = q->timer) == NULL ||
- (ti = tmr->timeri) == NULL) {
- queuefree(q);
- continue;
- }
+ mutex_lock(&q->timer_mutex);
+ tmr = q->timer;
+ if (!tmr)
+ goto unlock;
+ ti = tmr->timeri;
+ if (!ti)
+ goto unlock;
snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
resolution = snd_timer_resolution(ti) * tmr->ticks;
snd_iprintf(buffer, " Period time : %lu.%09lu\n", resolution / 1000000000, resolution % 1000000000);
snd_iprintf(buffer, " Skew : %u / %u\n", tmr->skew, tmr->skew_base);
+unlock:
+ mutex_unlock(&q->timer_mutex);
queuefree(q);
}
}
diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
index 9506b661fe5b..5d47d559465e 100644
--- a/sound/core/seq/seq_timer.h
+++ b/sound/core/seq/seq_timer.h
@@ -135,7 +135,8 @@ int snd_seq_timer_set_ppq(struct snd_seq_timer *tmr, int ppq);
int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+ bool adjust_ktime);
snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
extern int seq_default_timer_class;
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 1ebb34656241..d6fc84c11796 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -95,6 +95,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
continue;
snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+ snd_midi_event_reset_decode(vmidi->parser);
} else {
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 152254193c69..f8a4b2a2f8f6 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -87,6 +87,9 @@ static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
+#define MAX_SLAVE_INSTANCES 1000
+static int num_slaves;
+
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
@@ -179,7 +182,7 @@ static void snd_timer_request(struct snd_timer_id *tid)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_slave(struct snd_timer_instance *slave)
+static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
@@ -189,16 +192,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list,
&master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
- return;
+ return 0;
}
}
}
+ return 0;
}
/*
@@ -207,7 +215,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_master(struct snd_timer_instance *master)
+static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
@@ -215,7 +223,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list, &master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
@@ -227,8 +239,12 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
spin_unlock_irq(&slave_active_lock);
}
}
+ return 0;
}
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put);
+
/*
* open a timer instance
* when opening a master, the slave id must be here given.
@@ -239,33 +255,42 @@ int snd_timer_open(struct snd_timer_instance **ti,
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
+ struct device *card_dev_to_put = NULL;
+ int err;
+ mutex_lock(&register_mutex);
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
pr_debug("ALSA: timer: invalid slave class %i\n",
tid->dev_sclass);
- return -EINVAL;
+ err = -EINVAL;
+ goto unlock;
+ }
+ if (num_slaves >= MAX_SLAVE_INSTANCES) {
+ err = -EBUSY;
+ goto unlock;
}
- mutex_lock(&register_mutex);
timeri = snd_timer_instance_new(owner, NULL);
if (!timeri) {
- mutex_unlock(&register_mutex);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto unlock;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
- snd_timer_check_slave(timeri);
- mutex_unlock(&register_mutex);
- *ti = timeri;
- return 0;
+ num_slaves++;
+ err = snd_timer_check_slave(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri, &card_dev_to_put);
+ timeri = NULL;
+ }
+ goto unlock;
}
/* open a master instance */
- mutex_lock(&register_mutex);
timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
if (!timer) {
@@ -276,21 +301,26 @@ int snd_timer_open(struct snd_timer_instance **ti,
}
#endif
if (!timer) {
- mutex_unlock(&register_mutex);
- return -ENODEV;
+ err = -ENODEV;
+ goto unlock;
}
if (!list_empty(&timer->open_list_head)) {
- timeri = list_entry(timer->open_list_head.next,
+ struct snd_timer_instance *t =
+ list_entry(timer->open_list_head.next,
struct snd_timer_instance, open_list);
- if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
- mutex_unlock(&register_mutex);
- return -EBUSY;
+ if (t->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
+ err = -EBUSY;
+ goto unlock;
}
}
+ if (timer->num_instances >= timer->max_instances) {
+ err = -EBUSY;
+ goto unlock;
+ }
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
- mutex_unlock(&register_mutex);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto unlock;
}
/* take a card refcount for safe disconnection */
if (timer->card)
@@ -299,45 +329,57 @@ int snd_timer_open(struct snd_timer_instance **ti,
timeri->slave_id = slave_id;
if (list_empty(&timer->open_list_head) && timer->hw.open) {
- int err = timer->hw.open(timer);
+ err = timer->hw.open(timer);
if (err) {
kfree(timeri->owner);
kfree(timeri);
+ timeri = NULL;
if (timer->card)
- put_device(&timer->card->card_dev);
+ card_dev_to_put = &timer->card->card_dev;
module_put(timer->module);
- mutex_unlock(&register_mutex);
- return err;
+ goto unlock;
}
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
- snd_timer_check_master(timeri);
+ timer->num_instances++;
+ err = snd_timer_check_master(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri, &card_dev_to_put);
+ timeri = NULL;
+ }
+
+ unlock:
mutex_unlock(&register_mutex);
+ /* put_device() is called after unlock for avoiding deadlock */
+ if (card_dev_to_put)
+ put_device(card_dev_to_put);
*ti = timeri;
- return 0;
+ return err;
}
+EXPORT_SYMBOL(snd_timer_open);
/*
* close a timer instance
+ * call this with register_mutex down.
*/
-int snd_timer_close(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
- if (snd_BUG_ON(!timeri))
- return -ENXIO;
-
- mutex_lock(&register_mutex);
list_del(&timeri->open_list);
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ num_slaves--;
/* force to stop the timer */
snd_timer_stop(timeri);
timer = timeri->timer;
if (timer) {
+ timer->num_instances--;
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
@@ -353,6 +395,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
+ timer->num_instances--;
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
@@ -376,14 +419,34 @@ int snd_timer_close(struct snd_timer_instance *timeri)
timer->hw.close(timer);
/* release a card refcount for safe disconnection */
if (timer->card)
- put_device(&timer->card->card_dev);
+ *card_devp_to_put = &timer->card->card_dev;
module_put(timer->module);
}
- mutex_unlock(&register_mutex);
return 0;
}
+/*
+ * close a timer instance
+ */
+int snd_timer_close(struct snd_timer_instance *timeri)
+{
+ struct device *card_dev_to_put = NULL;
+ int err;
+
+ if (snd_BUG_ON(!timeri))
+ return -ENXIO;
+
+ mutex_lock(&register_mutex);
+ err = snd_timer_close_locked(timeri, &card_dev_to_put);
+ mutex_unlock(&register_mutex);
+ /* put_device() is called after unlock for avoiding deadlock */
+ if (card_dev_to_put)
+ put_device(card_dev_to_put);
+ return err;
+}
+EXPORT_SYMBOL(snd_timer_close);
+
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
struct snd_timer * timer;
@@ -397,6 +460,7 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
}
return 0;
}
+EXPORT_SYMBOL(snd_timer_resolution);
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
@@ -588,6 +652,7 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
else
return snd_timer_start1(timeri, true, ticks);
}
+EXPORT_SYMBOL(snd_timer_start);
/*
* stop the timer instance.
@@ -601,6 +666,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
else
return snd_timer_stop1(timeri, true);
}
+EXPORT_SYMBOL(snd_timer_stop);
/*
* start again.. the tick is kept.
@@ -616,6 +682,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
else
return snd_timer_start1(timeri, false, 0);
}
+EXPORT_SYMBOL(snd_timer_continue);
/*
* pause.. remember the ticks left
@@ -627,6 +694,7 @@ int snd_timer_pause(struct snd_timer_instance * timeri)
else
return snd_timer_stop1(timeri, false);
}
+EXPORT_SYMBOL(snd_timer_pause);
/*
* reschedule the timer
@@ -808,6 +876,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
+EXPORT_SYMBOL(snd_timer_interrupt);
/*
@@ -846,6 +915,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
+ timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
@@ -858,6 +928,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
*rtimer = timer;
return 0;
}
+EXPORT_SYMBOL(snd_timer_new);
static int snd_timer_free(struct snd_timer *timer)
{
@@ -977,6 +1048,7 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
}
spin_unlock_irqrestore(&timer->lock, flags);
}
+EXPORT_SYMBOL(snd_timer_notify);
/*
* exported functions for global timers
@@ -992,11 +1064,13 @@ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
+EXPORT_SYMBOL(snd_timer_global_new);
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
+EXPORT_SYMBOL(snd_timer_global_free);
int snd_timer_global_register(struct snd_timer *timer)
{
@@ -1006,6 +1080,7 @@ int snd_timer_global_register(struct snd_timer *timer)
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
+EXPORT_SYMBOL(snd_timer_global_register);
/*
* System timer
@@ -2121,17 +2196,3 @@ static void __exit alsa_timer_exit(void)
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
-
-EXPORT_SYMBOL(snd_timer_open);
-EXPORT_SYMBOL(snd_timer_close);
-EXPORT_SYMBOL(snd_timer_resolution);
-EXPORT_SYMBOL(snd_timer_start);
-EXPORT_SYMBOL(snd_timer_stop);
-EXPORT_SYMBOL(snd_timer_continue);
-EXPORT_SYMBOL(snd_timer_pause);
-EXPORT_SYMBOL(snd_timer_new);
-EXPORT_SYMBOL(snd_timer_notify);
-EXPORT_SYMBOL(snd_timer_global_new);
-EXPORT_SYMBOL(snd_timer_global_free);
-EXPORT_SYMBOL(snd_timer_global_register);
-EXPORT_SYMBOL(snd_timer_interrupt);
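
The reworked snd_timer_open()/snd_timer_close() paths above collect the device reference to drop into a local pointer and only call put_device() after register_mutex is released, since the final put can run a release callback that must not execute under that mutex. Here is a user-space analog of the defer-the-release-until-after-unlock pattern, with a pthread mutex and a plain callback standing in for put_device(); names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct resource { char name[16]; };

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct resource *registered;

static void release_resource(struct resource *r)
{
	/* may take other locks or sleep; must run outside registry_lock */
	printf("releasing %s\n", r->name);
	free(r);
}

static void unregister(void)
{
	struct resource *to_release;

	pthread_mutex_lock(&registry_lock);
	to_release = registered;	/* remember it, do not release yet */
	registered = NULL;
	pthread_mutex_unlock(&registry_lock);

	if (to_release)
		release_resource(to_release);	/* after unlock: no deadlock */
}

int main(void)
{
	registered = malloc(sizeof(*registered));
	if (!registered)
		return 1;
	snprintf(registered->name, sizeof(registered->name), "timer0");
	unregister();
	return 0;
}
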
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 172dacd925f5..c182341c1714 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -925,7 +925,7 @@ static void print_formats(struct snd_dummy *dummy,
{
int i;
- for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
+ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
if (dummy->pcm_hw.formats & (1ULL << i))
snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
}
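
The dummy-driver fix changes the loop bound from '<' to '<=' because SNDRV_PCM_FORMAT_LAST is itself a valid format index, so the old loop silently skipped the final entry. A tiny illustration with a made-up enum:

#include <stdio.h>

enum fmt { FMT_S8, FMT_S16, FMT_S24, FMT_S32, FMT_LAST = FMT_S32 };

static const char *fmt_name[] = { "S8", "S16", "S24", "S32" };

int main(void)
{
	int i;

	/* FMT_LAST is a valid value, so the bound must be inclusive. */
	for (i = 0; i <= FMT_LAST; i++)
		printf("%s\n", fmt_name[i]);
	return 0;
}
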
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index a371c075ac87..e26702559f61 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(unsigned long data);
/* Prototypes for opl3_drums.c */
void snd_opl3_load_drums(struct snd_opl3 *opl3);
-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
+void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
/* Prototypes for opl3_oss.c */
#ifdef CONFIG_SND_SEQUENCER_OSS
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index bebddc60fde8..99654e7eb2d4 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -388,7 +388,7 @@ static void read_midi_messages(struct amdtp_stream *s,
u8 *b;
for (f = 0; f < frames; f++) {
- port = (s->data_block_counter + f) % 8;
+ port = (8 - s->tx_first_dbc + s->data_block_counter + f) % 8;
b = (u8 *)&buffer[p->midi_position];
len = b[0] - 0x80;
diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
index f11090057949..d0a8736613a1 100644
--- a/sound/firewire/bebob/bebob_focusrite.c
+++ b/sound/firewire/bebob/bebob_focusrite.c
@@ -28,6 +28,8 @@
#define SAFFIRE_CLOCK_SOURCE_SPDIF 1
/* clock sources as returned from register of Saffire Pro 10 and 26 */
+#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff
+#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00
#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
@@ -190,6 +192,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
map = saffirepro_clk_maps[1];
/* In a case that this driver cannot handle the value of register. */
+ value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
err = -EIO;
goto end;
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 4d3034a68bdf..be2c056eb62d 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -253,8 +253,7 @@ end:
return err;
}
-static unsigned int
-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
{
unsigned int sec, sections, ch, channels;
unsigned int pcm, midi, location;
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 48d6dca471c6..6c8daf5b391f 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -639,7 +639,7 @@ static int isight_probe(struct fw_unit *unit,
if (!isight->audio_base) {
dev_err(&unit->device, "audio unit base not found\n");
err = -ENXIO;
- goto err_unit;
+ goto error;
}
fw_iso_resources_init(&isight->resources, unit);
@@ -668,12 +668,12 @@ static int isight_probe(struct fw_unit *unit,
dev_set_drvdata(&unit->device, isight);
return 0;
-
-err_unit:
- fw_unit_put(isight->unit);
- mutex_destroy(&isight->mutex);
error:
snd_card_free(card);
+
+ mutex_destroy(&isight->mutex);
+ fw_unit_put(isight->unit);
+
return err;
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index b0395c4209ab..a7ab34d5e7b0 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -175,9 +175,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
oxfw->midi_input_ports = 0;
oxfw->midi_output_ports = 0;
- /* Output stream exists but no data channels are useful. */
- oxfw->has_output = false;
-
return snd_oxfw_scs1x_add(oxfw);
}
diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
index ea1506679c66..3b09b8ef3a09 100644
--- a/sound/firewire/packets-buffer.c
+++ b/sound/firewire/packets-buffer.c
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
packets_per_page = PAGE_SIZE / packet_size;
if (WARN_ON(!packets_per_page)) {
err = -EINVAL;
- goto error;
+ goto err_packets;
}
pages = DIV_ROUND_UP(count, packets_per_page);
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index 79db1b651f5c..984749ee8145 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -81,6 +81,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
goto err_locked;
err = snd_tscm_stream_get_clock(tscm, &clock);
+ if (err < 0)
+ goto err_locked;
+
if (clock != SND_TSCM_CLOCK_INTERNAL ||
amdtp_stream_pcm_running(&tscm->rx_stream) ||
amdtp_stream_pcm_running(&tscm->tx_stream)) {
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index f1657a4e0621..a1308f12a65b 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -9,20 +9,37 @@
#include <linux/delay.h>
#include "tascam.h"
+#define CLOCK_STATUS_MASK 0xffff0000
+#define CLOCK_CONFIG_MASK 0x0000ffff
+
#define CALLBACK_TIMEOUT 500
static int get_clock(struct snd_tscm *tscm, u32 *data)
{
+ int trial = 0;
__be32 reg;
int err;
- err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
- TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
- &reg, sizeof(reg), 0);
- if (err >= 0)
+ while (trial++ < 5) {
+ err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
+ TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
+ &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
*data = be32_to_cpu(reg);
+ if (*data & CLOCK_STATUS_MASK)
+ break;
- return err;
+ // In intermediate state after changing clock status.
+ msleep(50);
+ }
+
+ // Still in the intermediate state.
+ if (trial >= 5)
+ return -EAGAIN;
+
+ return 0;
}
static int set_clock(struct snd_tscm *tscm, unsigned int rate,
@@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
err = get_clock(tscm, &data);
if (err < 0)
return err;
- data &= 0x0000ffff;
+ data &= CLOCK_CONFIG_MASK;
if (rate > 0) {
data &= 0x000000ff;
@@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
{
- u32 data = 0x0;
- unsigned int trials = 0;
+ u32 data;
int err;
- while (data == 0x0 || trials++ < 5) {
- err = get_clock(tscm, &data);
- if (err < 0)
- return err;
+ err = get_clock(tscm, &data);
+ if (err < 0)
+ return err;
- data = (data & 0xff000000) >> 24;
- }
+ data = (data & 0xff000000) >> 24;
/* Check base rate. */
if ((data & 0x0f) == 0x01)
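
get_clock() now retries the register read a bounded number of times, sleeping between attempts, and reports -EAGAIN if the device never leaves the intermediate state, rather than looping on the caller side. A stand-alone sketch of that bounded-retry shape follows; read_status() is a stub standing in for the FireWire transaction, and the constants are illustrative.

#include <stdio.h>
#include <errno.h>
#include <time.h>

#define MAX_TRIALS 5

/* Stub: pretend the device needs a couple of polls to settle. */
static int read_status(unsigned int *status)
{
	static int calls;

	*status = (++calls >= 3) ? 0x00010000u : 0;
	return 0;
}

static int get_clock(unsigned int *status)
{
	struct timespec delay = { 0, 50 * 1000 * 1000 };	/* 50 ms */
	int trial = 0;
	int err;

	while (trial++ < MAX_TRIALS) {
		err = read_status(status);
		if (err < 0)
			return err;
		if (*status & 0xffff0000u)	/* status bits valid */
			return 0;
		nanosleep(&delay, NULL);	/* still settling: wait and retry */
	}
	return -EAGAIN;
}

int main(void)
{
	unsigned int status;
	int err = get_clock(&status);

	printf("err=%d status=0x%08x\n", err, err ? 0 : status);
	return 0;
}
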
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index f21633cd9b38..acbe61b8db7b 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -249,7 +249,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
if (spk_alloc & (1 << i))
- j += snprintf(buf + j, buflen - j, " %s",
+ j += scnprintf(buf + j, buflen - j, " %s",
cea_speaker_allocation_names[i]);
}
buf[j] = '\0'; /* necessary when j == 0 */
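
The snprintf()-to-scnprintf() conversions in this and the later HDA hunks matter because snprintf() returns the length the output would have had, so accumulating "j += snprintf(buf + j, buflen - j, ...)" can push j past buflen and turn the size argument into a huge value; the kernel's scnprintf() returns the number of bytes actually written. A user-space sketch of the safe accumulation, with a minimal stand-in helper (my_scnprintf() is illustrative, not the kernel implementation):

#include <stdarg.h>
#include <stdio.h>

/* Return bytes actually written (excluding the NUL), never more than size-1. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (ret < 0)
		return 0;
	return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
	char buf[16];
	size_t j = 0;
	int i;

	for (i = 0; i < 10; i++)
		j += my_scnprintf(buf + j, sizeof(buf) - j, " item%d", i);

	/* j stays <= sizeof(buf) - 1; with snprintf() it would overshoot. */
	printf("j=%zu buf=\"%s\"\n", j, buf);
	return 0;
}
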
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 7e21621e492a..7fd1b4000883 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
struct cs8427 *chip = device->private_data;
char *hw_data = udata ?
chip->playback.hw_udata : chip->playback.hw_status;
- char data[32];
+ unsigned char data[32];
int err, idx;
if (!memcmp(hw_data, ndata, count))
diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
index bf377dc192aa..d33e02c31712 100644
--- a/sound/i2c/other/ak4xxx-adda.c
+++ b/sound/i2c/other/ak4xxx-adda.c
@@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
return err;
memset(&knew, 0, sizeof(knew));
- knew.name = ak->adc_info[mixer_ch].selector_name;
- if (!knew.name) {
+ if (!ak->adc_info ||
+ !ak->adc_info[mixer_ch].selector_name) {
knew.name = "Capture Channel";
knew.index = mixer_ch + ak->idx_offset * 2;
- }
+ } else
+ knew.name = ak->adc_info[mixer_ch].selector_name;
knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
knew.info = ak4xxx_capture_source_info;
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 9d7582c90a95..c67d379cb6d6 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -293,7 +293,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
} else {
mpu_port[dev] = pnp_port_start(pdev, 0);
if (mpu_irq[dev] >= 0 &&
- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
+ pnp_irq_valid(pdev, 0) &&
+ pnp_irq(pdev, 0) != (resource_size_t)-1) {
mpu_irq[dev] = pnp_irq(pdev, 0);
} else {
mpu_irq[dev] = -1; /* disable interrupt */
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index ad42d2364199..e75bfc511e3e 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
/* block the 0x388 port to avoid PnP conflicts */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
+ if (!acard->fm_res) {
+ err = -EBUSY;
+ goto _err;
+ }
if (port[dev] != SNDRV_AUTO_PORT) {
if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 286f5e3686a3..d73ee11a32bd 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1953,6 +1953,11 @@ static int snd_echo_create(struct snd_card *card,
}
chip->dsp_registers = (volatile u32 __iomem *)
ioremap_nocache(chip->dsp_registers_phys, sz);
+ if (!chip->dsp_registers) {
+ dev_err(chip->card->dev, "ioremap failed\n");
+ snd_echo_free(chip);
+ return -ENOMEM;
+ }
if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
KBUILD_MODNAME, chip)) {
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index a03cf68d0bcd..12d87204e373 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -827,6 +827,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
while (id >= 0) {
const struct hda_fixup *fix = codec->fixup_list + id;
+ if (++depth > 10)
+ break;
if (fix->chained_before)
apply_fixup(codec, fix->chain_id, action, depth + 1);
@@ -866,8 +868,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
}
if (!fix->chained || fix->chained_before)
break;
- if (++depth > 10)
- break;
id = fix->chain_id;
}
}
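
apply_fixup() moves the depth check to the top of the loop so that the recursion taken for chained_before entries is bounded as well, not just the tail chain. A toy example of capping traversal depth at entry, before any work is done; the node type is generic, not the HDA fixup structures.

#include <stdio.h>

#define MAX_DEPTH 10

struct node { int id; const struct node *next; };

static void walk(const struct node *n, int depth)
{
	if (++depth > MAX_DEPTH) {	/* guard first, before any work */
		fprintf(stderr, "chain too deep, stopping at id=%d\n", n->id);
		return;
	}
	printf("visit %d\n", n->id);
	if (n->next)
		walk(n->next, depth);
}

int main(void)
{
	/* A three-node chain; a cyclic chain would be cut off at MAX_DEPTH. */
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	walk(&a, 0);
	return 0;
}
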
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index c397e7da0eac..7ccfb09535e1 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -310,8 +310,12 @@ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_beep *beep = codec->beep;
+ int chs = get_amp_channels(kcontrol);
+
if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
- ucontrol->value.integer.value[0] =
+ if (chs & 1)
+ ucontrol->value.integer.value[0] = beep->enabled;
+ if (chs & 2)
ucontrol->value.integer.value[1] = beep->enabled;
return 0;
}
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 7ea201c05e5d..d0d6dfbfcfdf 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -42,6 +42,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
{
struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+ /* ignore unsol events during shutdown */
+ if (codec->bus->shutdown)
+ return;
+
if (codec->patch_ops.unsol_event)
codec->patch_ops.unsol_event(codec, ev);
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 1b5e217d1bb2..cbe0248225c1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -876,6 +876,7 @@ int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card,
/* power-up all before initialization */
hda_set_power_state(codec, AC_PWRST_D0);
+ codec->core.dev.power.power_state = PMSG_ON;
snd_hda_codec_proc_new(codec);
@@ -4104,7 +4105,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
if (pcm & (AC_SUPPCM_BITS_8 << i))
- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
+ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
buf[j] = '\0'; /* necessary when j == 0 */
}
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 56af7308a2fd..bd0e4710d15d 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
}
runtime->private_data = azx_dev;
- if (chip->gts_present)
- azx_pcm_hw.info = azx_pcm_hw.info |
- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
-
runtime->hw = azx_pcm_hw;
+ if (chip->gts_present)
+ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
runtime->hw.formats = hinfo->formats;
@@ -868,6 +866,9 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
*/
if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
hbus->response_reset = 1;
+ dev_err(chip->card->dev,
+ "No response from codec, resetting bus: last cmd=0x%08x\n",
+ bus->last_cmd[addr]);
return -EAGAIN; /* give a chance to retry */
}
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index b83feecf1e40..8f9386998270 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -171,11 +171,10 @@ struct azx {
#define azx_bus(chip) (&(chip)->bus.core)
#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core)
-#ifdef CONFIG_X86
-#define azx_snoop(chip) ((chip)->snoop)
-#else
-#define azx_snoop(chip) true
-#endif
+static inline bool azx_snoop(struct azx *chip)
+{
+ return !IS_ENABLED(CONFIG_X86) || chip->snoop;
+}
/*
* macros for easy use
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index ba7fe9b6655c..864cc8c9ada0 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -373,7 +373,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
if (pcm & (1 << i))
- j += snprintf(buf + j, buflen - j, " %d",
+ j += scnprintf(buf + j, buflen - j, " %d",
alsa_rates[i]);
buf[j] = '\0'; /* necessary when j == 0 */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b0bd29003b5d..949c90a859fa 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -5807,7 +5807,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
if (spec->init_hook)
spec->init_hook(codec);
- snd_hda_apply_verbs(codec);
+ if (!spec->skip_verbs)
+ snd_hda_apply_verbs(codec);
init_multi_out(codec);
init_extra_out(codec);
@@ -5849,6 +5850,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_free);
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+ /* Make the codec enter D3 to avoid spurious noises from the internal
+ * speaker during (and after) reboot
+ */
+ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
#ifdef CONFIG_PM
/**
* snd_hda_gen_check_power_status - check the loopback power save state
@@ -5876,6 +5895,7 @@ static const struct hda_codec_ops generic_patch_ops = {
.init = snd_hda_gen_init,
.free = snd_hda_gen_free,
.unsol_event = snd_hda_jack_unsol_event,
+ .reboot_notify = snd_hda_gen_reboot_notify,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
#endif
@@ -5898,7 +5918,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
if (err < 0)
- return err;
+ goto error;
err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index f66fc7e25e07..6d1cb2fb447d 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -236,6 +236,7 @@ struct hda_gen_spec {
unsigned int indep_hp_enabled:1; /* independent HP enabled */
unsigned int have_aamix_ctl:1;
unsigned int hp_mic_jack_modes:1;
+ unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
/* additional mute flags (only effective with auto_mute_via_amp=1) */
u64 mute_bits;
@@ -322,6 +323,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
struct auto_pin_cfg *cfg);
int snd_hda_gen_build_controls(struct hda_codec *codec);
int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
/* standard jack event callbacks */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 789eca17fc60..d8e132a2d1a8 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1275,8 +1275,11 @@ static int azx_free(struct azx *chip)
static int azx_dev_disconnect(struct snd_device *device)
{
struct azx *chip = device->device_data;
+ struct hdac_bus *bus = azx_bus(chip);
chip->bus.shutdown = 1;
+ cancel_work_sync(&bus->unsol_work);
+
return 0;
}
@@ -1700,9 +1703,6 @@ static int azx_first_init(struct azx *chip)
chip->msi = 0;
}
- if (azx_acquire_irq(chip, 0) < 0)
- return -EBUSY;
-
pci_set_master(pci);
synchronize_irq(bus->irq);
@@ -1809,6 +1809,9 @@ static int azx_first_init(struct azx *chip)
return -ENODEV;
}
+ if (azx_acquire_irq(chip, 0) < 0)
+ return -EBUSY;
+
strcpy(card->driver, "HDA-Intel");
strlcpy(card->shortname, driver_short_names[chip->driver_type],
sizeof(card->shortname));
@@ -1825,24 +1828,15 @@ static void azx_firmware_cb(const struct firmware *fw, void *context)
{
struct snd_card *card = context;
struct azx *chip = card->private_data;
- struct pci_dev *pci = chip->pci;
-
- if (!fw) {
- dev_err(card->dev, "Cannot load firmware, aborting\n");
- goto error;
- }
- chip->fw = fw;
+ if (fw)
+ chip->fw = fw;
+ else
+ dev_err(card->dev, "Cannot load firmware, continue without patching\n");
if (!chip->disabled) {
/* continue probing */
- if (azx_probe_continue(chip))
- goto error;
+ azx_probe_continue(chip);
}
- return; /* OK */
-
- error:
- snd_card_free(card);
- pci_set_drvdata(pci, NULL);
}
#endif
@@ -1968,6 +1962,17 @@ static const struct hdac_io_ops pci_hda_io_ops = {
.dma_free_pages = dma_free_pages,
};
+/* Blacklist for skipping the whole probe:
+ * some HD-audio PCI entries are exposed without any codecs, and such devices
+ * should be ignored from the beginning.
+ */
+static const struct snd_pci_quirk driver_blacklist[] = {
+ SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0),
+ SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
+ SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
+ {}
+};
+
static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
.substream_alloc_pages = substream_alloc_pages,
@@ -1987,6 +1992,11 @@ static int azx_probe(struct pci_dev *pci,
bool schedule_probe;
int err;
+ if (snd_pci_quirk_lookup(pci, driver_blacklist)) {
+ dev_info(&pci->dev, "Skipping the blacklisted device\n");
+ return -ENODEV;
+ }
+
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 9739fce9e032..f3ac19d33bd4 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -221,7 +221,7 @@ static ssize_t init_verbs_show(struct device *dev,
mutex_lock(&codec->user_mutex);
for (i = 0; i < codec->init_verbs.used; i++) {
struct hda_verb *v = snd_array_elem(&codec->init_verbs, i);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"0x%02x 0x%03x 0x%04x\n",
v->nid, v->verb, v->param);
}
@@ -271,7 +271,7 @@ static ssize_t hints_show(struct device *dev,
mutex_lock(&codec->user_mutex);
for (i = 0; i < codec->hints.used; i++) {
struct hda_hint *hint = snd_array_elem(&codec->hints, i);
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"%s = %s\n", hint->key, hint->val);
}
mutex_unlock(&codec->user_mutex);
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index e0fb8c6d1bc2..7d65c6df9aa8 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -370,6 +370,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+ SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 280999961226..bf7593f234f6 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1300,13 +1300,14 @@ struct scp_msg {
static void dspio_clear_response_queue(struct hda_codec *codec)
{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
unsigned int dummy = 0;
- int status = -1;
+ int status;
/* clear all from the response queue */
do {
status = dspio_read(codec, &dummy);
- } while (status == 0);
+ } while (status == 0 && time_before(jiffies, timeout));
}
static int dspio_get_response_data(struct hda_codec *codec)
@@ -4424,12 +4425,14 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
struct ca0132_spec *spec = codec->spec;
codec_dbg(codec, "ca0132_process_dsp_response\n");
+ snd_hda_power_up_pm(codec);
if (spec->wait_scp) {
if (dspio_get_response_data(codec) >= 0)
spec->wait_scp = 0;
}
dspio_clear_response_queue(codec);
+ snd_hda_power_down_pm(codec);
}
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
@@ -4440,7 +4443,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
- cancel_delayed_work_sync(&spec->unsol_hp_work);
+ cancel_delayed_work(&spec->unsol_hp_work);
schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
tbl = snd_hda_jack_tbl_get(codec, cb->nid);
if (tbl)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 447b3a8a83c3..1e99500dbb6c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -204,23 +204,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- switch (codec->core.vendor_id) {
- case 0x14f12008: /* CX8200 */
- case 0x14f150f2: /* CX20722 */
- case 0x14f150f4: /* CX20724 */
- break;
- default:
- return;
- }
-
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
+ snd_hda_gen_reboot_notify(codec);
}
static void cx_auto_free(struct hda_codec *codec)
@@ -866,6 +853,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21d2, "Lenovo T420s", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
@@ -1011,6 +999,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
*/
static const struct hda_device_id snd_hda_id_conexant[] = {
+ HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 76ae627e3f93..7a2943a338bf 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1447,9 +1447,11 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
ret = !repoll || !eld->monitor_present || eld->eld_valid;
jack = snd_hda_jack_tbl_get(codec, pin_nid);
- if (jack)
+ if (jack) {
jack->block_report = !ret;
-
+ jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
+ AC_PINSENSE_PRESENCE : 0;
+ }
mutex_unlock(&per_pin->lock);
return ret;
}
@@ -1554,6 +1556,11 @@ static void hdmi_repoll_eld(struct work_struct *work)
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
struct hda_codec *codec = per_pin->codec;
struct hdmi_spec *spec = codec->spec;
+ struct hda_jack_tbl *jack;
+
+ jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ if (jack)
+ jack->jack_dirty = 1;
if (per_pin->repoll_count++ > 6)
per_pin->repoll_count = 0;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0fc05ebdf81a..000b59d474ab 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -353,6 +353,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
alc_update_coef_idx(codec, 0x10, 1<<15, 0);
break;
case 0x10ec0662:
@@ -773,10 +774,11 @@ static int alc_init(struct hda_codec *codec)
if (spec->init_hook)
spec->init_hook(codec);
+ spec->gen.skip_verbs = 1; /* applied in below */
+ snd_hda_gen_init(codec);
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
-
- snd_hda_gen_init(codec);
+ snd_hda_apply_verbs(codec); /* apply verbs here after own init */
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
@@ -803,15 +805,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
alc_shutup(codec);
}
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
-}
-
#define alc_free snd_hda_gen_free
#ifdef CONFIG_PM
@@ -977,6 +970,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
+ /* blacklist -- no beep available */
+ SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
+ SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
{}
};
@@ -4216,6 +4212,8 @@ static void alc_determine_headset_type(struct hda_codec *codec)
is_ctia = (val & 0x1c02) == 0x1c02;
break;
case 0x10ec0225:
+ codec->power_save_node = 1;
+ /* fall through */
case 0x10ec0295:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
@@ -4474,7 +4472,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
snd_hda_apply_pincfgs(codec, pincfgs);
@@ -5855,7 +5853,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
- SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
@@ -6318,7 +6316,7 @@ static int patch_alc269(struct hda_codec *codec)
spec = codec->spec;
spec->gen.shared_mic_vref_pin = 0x18;
- codec->power_save_node = 1;
+ codec->power_save_node = 0;
#ifdef CONFIG_PM
codec->patch_ops.suspend = alc269_suspend;
@@ -6429,6 +6427,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0700:
case 0x10ec0701:
case 0x10ec0703:
+ case 0x10ec0711:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -7273,6 +7272,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
{0x18, 0x01a19030},
{0x1a, 0x01813040},
{0x21, 0x01014020}),
+ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x16, 0x01813030},
+ {0x17, 0x02211010},
+ {0x18, 0x01a19040},
+ {0x21, 0x01014020}),
SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
{0x14, 0x01014010},
{0x18, 0x01a19020},
@@ -7464,6 +7468,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 0abab7926dca..d1a6d20ace0d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -77,6 +77,7 @@ enum {
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
+ STAC_ELO_VUPOINT_15MX,
STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
STAC_92HD73XX_ASUS_MOBO,
@@ -1875,6 +1876,18 @@ static void stac92hd73xx_fixup_no_jd(struct hda_codec *codec,
codec->no_jack_detect = 1;
}
+
+static void stac92hd73xx_disable_automute(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ spec->gen.suppress_auto_mute = 1;
+}
+
static const struct hda_fixup stac92hd73xx_fixups[] = {
[STAC_92HD73XX_REF] = {
.type = HDA_FIXUP_FUNC,
@@ -1900,6 +1913,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd73xx_fixup_alienware_m17x,
},
+ [STAC_ELO_VUPOINT_15MX] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd73xx_disable_automute,
+ },
[STAC_92HD73XX_INTEL] = {
.type = HDA_FIXUP_PINS,
.v.pins = intel_dg45id_pin_configs,
@@ -1938,6 +1955,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
{ .id = STAC_DELL_EQ, .name = "dell-eq" },
{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+ { .id = STAC_ELO_VUPOINT_15MX, .name = "elo-vupoint-15mx" },
{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
{}
};
@@ -1987,6 +2005,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(0x1059, 0x1011,
+ "ELO VuPoint 15MX", STAC_ELO_VUPOINT_15MX),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index e5c52ed9b674..8c06de37b467 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -661,6 +661,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
unsigned long flags;
unsigned char mclk_change;
unsigned int i, old_rate;
+ bool call_set_rate = false;
if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
return -EINVAL;
@@ -684,7 +685,7 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
* setting clock rate for internal clock mode */
old_rate = ice->get_rate(ice);
if (force || (old_rate != rate))
- ice->set_rate(ice, rate);
+ call_set_rate = true;
else if (rate == ice->cur_rate) {
spin_unlock_irqrestore(&ice->reg_lock, flags);
return 0;
@@ -692,12 +693,14 @@ static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
}
ice->cur_rate = rate;
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+
+ if (call_set_rate)
+ ice->set_rate(ice, rate);
/* setting master clock */
mclk_change = ice->set_mclk(ice, rate);
- spin_unlock_irqrestore(&ice->reg_lock, flags);
-
if (mclk_change && ice->gpio.i2s_mclk_changed)
ice->gpio.i2s_mclk_changed(ice);
if (ice->gpio.set_pro_rate)
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 2697402b5195..41f6450a2539 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -569,7 +569,7 @@ static int wm_adc_mux_enum_get(struct snd_kcontrol *kcontrol,
struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
mutex_lock(&ice->gpio_mutex);
- ucontrol->value.integer.value[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
+ ucontrol->value.enumerated.item[0] = wm_get(ice, WM_ADC_MUX) & 0x1f;
mutex_unlock(&ice->gpio_mutex);
return 0;
}
@@ -583,7 +583,7 @@ static int wm_adc_mux_enum_put(struct snd_kcontrol *kcontrol,
mutex_lock(&ice->gpio_mutex);
oval = wm_get(ice, WM_ADC_MUX);
- nval = (oval & 0xe0) | ucontrol->value.integer.value[0];
+ nval = (oval & 0xe0) | ucontrol->value.enumerated.item[0];
if (nval != oval) {
wm_put(ice, WM_ADC_MUX, nval);
change = 1;
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 1bc98c867133..2286dfd72ff7 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1171,16 +1171,6 @@ static int snd_intel8x0m_create(struct snd_card *card,
}
port_inited:
- if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, chip)) {
- dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
- snd_intel8x0m_free(chip);
- return -EBUSY;
- }
- chip->irq = pci->irq;
- pci_set_master(pci);
- synchronize_irq(chip->irq);
-
/* initialize offsets */
chip->bdbars_count = 2;
tbl = intel_regs;
@@ -1224,11 +1214,21 @@ static int snd_intel8x0m_create(struct snd_card *card,
chip->int_sta_reg = ICH_REG_GLOB_STA;
chip->int_sta_mask = int_sta_masks;
+ pci_set_master(pci);
+
if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) {
snd_intel8x0m_free(chip);
return err;
}
+ if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, chip)) {
+ dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
+ snd_intel8x0m_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
snd_intel8x0m_free(chip);
return err;
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index fbbc25279559..2a127feb8e29 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -117,10 +117,10 @@ static void spu_memset(u32 toi, u32 what, int length)
}
/* spu_memload - write to SPU address space */
-static void spu_memload(u32 toi, void *from, int length)
+static void spu_memload(u32 toi, const void *from, int length)
{
unsigned long flags;
- u32 *froml = from;
+ const u32 *froml = from;
u32 __iomem *to = (u32 __iomem *) (SPU_MEMORY_BASE + toi);
int i;
u32 val;
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 22aec9a1e9a4..838d03a138ca 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -25,6 +25,8 @@ config SND_ATMEL_SOC_DMA
config SND_ATMEL_SOC_SSC_DMA
tristate
+ select SND_ATMEL_SOC_DMA
+ select SND_ATMEL_SOC_PDC
config SND_ATMEL_SOC_SSC
tristate
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 6e8eb1f5a041..bed64723e5d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,7 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
static bool cs4265_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
+ case CS4265_CHIP_ID ... CS4265_MAX_REGISTER:
return true;
default:
return false;
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 84f86745c30e..828bc615a190 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -643,6 +643,7 @@ static const struct regmap_config cs4270_regmap = {
.reg_defaults = cs4270_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+ .write_flag_mask = CS4270_I2C_INCR,
.readable_reg = cs4270_reg_is_readable,
.volatile_reg = cs4270_reg_is_volatile,
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index 43cdd06309d7..7cbbe64e3509 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -654,6 +654,7 @@ static int cs42xx8_runtime_resume(struct device *dev)
CS42XX8_PWRCTL_PDN_MASK, 0);
regcache_cache_only(cs42xx8->regmap, false);
+ regcache_mark_dirty(cs42xx8->regmap);
ret = regcache_sync(cs42xx8->regmap);
if (ret) {
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
index 231ca935cdf3..c232c42ccead 100644
--- a/sound/soc/codecs/cs4349.c
+++ b/sound/soc/codecs/cs4349.c
@@ -380,6 +380,7 @@ static struct i2c_driver cs4349_i2c_driver = {
.driver = {
.name = "cs4349",
.of_match_table = cs4349_of_match,
+ .pm = &cs4349_runtime_pm,
},
.id_table = cs4349_i2c_id,
.probe = cs4349_i2c_probe,
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 37722194b107..6b22700842e2 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -234,7 +234,7 @@ static const struct soc_enum es8328_rline_enum =
ARRAY_SIZE(es8328_line_texts),
es8328_line_texts);
static const struct snd_kcontrol_new es8328_right_line_controls =
- SOC_DAPM_ENUM("Route", es8328_lline_enum);
+ SOC_DAPM_ENUM("Route", es8328_rline_enum);
/* Left Mixer */
static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index c602c4960924..88355d1719a3 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1267,6 +1267,12 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
if (ret)
return ret;
+ /* Filter out 44.1, 88.2 and 176.4Khz */
+ rates &= ~(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_176400);
+ if (!rates)
+ return -EINVAL;
+
sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
hdmi_dais[i].name = devm_kstrdup(&hdac->dev,
dai_name, GFP_KERNEL);
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 511d09cb2181..ca86eefdb9eb 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -140,8 +140,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
if (!ret) {
ret = snd_pcm_hw_constraint_eld(substream->runtime,
hcp->eld);
- if (ret)
+ if (ret) {
+ mutex_lock(&hcp->current_stream_lock);
+ hcp->current_stream = NULL;
+ mutex_unlock(&hcp->current_stream_lock);
return ret;
+ }
}
}
return 0;
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 584aab83e478..e7aef841f87d 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1209,14 +1209,14 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
&max98090_right_rcv_mixer_controls[0],
ARRAY_SIZE(max98090_right_rcv_mixer_controls)),
- SND_SOC_DAPM_MUX("LINMOD Mux", M98090_REG_LOUTR_MIXER,
- M98090_LINMOD_SHIFT, 0, &max98090_linmod_mux),
+ SND_SOC_DAPM_MUX("LINMOD Mux", SND_SOC_NOPM, 0, 0,
+ &max98090_linmod_mux),
- SND_SOC_DAPM_MUX("MIXHPLSEL Mux", M98090_REG_HP_CONTROL,
- M98090_MIXHPLSEL_SHIFT, 0, &max98090_mixhplsel_mux),
+ SND_SOC_DAPM_MUX("MIXHPLSEL Mux", SND_SOC_NOPM, 0, 0,
+ &max98090_mixhplsel_mux),
- SND_SOC_DAPM_MUX("MIXHPRSEL Mux", M98090_REG_HP_CONTROL,
- M98090_MIXHPRSEL_SHIFT, 0, &max98090_mixhprsel_mux),
+ SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
+ &max98090_mixhprsel_mux),
SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
M98090_HPLEN_SHIFT, 0, NULL, 0),
@@ -1924,6 +1924,21 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
return 0;
}
+static int max98090_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+ unsigned int fmt = max98090->dai_fmt;
+
+ /* Remove 24-bit format support if it is not in right justified mode. */
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_RIGHT_J) {
+ substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(substream->runtime, 0, 16, 16);
+ }
+ return 0;
+}
+
static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -2331,6 +2346,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops max98090_dai_ops = {
+ .startup = max98090_dai_startup,
.set_sysclk = max98090_dai_set_sysclk,
.set_fmt = max98090_dai_set_fmt,
.set_tdm_slot = max98090_set_tdm_slot,
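The max98090 hunk above adds a .startup callback that narrows the advertised formats to 16-bit unless the DAI is in right-justified mode. A minimal standalone sketch of that decision, using demo format labels rather than the real SND_SOC_DAIFMT_* / SNDRV_PCM_FMTBIT_* constants:

/* Userspace sketch of the constraint decision in max98090_dai_startup(). */
#include <stdio.h>

enum dai_fmt { FMT_I2S, FMT_LEFT_J, FMT_RIGHT_J };

static int allowed_sample_bits(enum dai_fmt fmt)
{
        /* Mirrors the patch: right-justified keeps 24-bit, others drop to 16. */
        return fmt == FMT_RIGHT_J ? 24 : 16;
}

int main(void)
{
        printf("I2S     -> up to %d bits\n", allowed_sample_bits(FMT_I2S));
        printf("RIGHT_J -> up to %d bits\n", allowed_sample_bits(FMT_RIGHT_J));
        return 0;
}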
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index e45518629968..2234d0c04165 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -414,9 +414,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
ARRAY_SIZE(nau8810_mono_mixer_controls)),
- SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+ SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
NAU8810_DAC_EN_SFT, 0),
- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+ SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
NAU8810_ADC_EN_SFT, 0),
SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
NAU8810_NSPK_EN_SFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 72b19e62f626..c0807b82399a 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1441,13 +1441,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
}
pcm512x->sclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
- return ret;
+ goto err;
}
}
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 91879ea95415..01aa75cde571 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(spi_mutex);
* RT5677_SPI_READ/WRITE_32: Transfer 4 bytes
* RT5677_SPI_READ/WRITE_BURST: Transfer any multiples of 8 bytes
*
- * For example, reading 260 bytes at 0x60030002 uses the following commands:
- * 0x60030002 RT5677_SPI_READ_16 2 bytes
+ * Note:
+ * 16 Bit writes and reads are restricted to the address range
+ * 0x18020000 ~ 0x18021000
+ *
+ * For example, reading 256 bytes at 0x60030004 uses the following commands:
* 0x60030004 RT5677_SPI_READ_32 4 bytes
* 0x60030008 RT5677_SPI_READ_BURST 240 bytes
* 0x600300F8 RT5677_SPI_READ_BURST 8 bytes
* 0x60030100 RT5677_SPI_READ_32 4 bytes
- * 0x60030104 RT5677_SPI_READ_16 2 bytes
*
* Input:
* @read: true for read commands; false for write commands
@@ -81,15 +83,13 @@ static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
{
u8 cmd;
- if (align == 2 || align == 6 || remain == 2) {
- cmd = RT5677_SPI_READ_16;
- *len = 2;
- } else if (align == 4 || remain <= 6) {
+ if (align == 4 || remain <= 4) {
cmd = RT5677_SPI_READ_32;
*len = 4;
} else {
cmd = RT5677_SPI_READ_BURST;
- *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+ *len = (((remain - 1) >> 3) + 1) << 3;
+ *len = min_t(u32, *len, RT5677_SPI_BURST_LEN);
}
return read ? cmd : cmd + 1;
}
@@ -110,7 +110,7 @@ static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
}
}
-/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+/* Read DSP address space using SPI. addr and len have to be 4-byte aligned. */
int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
{
u32 offset;
@@ -126,7 +126,7 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
if (!g_spi)
return -ENODEV;
- if ((addr & 1) || (len & 1)) {
+ if ((addr & 3) || (len & 3)) {
dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
return -EACCES;
}
@@ -161,13 +161,13 @@ int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
}
EXPORT_SYMBOL_GPL(rt5677_spi_read);
-/* Write DSP address space using SPI. addr has to be 2-byte aligned.
- * If len is not 2-byte aligned, an extra byte of zero is written at the end
+/* Write DSP address space using SPI. addr has to be 4-byte aligned.
+ * If len is not 4-byte aligned, then extra zeros are written at the end
* as padding.
*/
int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
{
- u32 offset, len_with_pad = len;
+ u32 offset;
int status = 0;
struct spi_transfer t;
struct spi_message m;
@@ -180,22 +180,19 @@ int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
if (!g_spi)
return -ENODEV;
- if (addr & 1) {
+ if (addr & 3) {
dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
return -EACCES;
}
- if (len & 1)
- len_with_pad = len + 1;
-
memset(&t, 0, sizeof(t));
t.tx_buf = buf;
t.speed_hz = RT5677_SPI_FREQ;
spi_message_init_with_transfers(&m, &t, 1);
- for (offset = 0; offset < len_with_pad;) {
+ for (offset = 0; offset < len;) {
spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
- len_with_pad - offset, &t.len);
+ len - offset, &t.len);
/* Construct SPI message header */
buf[0] = spi_cmd;
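The rt5677-spi hunks above drop the 2-byte transfer path, require 4-byte alignment, and change the burst length from rounding down ("remain & ~7") to rounding up to the next multiple of 8, which is safe now that writes are padded with zeros. A quick standalone check of the new length computation, with 240 assumed as the burst cap (the driver's RT5677_SPI_BURST_LEN):

#include <stdio.h>
#include <stdint.h>

#define DEMO_BURST_LEN 240u     /* stand-in for RT5677_SPI_BURST_LEN */

static uint32_t burst_len(uint32_t remain)
{
        uint32_t len = (((remain - 1) >> 3) + 1) << 3;  /* round up to 8 */

        return len < DEMO_BURST_LEN ? len : DEMO_BURST_LEN;
}

int main(void)
{
        uint32_t samples[] = { 1, 8, 9, 250, 256 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("remain %3u -> len %3u\n", samples[i], burst_len(samples[i]));
        return 0;
}

For example, remain = 9 now yields a single 16-byte burst instead of an 8-byte burst followed by a short transfer.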
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 65ac4518ad06..49ab26e69f2f 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -305,6 +305,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_I2C_MASTER_CTRL7:
case RT5677_I2C_MASTER_CTRL8:
case RT5677_HAP_GENE_CTRL2:
+ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
case RT5677_PWR_DSP_ST:
case RT5677_PRIV_DATA:
case RT5677_ASRC_22:
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 3dba5550a665..39810b713d5f 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -35,6 +35,13 @@
#define SGTL5000_DAP_REG_OFFSET 0x0100
#define SGTL5000_MAX_REG_OFFSET 0x013A
+/* Delay for the VAG ramp up */
+#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
+/* Delay for the VAG ramp down */
+#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
+
+#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
+
/* default value of sgtl5000 registers */
static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_CHIP_DIG_POWER, 0x0000 },
@@ -99,6 +106,13 @@ enum sgtl5000_micbias_resistor {
SGTL5000_MICBIAS_8K = 8,
};
+enum {
+ HP_POWER_EVENT,
+ DAC_POWER_EVENT,
+ ADC_POWER_EVENT,
+ LAST_POWER_EVENT = ADC_POWER_EVENT
+};
+
/* sgtl5000 private structure in codec */
struct sgtl5000_priv {
int sysclk; /* sysclk rate */
@@ -111,8 +125,117 @@ struct sgtl5000_priv {
int revision;
u8 micbias_resistor;
u8 micbias_voltage;
+ u16 mute_state[LAST_POWER_EVENT + 1];
};
+static inline int hp_sel_input(struct snd_soc_component *component)
+{
+ unsigned int ana_reg = 0;
+
+ snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &ana_reg);
+
+ return (ana_reg & SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
+}
+
+static inline u16 mute_output(struct snd_soc_component *component,
+ u16 mute_mask)
+{
+ unsigned int mute_reg = 0;
+
+ snd_soc_component_read(component, SGTL5000_CHIP_ANA_CTRL, &mute_reg);
+
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+ mute_mask, mute_mask);
+ return mute_reg;
+}
+
+static inline void restore_output(struct snd_soc_component *component,
+ u16 mute_mask, u16 mute_reg)
+{
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
+ mute_mask, mute_reg);
+}
+
+static void vag_power_on(struct snd_soc_component *component, u32 source)
+{
+ unsigned int ana_reg = 0;
+
+ snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_reg);
+
+ if (ana_reg & SGTL5000_VAG_POWERUP)
+ return;
+
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+
+ /* When VAG powering on to get local loop from Line-In, the sleep
+ * is required to avoid loud pop.
+ */
+ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
+ source == HP_POWER_EVENT)
+ msleep(SGTL5000_VAG_POWERUP_DELAY);
+}
+
+static int vag_power_consumers(struct snd_soc_component *component,
+ u16 ana_pwr_reg, u32 source)
+{
+ int consumers = 0;
+
+ /* count dac/adc consumers unconditional */
+ if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
+ consumers++;
+ if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
+ consumers++;
+
+ /*
+ * If the event comes from HP and Line-In is selected,
+ * current action is 'DAC to be powered down'.
+ * As HP_POWERUP is not set when HP muxed to line-in,
+ * we need to keep VAG power ON.
+ */
+ if (source == HP_POWER_EVENT) {
+ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
+ consumers++;
+ } else {
+ if (ana_pwr_reg & SGTL5000_HP_POWERUP)
+ consumers++;
+ }
+
+ return consumers;
+}
+
+static void vag_power_off(struct snd_soc_component *component, u32 source)
+{
+ unsigned int ana_pwr = SGTL5000_VAG_POWERUP;
+
+ snd_soc_component_read(component, SGTL5000_CHIP_ANA_POWER, &ana_pwr);
+
+ if (!(ana_pwr & SGTL5000_VAG_POWERUP))
+ return;
+
+ /*
+ * This function calls when any of VAG power consumers is disappearing.
+ * Thus, if there is more than one consumer at the moment, as minimum
+ * one consumer will definitely stay after the end of the current
+ * event.
+ * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
+ * - LINE_IN (for HP events) / HP (for DAC/ADC events)
+ * - DAC
+ * - ADC
+ * (the current consumer is disappearing right now)
+ */
+ if (vag_power_consumers(component, ana_pwr, source) >= 2)
+ return;
+
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, 0);
+ /* In power down case, we need wait 400-1000 ms
+ * when VAG fully ramped down.
+ * As longer we wait, as smaller pop we've got.
+ */
+ msleep(SGTL5000_VAG_POWERDOWN_DELAY);
+}
+
/*
* mic_bias power on/off share the same register bits with
* output impedance of mic bias, when power on mic bias, we
@@ -144,36 +267,46 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
return 0;
}
-/*
- * As manual described, ADC/DAC only works when VAG powerup,
- * So enabled VAG before ADC/DAC up.
- * In power down case, we need wait 400ms when vag fully ramped down.
- */
-static int power_vag_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static int vag_and_mute_control(struct snd_soc_component *component,
+ int event, int event_source)
{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+ static const u16 mute_mask[] = {
+ /*
+ * Mask for HP_POWER_EVENT.
+ * Muxing Headphones have to be wrapped with mute/unmute
+ * headphones only.
+ */
+ SGTL5000_HP_MUTE,
+ /*
+ * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
+ * Muxing DAC or ADC block have to be wrapped with mute/unmute
+ * both headphones and line-out.
+ */
+ SGTL5000_OUTPUTS_MUTE,
+ SGTL5000_OUTPUTS_MUTE
+ };
+
+ struct sgtl5000_priv *sgtl5000 =
+ snd_soc_component_get_drvdata(component);
switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ sgtl5000->mute_state[event_source] =
+ mute_output(component, mute_mask[event_source]);
+ break;
case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
- msleep(400);
+ vag_power_on(component, event_source);
+ restore_output(component, mute_mask[event_source],
+ sgtl5000->mute_state[event_source]);
break;
-
case SND_SOC_DAPM_PRE_PMD:
- /*
- * Don't clear VAG_POWERUP, when both DAC and ADC are
- * operational to prevent inadvertently starving the
- * other one of them.
- */
- if ((snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER) &
- mask) != mask) {
- snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_VAG_POWERUP, 0);
- msleep(400);
- }
+ sgtl5000->mute_state[event_source] =
+ mute_output(component, mute_mask[event_source]);
+ vag_power_off(component, event_source);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ restore_output(component, mute_mask[event_source],
+ sgtl5000->mute_state[event_source]);
break;
default:
break;
@@ -182,6 +315,41 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
return 0;
}
+/*
+ * Mute Headphone when power it up/down.
+ * Control VAG power on HP power path.
+ */
+static int headphone_pga_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ return vag_and_mute_control(component, event, HP_POWER_EVENT);
+}
+
+/* As manual describes, ADC/DAC powering up/down requires
+ * to mute outputs to avoid pops.
+ * Control VAG power on ADC/DAC power path.
+ */
+static int adc_updown_depop(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ return vag_and_mute_control(component, event, ADC_POWER_EVENT);
+}
+
+static int dac_updown_depop(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ return vag_and_mute_control(component, event, DAC_POWER_EVENT);
+}
+
/* input sources for ADC */
static const char *adc_mux_text[] = {
"MIC_IN", "LINE_IN"
@@ -217,7 +385,10 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
mic_bias_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
- SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+ headphone_pga_event,
+ SND_SOC_DAPM_PRE_POST_PMU |
+ SND_SOC_DAPM_PRE_POST_PMD),
SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
@@ -233,11 +404,12 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
0, SGTL5000_CHIP_DIG_POWER,
1, 0),
- SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
- SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
-
- SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
- SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
+ SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
+ adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+ SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
+ dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
+ SND_SOC_DAPM_PRE_POST_PMD),
};
/* routes for sgtl5000 */
@@ -987,12 +1159,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
SGTL5000_INT_OSC_EN);
/* Enable VDDC charge pump */
ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
- } else if (vddio >= 3100 && vdda >= 3100) {
+ } else {
ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
- /* VDDC use VDDIO rail */
- lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
- lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
- SGTL5000_VDDC_MAN_ASSN_SHIFT;
+ /*
+ * if vddio == vdda the source of charge pump should be
+ * assigned manually to VDDIO
+ */
+ if (vddio == vdda) {
+ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+ lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+ SGTL5000_VDDC_MAN_ASSN_SHIFT;
+ }
}
snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
@@ -1040,7 +1217,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_codec *codec)
* Searching for a suitable index solving this formula:
* idx = 40 * log10(vag_val / lo_cagcntrl) + 15
*/
- vol_quot = (vag * 100) / lo_vag;
+ vol_quot = lo_vag ? (vag * 100) / lo_vag : 0;
lo_vol = 0;
for (i = 0; i < ARRAY_SIZE(vol_quot_table); i++) {
if (vol_quot >= vol_quot_table[i])
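The sgtl5000 rework above only clears VAG_POWERUP when fewer than two consumers would remain, and what counts as a consumer depends on the event source: for headphone events a line-in loopback keeps VAG busy even though HP_POWERUP is clear, while for DAC/ADC events the HP power bit counts. A small userspace model of that decision (the struct fields here are demo stand-ins, not the real register bits):

#include <stdbool.h>
#include <stdio.h>

enum source { HP_EVENT, DAC_EVENT, ADC_EVENT };

struct state {
        bool dac_powered;
        bool adc_powered;
        bool hp_powered;
        bool hp_muxed_to_line_in;
};

static int vag_consumers(const struct state *s, enum source src)
{
        int consumers = 0;

        if (s->dac_powered)
                consumers++;
        if (s->adc_powered)
                consumers++;
        /* HP events: a line-in loopback keeps VAG busy even with HP_POWERUP
         * clear; DAC/ADC events: the HP power bit is what matters. */
        if (src == HP_EVENT ? s->hp_muxed_to_line_in : s->hp_powered)
                consumers++;
        return consumers;
}

int main(void)
{
        struct state s = { .dac_powered = true, .adc_powered = false,
                           .hp_powered = true, .hp_muxed_to_line_in = false };

        /* DAC is being powered down, but HP still needs VAG: keep it on. */
        printf("keep VAG powered: %s\n",
               vag_consumers(&s, DAC_EVENT) >= 2 ? "yes" : "no");
        return 0;
}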
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 28fdfc5ec544..c27e3476848a 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -316,6 +316,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN2_R"),
SND_SOC_DAPM_INPUT("IN3_L"),
SND_SOC_DAPM_INPUT("IN3_R"),
+ SND_SOC_DAPM_INPUT("CM_L"),
+ SND_SOC_DAPM_INPUT("CM_R"),
};
static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index f0cb1c4afe3c..c5a8d758f58b 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -170,7 +170,7 @@ SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
SOC_ENUM("3D Low Cut-off", low_3d),
-SOC_ENUM("3D High Cut-off", low_3d),
+SOC_ENUM("3D High Cut-off", high_3d),
SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index c03c9da076c2..28eb55bc4663 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -948,8 +948,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
}
if (in) {
- if (in & WMFW_CTL_FLAG_READABLE)
- out |= rd;
+ out |= rd;
if (in & WMFW_CTL_FLAG_WRITEABLE)
out |= wr;
if (in & WMFW_CTL_FLAG_VOLATILE)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 3c5a9804d3f5..d1935c5c3602 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -43,6 +43,7 @@
#define MCASP_MAX_AFIFO_DEPTH 64
+#ifdef CONFIG_PM
static u32 context_regs[] = {
DAVINCI_MCASP_TXFMCTL_REG,
DAVINCI_MCASP_RXFMCTL_REG,
@@ -65,6 +66,7 @@ struct davinci_mcasp_context {
u32 *xrsr_regs; /* for serializer configuration */
bool pm_state;
};
+#endif
struct davinci_mcasp_ruledata {
struct davinci_mcasp *mcasp;
@@ -880,14 +882,13 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
active_slots = hweight32(mcasp->tdm_mask[stream]);
active_serializers = (channels + active_slots - 1) /
active_slots;
- if (active_serializers == 1) {
+ if (active_serializers == 1)
active_slots = channels;
- for (i = 0; i < total_slots; i++) {
- if ((1 << i) & mcasp->tdm_mask[stream]) {
- mask |= (1 << i);
- if (--active_slots <= 0)
- break;
- }
+ for (i = 0; i < total_slots; i++) {
+ if ((1 << i) & mcasp->tdm_mask[stream]) {
+ mask |= (1 << i);
+ if (--active_slots <= 0)
+ break;
}
}
} else {
@@ -1156,6 +1157,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct davinci_mcasp_ruledata *rd = rule->private;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_mask nfmt;
+ int i, slot_width;
+
+ snd_mask_none(&nfmt);
+ slot_width = rd->mcasp->slot_width;
+
+ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ if (snd_mask_test(fmt, i)) {
+ if (snd_pcm_format_width(i) <= slot_width) {
+ snd_mask_set(&nfmt, i);
+ }
+ }
+ }
+
+ return snd_mask_refine(fmt, &nfmt);
+}
+
static const unsigned int davinci_mcasp_dai_rates[] = {
8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
88200, 96000, 176400, 192000,
@@ -1249,7 +1272,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct davinci_mcasp_ruledata *ruledata =
&mcasp->ruledata[substream->stream];
u32 max_channels = 0;
- int i, dir;
+ int i, dir, ret;
int tdm_slots = mcasp->tdm_slots;
/* Do not allow more then one stream per direction */
@@ -1278,6 +1301,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
max_channels++;
}
ruledata->serializers = max_channels;
+ ruledata->mcasp = mcasp;
max_channels *= tdm_slots;
/*
* If the already active stream has less channels than the calculated
@@ -1303,20 +1327,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
0, SNDRV_PCM_HW_PARAM_CHANNELS,
&mcasp->chconstr[substream->stream]);
- if (mcasp->slot_width)
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
- 8, mcasp->slot_width);
+ if (mcasp->slot_width) {
+ /* Only allow formats require <= slot_width bits on the bus */
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ davinci_mcasp_hw_rule_slot_width,
+ ruledata,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (ret)
+ return ret;
+ }
/*
* If we rely on implicit BCLK divider setting we should
* set constraints based on what we can provide.
*/
if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
- int ret;
-
- ruledata->mcasp = mcasp;
-
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
davinci_mcasp_hw_rule_rate,
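The new davinci_mcasp_hw_rule_slot_width() above replaces the old sample-bits min/max constraint with a format rule that drops any format wider than the configured TDM slot. A standalone illustration of the filter, with a plain list of widths standing in for snd_pcm_format_width() over the format mask:

#include <stdio.h>

int main(void)
{
        const int widths[] = { 8, 16, 24, 32 };
        const int slot_width = 24;      /* e.g. mcasp->slot_width from DT */

        for (unsigned i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                printf("%2d-bit format: %s\n", widths[i],
                       widths[i] <= slot_width ? "allowed" : "filtered out");
        return 0;
}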
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 48f795dfc3ed..0fcf4065d0e4 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -223,16 +223,17 @@ config SND_MPC52xx_SOC_EFIKA
endif # SND_POWERPC_SOC
+config SND_SOC_IMX_PCM_FIQ
+ tristate
+ default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC)
+ select FIQ
+
if SND_IMX_SOC
config SND_SOC_IMX_SSI
tristate
select SND_SOC_FSL_UTILS
-config SND_SOC_IMX_PCM_FIQ
- tristate
- select FIQ
-
config SND_SOC_IMX_HDMI_DMA
bool
select SND_SOC_GENERIC_DMAENGINE_PCM
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 883087f2b092..38132143b7d5 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -119,13 +119,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"fsl,mux-int-port node missing or invalid.\n");
- return ret;
+ goto err;
}
ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port);
if (ret) {
dev_err(&pdev->dev,
"fsl,mux-ext-port node missing or invalid.\n");
- return ret;
+ goto err;
}
/*
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index dffd549a0e2a..705d2524ec31 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -689,6 +689,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
asrc_fail:
of_node_put(asrc_np);
of_node_put(codec_np);
+ put_device(&cpu_pdev->dev);
fail:
of_node_put(cpu_np);
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 969c5c20605a..bc3fe4a3fc84 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -350,8 +350,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool p2p_in, bool p2
return -EINVAL;
}
- if ((outrate > 8000 && outrate < 30000) &&
- (outrate/inrate > 24 || inrate/outrate > 8)) {
+ if ((outrate >= 8000 && outrate <= 30000) &&
+ (outrate > 24 * inrate || inrate > 8 * outrate)) {
pair_err("exceed supported ratio range [1/24, 8] for \
inrate/outrate: %d/%d\n", inrate, outrate);
return -EINVAL;
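The fsl_asrc hunk above also rewrites the ratio test as a cross-multiplication, because integer division silently truncates and can let an out-of-range ratio through. A quick standalone comparison with arbitrary demo rates:

#include <stdio.h>

int main(void)
{
        int inrate = 495, outrate = 12000;      /* real ratio ~24.2 */

        int old_reject = (outrate > 8000 && outrate < 30000) &&
                         (outrate / inrate > 24 || inrate / outrate > 8);
        int new_reject = (outrate >= 8000 && outrate <= 30000) &&
                         (outrate > 24 * inrate || inrate > 8 * outrate);

        printf("old check rejects: %d, new check rejects: %d\n",
               old_reject, new_reject);
        return 0;
}

Here 12000/495 truncates to exactly 24, so the old "> 24" test passes a ratio that is actually above the supported range; the cross-multiplied form catches it.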
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 0742aff1ab31..efe4904bb6f2 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -439,8 +439,8 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
esai_priv->slot_width = slot_width;
esai_priv->slots = slots;
- esai_priv->tx_mask = tx_mask;
- esai_priv->rx_mask = rx_mask;
+ esai_priv->tx_mask = tx_mask;
+ esai_priv->rx_mask = rx_mask;
return 0;
}
@@ -673,6 +673,18 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
for (i = 0; tx && i < channels; i++)
regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
+ /*
+ * When set the TE/RE in the end of enablement flow, there
+ * will be channel swap issue for multi data line case.
+ * In order to workaround this issue, we switch the bit
+ * enablement sequence to below sequence
+ * 1) clear the xSMB & xSMA: which is done in probe and
+ * stop state.
+ * 2) set TE/RE
+ * 3) set xSMB
+ * 4) set xSMA: xSMA is the last one in this flow, which
+ * will trigger esai to start.
+ */
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
@@ -1106,6 +1118,9 @@ static int fsl_esai_probe(struct platform_device *pdev)
return ret;
}
+ esai_priv->tx_mask = 0xFFFFFFFF;
+ esai_priv->rx_mask = 0xFFFFFFFF;
+
/* Clear the TSMA, TSMB, RSMA, RSMB */
regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index d6c211921cee..8f45305a1e9a 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -423,12 +423,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_CBS_CFS:
val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
+ sai->slave_mode[tx] = false;
break;
case SND_SOC_DAIFMT_CBM_CFM:
sai->slave_mode[tx] = true;
break;
case SND_SOC_DAIFMT_CBS_CFM:
val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
+ sai->slave_mode[tx] = false;
break;
case SND_SOC_DAIFMT_CBM_CFS:
val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c184590d5d8e..342c360f2a65 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1447,6 +1447,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
struct fsl_ssi_private *ssi_private;
int ret = 0;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *root;
const struct of_device_id *of_id;
const char *p, *sprop;
const uint32_t *iprop;
@@ -1638,7 +1639,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
* device tree. We also pass the address of the CPU DAI driver
* structure.
*/
- sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);
+ root = of_find_node_by_path("/");
+ sprop = of_get_property(root, "compatible", NULL);
+ of_node_put(root);
/* Sometimes the compatible name has a "fsl," prefix, so we strip it. */
p = strrchr(sprop, ',');
if (p)
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index b9e42b503a37..4f8bdb7650e8 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -75,6 +75,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
iprop = of_get_property(dma_np, "cell-index", NULL);
if (!iprop) {
of_node_put(dma_np);
+ of_node_put(dma_channel_np);
return -EINVAL;
}
*dma_id = be32_to_cpup(iprop);
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index b99e0b5e00e9..3d99a8579c99 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -115,10 +115,12 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto fail;
}
+ put_device(&ssi_pdev->dev);
codec_dev = of_find_i2c_device_by_node(codec_np);
if (!codec_dev) {
dev_err(&pdev->dev, "failed to find codec platform device\n");
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto fail;
}
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 0838478c4c3f..b3464ac99b99 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -1343,7 +1343,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
dai->capture_widget->name);
w = dai->capture_widget;
snd_soc_dapm_widget_for_each_source_path(w, p) {
- if (p->connected && !p->connected(w, p->sink))
+ if (p->connected && !p->connected(w, p->source))
continue;
if (p->connect && p->source->power &&
diff --git a/sound/soc/intel/atom/sst/sst_pci.c b/sound/soc/intel/atom/sst/sst_pci.c
index 3a0b3bf0af97..e9c6894cc27f 100644
--- a/sound/soc/intel/atom/sst/sst_pci.c
+++ b/sound/soc/intel/atom/sst/sst_pci.c
@@ -107,7 +107,7 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
do_release_regions:
pci_release_regions(pci);
- return 0;
+ return ret;
}
/*
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 79a9fdf94d38..582b30a5118d 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -1252,11 +1252,15 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
goto irq_err;
err = sst_dma_new(sst);
- if (err)
- dev_warn(dev, "sst_dma_new failed %d\n", err);
+ if (err) {
+ dev_err(dev, "sst_dma_new failed %d\n", err);
+ goto dma_err;
+ }
return sst;
+dma_err:
+ free_irq(sst->irq, sst);
irq_err:
if (sst->ops->free)
sst->ops->free(sst);
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index 6c672ac79cce..9d24cb719f69 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -211,6 +211,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
if (ipc->ops.reply_msg_match != NULL)
header = ipc->ops.reply_msg_match(header, &mask);
+ else
+ mask = (u64)-1;
if (list_empty(&ipc->rx_list)) {
dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index dcf03691ebc8..d8cf37a0f696 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -211,7 +211,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
struct device *dev = bus->dev;
- dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
+ dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
nhlt->header.oem_id, nhlt->header.oem_table_id,
nhlt->header.oem_revision);
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 794a3499e567..0dc1ab48fceb 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -92,7 +92,7 @@
#define JZ_AIC_I2S_STATUS_BUSY BIT(2)
#define JZ_AIC_CLK_DIV_MASK 0xf
-#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_SHIFT 0
#define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
#define I2SDIV_IDV_SHIFT 8
#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
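The jz4740-i2s fix above matters because the divider register holds two independent 4-bit fields: per the patched defines, DV sits at bits 3:0 and IDV at bits 11:8, so a DV shift of 8 would make both writes land on the same bits. A small standalone sketch of composing the register with the corrected shifts (field values are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define DV_SHIFT        0
#define DV_MASK         (0xfu << DV_SHIFT)
#define IDV_SHIFT       8
#define IDV_MASK        (0xfu << IDV_SHIFT)

int main(void)
{
        uint32_t reg = 0;
        uint32_t dv = 0x3, idv = 0x5;

        reg = (reg & ~DV_MASK) | (dv << DV_SHIFT);
        reg = (reg & ~IDV_MASK) | (idv << IDV_SHIFT);
        printf("divider reg = 0x%03x\n", reg);  /* 0x503 with the fixed shift */
        return 0;
}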
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 3a36d60e1785..0a5d9fb6fc84 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -570,10 +570,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = clk_prepare_enable(priv->clk);
- if (err < 0)
- return err;
-
priv->extclk = devm_clk_get(&pdev->dev, "extclk");
if (IS_ERR(priv->extclk)) {
if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
@@ -589,6 +585,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
}
}
+ err = clk_prepare_enable(priv->clk);
+ if (err < 0)
+ return err;
+
/* Some sensible defaults - this reflects the powerup values */
priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 07f91e918b23..3d91cef3704a 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -114,46 +114,58 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
if (!cpu || !codec) {
dev_err(dev, "Can't find cpu/codec DT node\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto error;
}
link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
if (!link->cpu_of_node) {
dev_err(card->dev, "error getting cpu phandle\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto error;
}
link->codec_of_node = of_parse_phandle(codec, "sound-dai", 0);
if (!link->codec_of_node) {
dev_err(card->dev, "error getting codec phandle\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto error;
}
ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
if (ret) {
dev_err(card->dev, "error getting cpu dai name\n");
- return ERR_PTR(ret);
+ goto error;
}
ret = snd_soc_of_get_dai_name(codec, &link->codec_dai_name);
if (ret) {
dev_err(card->dev, "error getting codec dai name\n");
- return ERR_PTR(ret);
+ goto error;
}
link->platform_of_node = link->cpu_of_node;
ret = of_property_read_string(np, "link-name", &link->name);
if (ret) {
dev_err(card->dev, "error getting codec dai_link name\n");
- return ERR_PTR(ret);
+ goto error;
}
link->stream_name = link->name;
link->init = apq8016_sbc_dai_init;
link++;
+
+ of_node_put(cpu);
+ of_node_put(codec);
}
return data;
+
+ error:
+ of_node_put(np);
+ of_node_put(cpu);
+ of_node_put(codec);
+ return ERR_PTR(ret);
}
static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = {
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 08bfee447a36..94b6f9c7dd6b 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -649,7 +649,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "Could not register PCM\n");
- return ret;
+ goto err_suspend;
}
return 0;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 91b444db575e..5346b3cafc67 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -629,6 +629,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
/* set format */
+ rdai->bit_clk_inv = 0;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
rdai->sys_delay = 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index ab647f1fe11b..73523cf0329b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -751,7 +751,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
val = max - val;
p->connect = !!val;
} else {
- p->connect = 0;
+ /* since a virtual mixer has no backing registers to
+ * decide which path to connect, it will try to match
+ * with initial state. This is to ensure
+ * that the default mixer choice will be
+ * correctly powered up during initialization.
+ */
+ p->connect = invert;
}
}
@@ -1104,8 +1110,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
list_add_tail(&widget->work_list, list);
if (custom_stop_condition && custom_stop_condition(widget, dir)) {
- widget->endpoints[dir] = 1;
- return widget->endpoints[dir];
+ list = NULL;
+ custom_stop_condition = NULL;
}
if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1142,8 +1148,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
*
* Optionally, can be supplied with a function acting as a stopping condition.
* This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as an arguments, it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
*/
static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
struct list_head *list,
@@ -4363,7 +4369,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
continue;
if (w->power) {
dapm_seq_insert(w, &down_list, false);
- w->power = 0;
+ w->new_power = 0;
powerdown = 1;
}
}
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 6cef3977507a..67d22b4baeb0 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -312,6 +312,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
+
+ if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
+ strncpy(rtd->pcm->streams[i].pcm->name,
+ rtd->pcm->streams[i].pcm->id,
+ sizeof(rtd->pcm->streams[i].pcm->name));
+ }
}
return 0;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index fbaa1bb41102..00d7902ad427 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -80,10 +80,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
unsigned int sync = 0;
int enable;
- trace_snd_soc_jack_report(jack, mask, status);
-
if (!jack)
return;
+ trace_snd_soc_jack_report(jack, mask, status);
dapm = &jack->card->dapm;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 9fc1a7bb8b95..90acdf4d90ed 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -837,7 +837,7 @@ int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
unsigned int regbase = mc->regbase;
unsigned int regcount = mc->regcount;
unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int regwmask = (1UL<<regwshift)-1;
unsigned int invert = mc->invert;
unsigned long mask = (1UL<<mc->nbits)-1;
long min = mc->min;
@@ -886,7 +886,7 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
unsigned int regbase = mc->regbase;
unsigned int regcount = mc->regcount;
unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
- unsigned int regwmask = (1<<regwshift)-1;
+ unsigned int regwmask = (1UL<<regwshift)-1;
unsigned int invert = mc->invert;
unsigned long mask = (1UL<<mc->nbits)-1;
long max = mc->max;
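The soc-ops change above switches the mask computation to 1UL because val_bytes can be 4, making regwshift 32, and "(1 << 32)" is undefined behaviour for a 32-bit int. With an unsigned long shift the result is well defined on 64-bit kernels and truncates to an all-ones 32-bit mask. A standalone check (this demo assumes a platform where unsigned long is 64-bit):

#include <stdio.h>

int main(void)
{
        for (unsigned int regwshift = 8; regwshift <= 32; regwshift += 8) {
                unsigned int regwmask = (1UL << regwshift) - 1;

                printf("width %2u bits -> mask 0x%08x\n", regwshift, regwmask);
        }
        return 0;
}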
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index f35ea743171b..c1d41d048dba 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -48,8 +48,8 @@ static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream)
else
codec_stream = &dai->driver->capture;
- /* If the codec specifies any rate at all, it supports the stream. */
- return codec_stream->rates;
+ /* If the codec specifies any channels at all, it supports the stream */
+ return codec_stream->channels_min;
}
/**
@@ -894,10 +894,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
codec_params = *params;
/* fixup params based on TDM slot masks */
- if (codec_dai->tx_mask)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ codec_dai->tx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->tx_mask);
- if (codec_dai->rx_mask)
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ codec_dai->rx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->rx_mask);
@@ -1589,7 +1592,7 @@ static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
u64 formats)
{
runtime->hw.rate_min = stream->rate_min;
- runtime->hw.rate_max = stream->rate_max;
+ runtime->hw.rate_max = min_not_zero(stream->rate_max, UINT_MAX);
runtime->hw.channels_min = stream->channels_min;
runtime->hw.channels_max = stream->channels_max;
if (runtime->hw.formats)
@@ -2102,7 +2105,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
ret = dpcm_do_trigger(dpcm, be_substream, cmd);
@@ -2132,7 +2136,8 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
break;
case SNDRV_PCM_TRIGGER_STOP:
- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
+ if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
@@ -2177,42 +2182,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
+ int cmd, bool fe_first)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret;
+
+ /* call trigger on the frontend before the backend. */
+ if (fe_first) {
+ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ return ret;
+ }
+
+ /* call trigger on the frontend after the backend. */
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+
+ return ret;
+}
+
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
- int stream = substream->stream, ret;
+ int stream = substream->stream;
+ int ret = 0;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
switch (trigger) {
case SND_SOC_DPCM_TRIGGER_PRE:
- /* call trigger on the frontend before the backend. */
-
- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_POST:
- /* call trigger on the frontend after the backend. */
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
@@ -2221,10 +2265,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
- }
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@@ -2233,6 +2273,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
goto out;
}
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
+ cmd, ret);
+ goto out;
+ }
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -2287,7 +2333,8 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
continue;
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
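The dpcm_dai_trigger_fe_be() helper introduced above centralizes the ordering decision: under the SND_SOC_DPCM_TRIGGER_PRE policy the front end is triggered before the back ends for start-type commands and after them for stop-type commands, and TRIGGER_POST is the mirror image. A small userspace model of that decision (command names are simplified labels, not the SNDRV_PCM_TRIGGER_* constants):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum policy { TRIGGER_PRE, TRIGGER_POST };

static bool is_start_cmd(const char *cmd)
{
        return !strcmp(cmd, "START") || !strcmp(cmd, "RESUME") ||
               !strcmp(cmd, "PAUSE_RELEASE");
}

static bool fe_first(enum policy p, const char *cmd)
{
        bool start = is_start_cmd(cmd);

        /* PRE: FE leads on start, trails on stop; POST is the inverse. */
        return p == TRIGGER_PRE ? start : !start;
}

int main(void)
{
        const char *cmds[] = { "START", "STOP", "PAUSE_PUSH", "PAUSE_RELEASE" };

        for (unsigned i = 0; i < 4; i++)
                printf("PRE policy, %-13s -> %s first\n", cmds[i],
                       fe_first(TRIGGER_PRE, cmds[i]) ? "FE" : "BE");
        return 0;
}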
@@ -2977,16 +3024,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
ssize_t offset = 0;
/* FE state */
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"[%s - %s]\n", fe->dai_link->name,
stream ? "Capture" : "Playback");
- offset += snprintf(buf + offset, size - offset, "State: %s\n",
+ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
dpcm_state_string(fe->dpcm[stream].state));
if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
@@ -2994,10 +3041,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
params_rate(params));
/* BEs state */
- offset += snprintf(buf + offset, size - offset, "Backends:\n");
+ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
if (list_empty(&fe->dpcm[stream].be_clients)) {
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" No active DSP links\n");
goto out;
}
@@ -3006,16 +3053,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"- %s\n", be->dai_link->name);
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" State: %s\n",
dpcm_state_string(be->dpcm[stream].state));
if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
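The snprintf -> scnprintf conversion in dpcm_show_state() above matters because snprintf returns the length the output would have needed, so an "offset +=" accumulator can walk past the end of the buffer once it fills up; the kernel's scnprintf returns what was actually stored. Userspace has no scnprintf, so the demo below emulates one to show the difference:

#include <stdarg.h>
#include <stdio.h>

static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (ret < 0)
                return 0;
        /* Report only what was stored, like the kernel's scnprintf(). */
        return (size_t)ret < size ? ret : (int)(size ? size - 1 : 0);
}

int main(void)
{
        char buf[16];
        size_t size = sizeof(buf), offset = 0;

        offset += demo_scnprintf(buf + offset, size - offset, "%s", "0123456789");
        /* Second write no longer fits: scnprintf keeps offset inside the
         * buffer, whereas snprintf would return 10 and push offset to 20,
         * making the next "size - offset" wrap around. */
        offset += demo_scnprintf(buf + offset, size - offset, "%s", "0123456789");

        printf("offset = %zu, buf = \"%s\"\n", offset, buf);
        return 0;
}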
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 086fe4d27f60..e9c57bd3c02b 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -344,7 +344,7 @@ static int soc_tplg_add_kcontrol(struct soc_tplg *tplg,
struct snd_soc_component *comp = tplg->comp;
return soc_tplg_add_dcontrol(comp->card->snd_card,
- comp->dev, k, NULL, comp, kcontrol);
+ comp->dev, k, comp->name_prefix, comp, kcontrol);
}
/* remove a mixer kcontrol */
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 15c92400cea4..02c373c65e19 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -78,8 +78,8 @@
#define SUN4I_I2S_CLK_DIV_MCLK_MASK GENMASK(3, 0)
#define SUN4I_I2S_CLK_DIV_MCLK(mclk) ((mclk) << 0)
-#define SUN4I_I2S_RX_CNT_REG 0x28
-#define SUN4I_I2S_TX_CNT_REG 0x2c
+#define SUN4I_I2S_TX_CNT_REG 0x28
+#define SUN4I_I2S_RX_CNT_REG 0x2c
#define SUN4I_I2S_TX_CHAN_SEL_REG 0x30
#define SUN4I_I2S_TX_CHAN_SEL(num_chan) (((num_chan) - 1) << 0)
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 1e76869dd488..863e04809a6b 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -152,14 +152,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing/invalid\n");
ret = -EINVAL;
- goto err;
+ goto err_put_codec_of_node;
}
tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ goto err_put_cpu_of_node;
ret = snd_soc_register_card(card);
if (ret) {
@@ -172,6 +172,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
err_fini_utils:
tegra_asoc_utils_fini(&machine->util_data);
+err_put_cpu_of_node:
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+err_put_codec_of_node:
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
err:
return ret;
}
@@ -186,6 +193,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
tegra_asoc_utils_fini(&machine->util_data);
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
+
return ret;
}
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 99b73c675743..20d4e2e1bacf 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -287,7 +287,8 @@ retry:
goto retry;
}
spin_unlock(&sound_loader_lock);
- return -EBUSY;
+ r = -EBUSY;
+ goto fail;
}
}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index b8044c6034b3..30aa5f2df6da 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -403,6 +403,9 @@ static void snd_complete_urb(struct urb *urb)
}
prepare_outbound_urb(ep, ctx);
+ /* can be stopped during prepare callback */
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
} else {
retire_inbound_urb(ep, ctx);
/* can be stopped during retire callback */
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 58d624938a9f..ea3a9bd05e68 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -306,7 +306,7 @@ static void line6_data_received(struct urb *urb)
line6_midibuf_read(mb, line6->buffer_message,
LINE6_MIDI_MESSAGE_MAXLEN);
- if (done == 0)
+ if (done <= 0)
break;
line6->message_length = done;
@@ -337,12 +337,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
- unsigned char len;
+ unsigned char *len;
unsigned count;
if (address > 0xffff || datalen > 0xff)
return -EINVAL;
+ len = kmalloc(sizeof(*len), GFP_KERNEL);
+ if (!len)
+ return -ENOMEM;
+
/* query the serial number: */
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -351,7 +355,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
/* Wait for data length. We'll get 0xff until length arrives. */
@@ -361,28 +365,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
- 0x0012, 0x0000, &len, 1,
+ 0x0012, 0x0000, len, 1,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receive length failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
- if (len != 0xff)
+ if (*len != 0xff)
break;
}
- if (len == 0xff) {
+ ret = -EIO;
+ if (*len == 0xff) {
dev_err(line6->ifcdev, "read failed after %d retries\n",
count);
- return -EIO;
- } else if (len != datalen) {
+ goto exit;
+ } else if (*len != datalen) {
/* should be equal or something went wrong */
dev_err(line6->ifcdev,
"length mismatch (expected %d, got %d)\n",
- (int)datalen, (int)len);
- return -EIO;
+ (int)datalen, (int)*len);
+ goto exit;
}
/* receive the result: */
@@ -391,12 +396,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
0x0013, 0x0000, data, datalen,
LINE6_TIMEOUT * HZ);
- if (ret < 0) {
+ if (ret < 0)
dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
- return ret;
- }
- return 0;
+exit:
+ kfree(len);
+ return ret;
}
EXPORT_SYMBOL_GPL(line6_read_data);
@@ -408,12 +413,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
- unsigned char status;
+ unsigned char *status;
int count;
if (address > 0xffff || datalen > 0xffff)
return -EINVAL;
+ status = kmalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0022, address, data, datalen,
@@ -422,7 +431,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev,
"write request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@@ -433,28 +442,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000,
- &status, 1, LINE6_TIMEOUT * HZ);
+ status, 1, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receiving status failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
- if (status != 0xff)
+ if (*status != 0xff)
break;
}
- if (status == 0xff) {
+ if (*status == 0xff) {
dev_err(line6->ifcdev, "write failed after %d retries\n",
count);
- return -EIO;
- } else if (status != 0) {
+ ret = -EIO;
+ } else if (*status != 0) {
dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
- return -EIO;
+ ret = -EIO;
}
-
- return 0;
+exit:
+ kfree(status);
+ return ret;
}
EXPORT_SYMBOL_GPL(line6_write_data);
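The line6 changes above move the one-byte len/status variables from the stack to kmalloc'd memory because USB transfer buffers must be DMA-capable, which on-stack storage is not, and every error path now funnels through a single exit label that frees the buffer. The shape of that pattern in plain userspace C, with the transfer faked and malloc/free standing in for kmalloc/kfree:

#include <stdio.h>
#include <stdlib.h>

static int fake_transfer(unsigned char *buf)
{
        *buf = 0x00;    /* pretend the device reported success */
        return 0;
}

static int read_status(void)
{
        unsigned char *status;
        int ret;

        status = malloc(sizeof(*status));       /* kmalloc() in the driver */
        if (!status)
                return -1;

        ret = fake_transfer(status);
        if (ret < 0)
                goto exit;

        ret = (*status == 0) ? 0 : -1;
exit:
        free(status);   /* kfree() in the driver; single cleanup point */
        return ret;
}

int main(void)
{
        printf("read_status() = %d\n", read_status());
        return 0;
}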
diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
index 36a610ba342e..c931d48801eb 100644
--- a/sound/usb/line6/midibuf.c
+++ b/sound/usb/line6/midibuf.c
@@ -163,7 +163,7 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
int midi_length_prev =
midibuf_message_length(this->command_prev);
- if (midi_length_prev > 0) {
+ if (midi_length_prev > 1) {
midi_length = midi_length_prev - 1;
repeat = 1;
} else
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index fab53f58d447..74b399372e0b 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -552,13 +552,6 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->volume_monitor = 255;
line6pcm->line6 = line6;
- line6pcm->max_packet_size_in =
- usb_maxpacket(line6->usbdev,
- usb_rcvisocpipe(line6->usbdev, ep_read), 0);
- line6pcm->max_packet_size_out =
- usb_maxpacket(line6->usbdev,
- usb_sndisocpipe(line6->usbdev, ep_write), 1);
-
spin_lock_init(&line6pcm->out.lock);
spin_lock_init(&line6pcm->in.lock);
line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
@@ -568,6 +561,18 @@ int line6_init_pcm(struct usb_line6 *line6,
pcm->private_data = line6pcm;
pcm->private_free = line6_cleanup_pcm;
+ line6pcm->max_packet_size_in =
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0);
+ line6pcm->max_packet_size_out =
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1);
+ if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot get proper max packet size\n");
+ return -EINVAL;
+ }
+
err = line6_create_audio_out_urbs(line6pcm);
if (err < 0)
return err;
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 5ab9e0c89211..8c4375bf34ab 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -221,28 +221,32 @@ static void podhd_startup_start_workqueue(unsigned long data)
static int podhd_dev_start(struct usb_line6_podhd *pod)
{
int ret;
- u8 init_bytes[8];
+ u8 *init_bytes;
int i;
struct usb_device *usbdev = pod->line6.usbdev;
+ init_bytes = kmalloc(8, GFP_KERNEL);
+ if (!init_bytes)
+ return -ENOMEM;
+
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x11, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
/* NOTE: looks like some kind of ping message */
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x11, 0x0,
- &init_bytes, 3, LINE6_TIMEOUT * HZ);
+ init_bytes, 3, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev,
"receive length failed (error %d)\n", ret);
- return ret;
+ goto exit;
}
pod->firmware_version =
@@ -251,7 +255,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
for (i = 0; i <= 16; i++) {
ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
if (ret < 0)
- return ret;
+ goto exit;
}
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@@ -259,10 +263,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
1, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
- if (ret < 0)
- return ret;
-
- return 0;
+exit:
+ kfree(init_bytes);
+ return ret;
}
static void podhd_startup_workqueue(struct work_struct *work)
@@ -382,7 +385,7 @@ static const struct line6_properties podhd_properties_table[] = {
.name = "POD HD500",
.capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
- .altsetting = 1,
+ .altsetting = 0,
.ep_ctrl_r = 0x81,
.ep_ctrl_w = 0x01,
.ep_audio_r = 0x86,
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index 8e22f430d700..d3871d99ade4 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -365,15 +365,20 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
/*
Setup Toneport device.
*/
-static void toneport_setup(struct usb_line6_toneport *toneport)
+static int toneport_setup(struct usb_line6_toneport *toneport)
{
- int ticks;
+ int *ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
+ ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
+ if (!ticks)
+ return -ENOMEM;
+
/* sync time on device with host: */
- ticks = (int)get_seconds();
- line6_write_data(line6, 0x80c6, &ticks, 4);
+ *ticks = (int)get_seconds();
+ line6_write_data(line6, 0x80c6, ticks, 4);
+ kfree(ticks);
/* enable device: */
toneport_send_cmd(usbdev, 0x0301, 0x0000);
@@ -388,6 +393,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
toneport_update_led(toneport);
mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
+ return 0;
}
/*
@@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
return err;
}
- toneport_setup(toneport);
+ err = toneport_setup(toneport);
+ if (err)
+ return err;
/* register audio system: */
return snd_card_register(line6->card);
@@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
*/
static int toneport_reset_resume(struct usb_interface *interface)
{
- toneport_setup(usb_get_intfdata(interface));
+ int err;
+
+ err = toneport_setup(usb_get_intfdata(interface));
+ if (err)
+ return err;
return line6_resume(interface);
}
#endif
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 64b90b8ec661..e2f62362a0b0 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -82,6 +82,7 @@ struct mixer_build {
unsigned char *buffer;
unsigned int buflen;
DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
struct usb_audio_term oterm;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
@@ -710,15 +711,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
 * parse the source unit recursively until it reaches a terminal
* or a branched unit.
*/
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
struct usb_audio_term *term)
{
int err;
void *p1;
+ unsigned char *hdr;
memset(term, 0, sizeof(*term));
- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
- unsigned char *hdr = p1;
+ for (;;) {
+ /* a loop in the terminal chain? */
+ if (test_and_set_bit(id, state->termbitmap))
+ return -EINVAL;
+
+ p1 = find_audio_control_unit(state, id);
+ if (!p1)
+ break;
+
+ hdr = p1;
term->id = id;
switch (hdr[2]) {
case UAC_INPUT_TERMINAL:
@@ -733,7 +743,7 @@ static int check_input_term(struct mixer_build *state, int id,
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
@@ -765,7 +775,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
@@ -812,6 +822,15 @@ static int check_input_term(struct mixer_build *state, int id,
return -ENODEV;
}
+
+static int check_input_term(struct mixer_build *state, int id,
+ struct usb_audio_term *term)
+{
+ memset(term, 0, sizeof(*term));
+ memset(state->termbitmap, 0, sizeof(state->termbitmap));
+ return __check_input_term(state, id, term);
+}
+
/*
* Feature Unit
*/
@@ -1027,7 +1046,8 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
if (cval->min + cval->res < cval->max) {
int last_valid_res = cval->res;
int saved, test, check;
- get_cur_mix_raw(cval, minchn, &saved);
+ if (get_cur_mix_raw(cval, minchn, &saved) < 0)
+ goto no_res_check;
for (;;) {
test = saved;
if (test < cval->max)
@@ -1047,6 +1067,7 @@ static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
snd_usb_set_cur_mix_value(cval, minchn, 0, saved);
}
+no_res_check:
cval->initialized = 1;
}
@@ -1694,6 +1715,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int pin, ich, err;
if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
+ desc->bLength < sizeof(*desc) + desc->bNrInPins ||
!(num_outs = uac_mixer_unit_bNrChannels(desc))) {
usb_audio_err(state->chip,
"invalid MIXER UNIT descriptor %d\n",
@@ -2178,6 +2200,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
if (! kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
+ for (i = 0; i < desc->bNrInPins; i++)
+ kfree(namelist[i]);
kfree(namelist);
kfree(cval);
return -ENOMEM;
@@ -2312,7 +2336,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
if (map->id == state.chip->usb_id) {
state.map = map->map;
state.selector_map = map->selector_map;
- mixer->ignore_ctl_error = map->ignore_ctl_error;
+ mixer->ignore_ctl_error |= map->ignore_ctl_error;
break;
}
}
@@ -2602,7 +2626,9 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
(err = snd_usb_mixer_status_create(mixer)) < 0)
goto _error;
- snd_usb_mixer_apply_create_quirk(mixer);
+ err = snd_usb_mixer_apply_create_quirk(mixer);
+ if (err < 0)
+ goto _error;
err = snd_device_new(chip->card, SNDRV_DEV_CODEC, mixer, &dev_ops);
if (err < 0)
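The new termbitmap in mixer.c is a cycle guard: __check_input_term() marks every unit id it visits with test_and_set_bit() and bails out with -EINVAL if an id comes up twice, so a malformed descriptor chain cannot loop forever. The same idea in a self-contained user-space sketch; next_id[] is a made-up stand-in for the descriptor chain:

#include <stdbool.h>

#define MAX_ID_ELEMS 256

/* Follow a unit chain, refusing to visit the same id twice. */
static int walk_chain(const int next_id[MAX_ID_ELEMS], int id)
{
	bool seen[MAX_ID_ELEMS] = { false };

	while (id > 0 && id < MAX_ID_ELEMS) {
		if (seen[id])
			return -1;	/* loop in the chain */
		seen[id] = true;
		id = next_id[id];	/* 0 terminates the chain */
	}
	return 0;
}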
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index eaa03acd4686..26ce6838e842 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -363,6 +363,14 @@ static const struct usbmix_name_map dell_alc4020_map[] = {
{ 0 }
};
+/* Some motherboards ship with a dummy HD-audio device that returns an invalid
+ * GET_MIN/GET_MAX response for the Input Gain Pad (id=19, control=12). Skip it.
+ */
+static const struct usbmix_name_map asus_rog_map[] = {
+ { 19, NULL, 12 }, /* FU, Input Gain Pad */
+ {}
+};
+
/*
* Control map entries
*/
@@ -482,6 +490,26 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x05a7, 0x1020),
.map = bose_companion5_map,
},
+ { /* Gigabyte TRX40 Aorus Pro WiFi */
+ .id = USB_ID(0x0414, 0xa002),
+ .map = asus_rog_map,
+ },
+ { /* ASUS ROG Zenith II */
+ .id = USB_ID(0x0b05, 0x1916),
+ .map = asus_rog_map,
+ },
+ { /* ASUS ROG Strix */
+ .id = USB_ID(0x0b05, 0x1917),
+ .map = asus_rog_map,
+ },
+ { /* MSI TRX40 Creator */
+ .id = USB_ID(0x0db0, 0x0d64),
+ .map = asus_rog_map,
+ },
+ { /* MSI TRX40 */
+ .id = USB_ID(0x0db0, 0x543d),
+ .map = asus_rog_map,
+ },
{ 0 } /* terminator */
};
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 5d2fc5f58bfe..f4fd9548c529 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -753,7 +753,7 @@ static int snd_ni_control_init_val(struct usb_mixer_interface *mixer,
return err;
}
- kctl->private_value |= (value << 24);
+ kctl->private_value |= ((unsigned int)value << 24);
return 0;
}
@@ -914,7 +914,7 @@ static int snd_ftu_eff_switch_init(struct usb_mixer_interface *mixer,
if (err < 0)
return err;
- kctl->private_value |= value[0] << 24;
+ kctl->private_value |= (unsigned int)value[0] << 24;
return 0;
}
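Both mixer_quirks.c hunks cast to unsigned int before shifting by 24: with the implicit promotion to int, a byte with bit 7 set would be shifted into the sign bit, which overflows a signed int and then sign-extends when the result is widened into the unsigned long private_value. A small stand-alone illustration (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned char value = 0x80;
	/* promoted to int: 0x80 << 24 overflows a signed int (undefined in ISO C;
	 * typical compilers produce INT_MIN) and then sign-extends on widening */
	unsigned long bad  = (unsigned long)(value << 24);
	/* the cast added by the patch keeps the shift unsigned */
	unsigned long good = (unsigned long)((unsigned int)value << 24);

	printf("%lx %lx\n", bad, good);	/* on 64-bit: ffffffff80000000 80000000 */
	return 0;
}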
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 497bad9f2789..9bc995f9b4e1 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -470,6 +470,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
}
ep = get_endpoint(alts, 1)->bEndpointAddress;
if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ get_endpoint(alts, 0)->bSynchAddress != 0 &&
((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
(!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
dev_err(&dev->dev,
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index d32727c74a16..c892b4d1e733 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3293,19 +3293,14 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = 0,
.type = QUIRK_AUDIO_STANDARD_MIXER,
},
- /* Capture */
- {
- .ifnum = 1,
- .type = QUIRK_IGNORE_INTERFACE,
- },
/* Playback */
{
- .ifnum = 2,
+ .ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = &(const struct audioformat) {
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels = 2,
- .iface = 2,
+ .iface = 1,
.altsetting = 1,
.altset_idx = 1,
.attributes = UAC_EP_CS_ATTR_FILL_MAX |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index da9fc08b20bb..064f3485a977 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1141,6 +1141,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
+ case USB_ID(0x05a7, 0x1020): /* Bose Companion 5 */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
@@ -1148,6 +1149,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
return true;
}
return false;
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 0b34dbc8f302..7dcb33d3886b 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -132,7 +132,7 @@ static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw,
info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code
if (us428->chip_status & USX2Y_STAT_CHIP_INIT)
info->chip_ready = 1;
- info->version = USX2Y_DRIVER_VERSION;
+ info->version = USX2Y_DRIVER_VERSION;
return 0;
}
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index b5ca536e56a8..34df10a43ef0 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -135,7 +135,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
- na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ na->nla_len = nla_len + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
diff --git a/tools/gpio/Build b/tools/gpio/Build
index 620c1937d957..4141f35837db 100644
--- a/tools/gpio/Build
+++ b/tools/gpio/Build
@@ -1,3 +1,4 @@
+gpio-utils-y += gpio-utils.o
lsgpio-y += lsgpio.o gpio-utils.o
gpio-hammer-y += gpio-hammer.o gpio-utils.o
gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 250a891e6ef0..e2ac49253a0a 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -32,11 +32,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
prepare: $(OUTPUT)include/linux/gpio.h
+GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
+$(GPIO_UTILS_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=gpio-utils
+
#
# lsgpio
#
LSGPIO_IN := $(OUTPUT)lsgpio-in.o
-$(LSGPIO_IN): prepare FORCE
+$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=lsgpio
$(OUTPUT)lsgpio: $(LSGPIO_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -45,7 +49,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
# gpio-hammer
#
GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
-$(GPIO_HAMMER_IN): prepare FORCE
+$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-hammer
$(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
@@ -54,7 +58,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
# gpio-event-mon
#
GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
-$(GPIO_EVENT_MON_IN): prepare FORCE
+$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
$(Q)$(MAKE) $(build)=gpio-event-mon
$(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 177480066816..834008639c4b 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -878,7 +878,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
int sn_offset = 0;
int error = 0;
char *buffer;
- struct hv_kvp_ipaddr_value *ip_buffer;
+ struct hv_kvp_ipaddr_value *ip_buffer = NULL;
char cidr_mask[5]; /* /xyz */
int weight;
int i;
@@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
daemonize = 0;
break;
case 'h':
+ print_usage(argv);
+ exit(0);
default:
print_usage(argv);
exit(EXIT_FAILURE);
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index e0829809c897..bdc1891e0a9a 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -164,6 +164,8 @@ int main(int argc, char *argv[])
daemonize = 0;
break;
case 'h':
+ print_usage(argv);
+ exit(0);
default:
print_usage(argv);
exit(EXIT_FAILURE);
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7a6d61c6c012..55272fef3b50 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -159,9 +159,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
*be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
- *mask = ~0;
+ *mask = ~(0ULL);
else
- *mask = (1ULL << *bits_used) - 1;
+ *mask = (1ULL << *bits_used) - 1ULL;
*is_signed = (signchar == 's');
if (fclose(sysfsfp)) {
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index fc446343ff41..7e3b87a77334 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -3,8 +3,6 @@
#include <asm/types.h>
#include <linux/kernel.h>
-#include <linux/compiler.h>
-
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif
@@ -12,10 +10,9 @@
#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif
+#include <linux/bits.h>
+#include <linux/compiler.h>
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/tools/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
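This new tools copy of linux/bits.h mirrors the kernel header: GENMASK(h, l) builds a mask with bits l through h set, inclusive. A quick usage sketch, assuming the definitions above are in scope; the field names are made up:

#define CTRL_MODE_MASK	GENMASK(7, 4)		/* 0xf0: bits 4..7 */
#define ADDR_MASK	GENMASK_ULL(39, 21)	/* 0x000000ffffe00000, per the comment above */

static inline unsigned int ctrl_mode(unsigned int reg)
{
	return (reg & CTRL_MODE_MASK) >> 4;
}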
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index f436d2420a18..115ecca5f8b9 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -13,7 +13,15 @@ int strtobool(const char *s, bool *res);
* However uClibc headers also define __GLIBC__ hence the hack below
*/
#if defined(__GLIBC__) && !defined(__UCLIBC__)
+// pragma diagnostic was introduced in gcc 4.6
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+#endif
extern size_t strlcpy(char *dest, const char *src, size_t size);
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC diagnostic pop
+#endif
#endif
char *str_error_r(int errnum, char *buf, size_t buflen);
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index f99f49e4a31e..21e714cf0126 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -194,6 +194,7 @@ static bool fs__env_override(struct fs *fs)
size_t name_len = strlen(fs->name);
/* name + "_PATH" + '\0' */
char upper_name[name_len + 5 + 1];
+
memcpy(upper_name, fs->name, name_len);
mem_toupper(upper_name, name_len);
strcpy(&upper_name[name_len], "_PATH");
@@ -203,7 +204,8 @@ static bool fs__env_override(struct fs *fs)
return false;
fs->found = true;
- strncpy(fs->path, override_path, sizeof(fs->path));
+ strncpy(fs->path, override_path, sizeof(fs->path) - 1);
+ fs->path[sizeof(fs->path) - 1] = '\0';
return true;
}
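The fs__env_override() fix is the usual remedy for strncpy()'s missing terminator: copy at most size - 1 bytes and NUL-terminate explicitly. The same pattern as a tiny illustrative helper (assumes dst_size > 0):

#include <string.h>

static void copy_path(char *dst, size_t dst_size, const char *src)
{
	strncpy(dst, src, dst_size - 1);	/* may truncate, never overruns */
	dst[dst_size - 1] = '\0';		/* always terminated */
}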
diff --git a/tools/lib/string.c b/tools/lib/string.c
index bd239bc1d557..0e071c0bb09e 100644
--- a/tools/lib/string.c
+++ b/tools/lib/string.c
@@ -76,6 +76,10 @@ int strtobool(const char *s, bool *res)
* If libc has strlcpy() then that version will override this
* implementation:
*/
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wignored-attributes"
+#endif
size_t __weak strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
@@ -87,3 +91,6 @@ size_t __weak strlcpy(char *dest, const char *src, size_t size)
}
return ret;
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 7851df1490e0..cc3315da6dc3 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -54,15 +54,15 @@ set_plugin_dir := 1
# Set plugin_dir to preferred global plugin location
# If we install under $HOME directory we go under
-# $(HOME)/.traceevent/plugins
+# $(HOME)/.local/lib/traceevent/plugins
#
# We don't set PLUGIN_DIR in case we install under $HOME
# directory, because by default the code looks under:
-# $(HOME)/.traceevent/plugins by default.
+# $(HOME)/.local/lib/traceevent/plugins by default.
#
ifeq ($(plugin_dir),)
ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.traceevent/plugins
+override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
set_plugin_dir := 0
else
override plugin_dir = $(libdir)/traceevent/plugins
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 669475300ba8..62f4cacf253a 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -267,10 +267,10 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
errno = ENOMEM;
return -1;
}
+ pevent->cmdlines = cmdlines;
cmdlines[pevent->cmdline_count].comm = strdup(comm);
if (!cmdlines[pevent->cmdline_count].comm) {
- free(cmdlines);
errno = ENOMEM;
return -1;
}
@@ -281,7 +281,6 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
pevent->cmdline_count++;
qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
- pevent->cmdlines = cmdlines;
return 0;
}
@@ -2204,7 +2203,7 @@ eval_type_str(unsigned long long val, const char *type, int pointer)
return val & 0xffffffff;
if (strcmp(type, "u64") == 0 ||
- strcmp(type, "s64"))
+ strcmp(type, "s64") == 0)
return val;
if (strcmp(type, "s8") == 0)
@@ -2428,7 +2427,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
static char *arg_eval (struct print_arg *arg)
{
long long val;
- static char buf[20];
+ static char buf[24];
switch (arg->type) {
case PRINT_ATOM:
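The add_new_comm() change stores the realloc()ed pointer in pevent->cmdlines before anything else can fail, so a later error path no longer frees (or loses) a block the structure still points to. The general shape of that pattern, with a made-up struct rather than the library's types:

#include <stdlib.h>
#include <string.h>

struct table {
	char **items;
	int count;
};

static int table_add(struct table *t, const char *name)
{
	char **items = realloc(t->items, (t->count + 1) * sizeof(*items));

	if (!items)
		return -1;
	t->items = items;		/* publish the new block immediately */

	items[t->count] = strdup(name);
	if (!items[t->count])
		return -1;		/* t->items is still valid and owned */

	t->count++;
	return 0;
}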
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index a16756ae3526..5fe7889606a2 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -30,7 +30,7 @@
#include "event-parse.h"
#include "event-utils.h"
-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
+#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
static struct registered_plugin_options {
struct registered_plugin_options *next;
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 5e10ba796a6f..569bceff5f51 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -1492,8 +1492,10 @@ static int copy_filter_type(struct event_filter *filter,
if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
/* Add trivial event */
arg = allocate_arg();
- if (arg == NULL)
+ if (arg == NULL) {
+ free(str);
return -1;
+ }
arg->type = FILTER_ARG_BOOLEAN;
if (strcmp(str, "TRUE") == 0)
@@ -1502,8 +1504,11 @@ static int copy_filter_type(struct event_filter *filter,
arg->boolean.value = 0;
filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL)
+ if (filter_type == NULL) {
+ free(str);
+ free_arg(arg);
return -1;
+ }
filter_type->filter = arg;
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 8ae824dbfca3..84756140a8a8 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,11 +7,12 @@ ARCH := x86
endif
# always use the host compiler
+HOSTAR ?= ar
HOSTCC ?= gcc
HOSTLD ?= ld
+AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
-AR = ar
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -31,7 +32,7 @@ INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
-I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+CFLAGS := -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)
# Allow old libelf to be used:
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 1754e094bc28..82e105b284e0 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -333,7 +333,7 @@ AVXcode: 1
06: CLTS
07: SYSRET (o64)
08: INVD
-09: WBINVD
+09: WBINVD | WBNOINVD (F3)
0a:
0b: UD2 (1B)
0c:
@@ -364,7 +364,7 @@ AVXcode: 1
# a ModR/M byte.
1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
+1c: Grp20 (1A),(1C)
1d:
1e:
1f: NOP Ev
@@ -792,6 +792,8 @@ f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+f8: MOVDIR64B Gv,Mdqq (66) | ENQCMD Gv,Mdqq (F2) | ENQCMDS Gv,Mdqq (F3)
+f9: MOVDIRI My,Gy
EndTable
Table: 3-byte opcode 2 (0x0f 0x3a)
@@ -907,7 +909,7 @@ EndTable
GrpTable: Grp3_2
0: TEST Ev,Iz
-1:
+1: TEST Ev,Iz
2: NOT Ev
3: NEG Ev
4: MUL rAX,Ev
@@ -943,9 +945,9 @@ GrpTable: Grp6
EndTable
GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5: rdpkru (110),(11B) | wrpkru (111),(11B)
@@ -1011,7 +1013,7 @@ GrpTable: Grp15
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B) | TPAUSE Rd (66),(11B) | UMONITOR Rv (F3),(11B) | UMWAIT Rd (F2),(11B)
7: clflush | clflushopt (66) | sfence (11B)
EndTable
@@ -1042,6 +1044,10 @@ GrpTable: Grp19
6: vscatterpf1qps/d Wx (66),(ev)
EndTable
+GrpTable: Grp20
+0: cldemote Mb
+EndTable
+
# AMD's Prefetch Group
GrpTable: GrpP
0: PREFETCH
diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
index a3d2c62fd805..0a3ad5dd1e8b 100644
--- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
@@ -68,7 +68,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@@ -256,7 +256,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
return add_flags(imm, mod)
}
-/^[0-9a-f]+\:/ {
+/^[0-9a-f]+:/ {
if (NR == 1)
next
# get index
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 3ff025b64527..db105207757b 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -28,6 +28,8 @@
#include <linux/hashtable.h>
#include <linux/kernel.h>
+#define FAKE_JUMP_OFFSET -1
+
struct alternative {
struct list_head list;
struct instruction *insn;
@@ -163,6 +165,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
"__reiserfs_panic",
"lbug_with_loc",
"fortify_panic",
+ "machine_real_restart",
+ "rewind_stack_do_exit",
};
if (func->bind == STB_WEAK)
@@ -498,7 +502,7 @@ static int add_jump_destinations(struct objtool_file *file)
insn->type != INSN_JUMP_UNCONDITIONAL)
continue;
- if (insn->ignore)
+ if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
continue;
rela = find_rela_by_dest_range(insn->sec, insn->offset,
@@ -645,10 +649,10 @@ static int handle_group_alt(struct objtool_file *file,
clear_insn_state(&fake_jump->state);
fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = -1;
+ fake_jump->offset = FAKE_JUMP_OFFSET;
fake_jump->type = INSN_JUMP_UNCONDITIONAL;
fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->ignore = true;
+ fake_jump->func = orig_insn->func;
}
if (!special_alt->new_len) {
@@ -910,10 +914,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
* it.
*/
for (;
- &insn->list != &file->insn_list &&
- insn->sec == func->sec &&
- insn->offset >= func->offset;
-
+ &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
@@ -1779,7 +1780,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 1;
}
- func = insn->func ? insn->func->pfunc : NULL;
+ if (insn->func)
+ func = insn->func->pfunc;
if (func && insn->ignore) {
WARN_FUNC("BUG: why am I validating an ignored function?",
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index dd4ed7c3c062..d84c28eac262 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
if (sym->type != STT_FUNC)
continue;
sym->pfunc = sym->cfunc = sym;
- coldstr = strstr(sym->name, ".cold.");
+ coldstr = strstr(sym->name, ".cold");
if (!coldstr)
continue;
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index cb081ac59fd1..bd359a04cb94 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -112,7 +112,7 @@ Given a $HOME/.perfconfig like this:
[report]
# Defaults
- sort-order = comm,dso,symbol
+ sort_order = comm,dso,symbol
percent-limit = 0
queue-size = 0
children = true
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index cd86fd7b35c4..ebcc0868b99e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -34,7 +34,7 @@ endif
# Only pass canonical directory names as the output directory:
#
ifneq ($(O),)
- FULL_O := $(shell readlink -f $(O) || echo $(O))
+ FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O))
endif
#
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 47d584da5819..f6cff278aa5d 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -41,6 +41,8 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct perf_evlist *evlist;
+ int wrapped_cnt;
+ bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
@@ -458,16 +460,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return 0;
}
-static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
+static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
+{
+ bool *wrapped;
+ int cnt = ptr->wrapped_cnt;
+
+ /* Make @ptr->wrapped as big as @idx */
+ while (cnt <= idx)
+ cnt++;
+
+ /*
+ * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
+ * cross compilation problems where the host's system supports
+ * reallocarray() but not the target.
+ */
+ wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
+ if (!wrapped)
+ return -ENOMEM;
+
+ wrapped[cnt - 1] = false;
+ ptr->wrapped_cnt = cnt;
+ ptr->wrapped = wrapped;
+
+ return 0;
+}
+
+static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
+ size_t buffer_size, u64 head)
+{
+ u64 i, watermark;
+ u64 *buf = (u64 *)buffer;
+ size_t buf_size = buffer_size;
+
+ /*
+ * We want to look at the very last 512 bytes (chosen arbitrarily) in
+ * the ring buffer.
+ */
+ watermark = buf_size - 512;
+
+ /*
+ * @head is continuously increasing - if its value is equal to or greater
+ * than the size of the ring buffer, it has wrapped around.
+ */
+ if (head >= buffer_size)
+ return true;
+
+ /*
+ * The value of @head is somewhere within the size of the ring buffer.
+ * This can mean that there hasn't been enough data to fill the ring
+ * buffer yet or that the trace time was so long that @head has numerically
+ * wrapped around. To find out we need to check if we have data at the very
+ * end of the ring buffer. We can reliably do this because mmap'ed
+ * pages are zeroed out and there is a fresh mapping with every new
+ * session.
+ */
+
+ /* @head is less than 512 bytes from the end of the ring buffer */
+ if (head > watermark)
+ watermark = head;
+
+ /*
+ * Speed things up by using 64 bit transactions (see "u64 *buf" above)
+ */
+ watermark >>= 3;
+ buf_size >>= 3;
+
+ /*
+ * If we find trace data at the end of the ring buffer, @head has
+ * been there and has numerically wrapped around at least once.
+ */
+ for (i = watermark; i < buf_size; i++)
+ if (buf[i])
+ return true;
+
+ return false;
+}
+
+static int cs_etm_find_snapshot(struct auxtrace_record *itr,
int idx, struct auxtrace_mmap *mm,
- unsigned char *data __maybe_unused,
+ unsigned char *data,
u64 *head, u64 *old)
{
+ int err;
+ bool wrapped;
+ struct cs_etm_recording *ptr =
+ container_of(itr, struct cs_etm_recording, itr);
+
+ /*
+ * Allocate memory to keep track of wrapping if this is the first
+ * time we deal with this *mm.
+ */
+ if (idx >= ptr->wrapped_cnt) {
+ err = cs_etm_alloc_wrapped_array(ptr, idx);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check to see if *head has wrapped around. If it hasn't, only the
+ * amount of data between *head and *old is snapshotted to avoid
+ * bloating the perf.data file with zeros. But as soon as *head has
+ * wrapped around, the entire size of the AUX ring buffer is taken.
+ */
+ wrapped = ptr->wrapped[idx];
+ if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
+ wrapped = true;
+ ptr->wrapped[idx] = true;
+ }
+
pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
__func__, idx, (size_t)*old, (size_t)*head, mm->len);
- *old = *head;
- *head += mm->len;
+ /* No wrap has occurred, we can just use *head and *old. */
+ if (!wrapped)
+ return 0;
+
+ /*
+ * *head has wrapped around - adjust *head and *old to pick up the
+ * entire content of the AUX buffer.
+ */
+ if (*head >= mm->len) {
+ *old = *head - mm->len;
+ } else {
+ *head += mm->len;
+ *old = *head - mm->len;
+ }
return 0;
}
@@ -508,6 +625,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
+
+ zfree(&ptr->wrapped);
free(ptr);
}
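cs_etm_buffer_has_wrapped() leans on AUX pages being zero-filled when mapped: any non-zero data in the last 512 bytes means the head pointer has already lapped the buffer at least once. A simplified user-space sketch of the same tail scan (assumes a zero-initialized buffer of at least 512 bytes; not the perf code itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool buffer_has_wrapped(const void *buffer, size_t size, uint64_t head)
{
	const uint64_t *buf = buffer;
	uint64_t watermark = size - 512;
	size_t words = size / sizeof(*buf);

	/* head only grows; reaching the buffer size proves at least one wrap */
	if (head >= size)
		return true;

	/* don't scan the region that legitimately holds data below head */
	if (head > watermark)
		watermark = head;

	for (size_t i = watermark / sizeof(*buf); i < words; i++)
		if (buf[i])
			return true;	/* data at the very end: we wrapped */

	return false;
}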
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index b9a95a1a8e69..f4f8aff8a9bb 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -4,16 +4,31 @@
#include "util.h"
#include "machine.h"
#include "api/fs/fs.h"
+#include "debug.h"
-int arch__fix_module_text_start(u64 *start, const char *name)
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
{
+ u64 m_start = *start;
char path[PATH_MAX];
snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
(int)strlen(name) - 2, name + 1);
-
- if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
- return -1;
+ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+ pr_debug2("Using module %s start:%#lx\n", path, m_start);
+ *start = m_start;
+ } else {
+ /* Successful read of the module's text segment start address.
+ * Calculate difference between module start address
+ * in memory and module text segment start address.
+ * For example module load address is 0x3ff8011b000
+ * (from /proc/modules) and module text segment start
+ * address is 0x3ff8011b870 (from file above).
+ *
+ * Adjust the module size and subtract the GOT table
+ * size located at the beginning of the module.
+ */
+ *size -= (*start - m_start);
+ }
return 0;
}
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index ee9565a033f4..7b364f2926d4 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -35,6 +35,10 @@
#include <numa.h>
#include <numaif.h>
+#ifndef RUSAGE_THREAD
+# define RUSAGE_THREAD 1
+#endif
+
/*
 * Regular printout to the terminal, suppressed if -q is specified:
*/
@@ -369,8 +373,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
/* Allocate and initialize all memory on CPU#0: */
if (init_cpu0) {
- orig_mask = bind_to_node(0);
- bind_to_memnode(0);
+ int node = numa_node_of_cpu(0);
+
+ orig_mask = bind_to_node(node);
+ bind_to_memnode(node);
}
bytes = bytes0 + HPSIZE;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 3bdb2c78a21b..476e24cf97fa 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -186,7 +186,7 @@ static void add_man_viewer(const char *name)
while (*p)
p = &((*p)->next);
*p = zalloc(sizeof(**p) + len + 1);
- strncpy((*p)->name, name, len);
+ strcpy((*p)->name, name);
}
static int supported_man_viewer(const char *name, size_t len)
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index d426dcb18ce9..496a4ca11667 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -674,6 +674,7 @@ static char *compact_gfp_flags(char *gfp_flags)
new = realloc(new_flags, len + strlen(cpt) + 2);
if (new == NULL) {
free(new_flags);
+ free(orig_flags);
return NULL;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 9a250c71840e..2b420e7a92c0 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -675,6 +675,16 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
ret = perf_add_probe_events(params.events, params.nevents);
if (ret < 0) {
+
+ /*
+ * When perf_add_probe_events() fails it calls
+ * cleanup_perf_probe_events(pevs, npevs), i.e.
+ * cleanup_perf_probe_events(params.events, params.nevents), which
+ * will call clear_perf_probe_event(), so set nevents to zero
+ * so that cleanup_params() does not call clear_perf_probe_event() again
+ * on the same pevs.
+ */
+ params.nevents = 0;
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6e88460cd13d..0abca8783bb3 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -671,6 +671,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
struct stat st;
bool has_br_stack = false;
int branch_mode = -1;
+ int last_key = 0;
bool branch_call_mode = false;
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
const char * const report_usage[] = {
@@ -956,7 +957,8 @@ repeat:
else
use_browser = 0;
- if (setup_sorting(session->evlist) < 0) {
+ if ((last_key != K_SWITCH_INPUT_DATA) &&
+ (setup_sorting(session->evlist) < 0)) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
if (field_order)
@@ -1011,6 +1013,7 @@ repeat:
ret = __cmd_report(&report);
if (ret == K_SWITCH_INPUT_DATA) {
perf_session__delete(session);
+ last_key = K_SWITCH_INPUT_DATA;
goto repeat;
} else
ret = 0;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 43d5f35e9074..5cb58f3afa35 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2564,8 +2564,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
fprintf(output, "[ perf stat: executing run #%d ... ]\n",
run_idx + 1);
+ if (run_idx != 0)
+ perf_evlist__reset_prev_raw_counts(evsel_list);
+
status = run_perf_stat(argc, argv);
- if (forever && status != -1) {
+ if (forever && status != -1 && !interval) {
print_counters(NULL, argc, argv);
perf_stat__reset_stats();
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e68c866ae798..cd2900ac473f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1323,8 +1323,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
goto out_delete_evlist;
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
- if (symbol__init(NULL) < 0)
- return -1;
+ status = symbol__init(NULL);
+ if (status < 0)
+ goto out_delete_evlist;
sort__setup_elide(stdout);
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 83fe2202382e..ff38fc63bceb 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -4,6 +4,7 @@ HEADERS='
include/uapi/linux/fcntl.h
include/uapi/linux/perf_event.h
include/uapi/linux/stat.h
+include/linux/bits.h
include/linux/hash.h
include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 8f8d895d5b74..3b9d56125ee2 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -23,7 +23,7 @@ static inline unsigned long long rdclock(void)
}
#ifndef MAX_NR_CPUS
-#define MAX_NR_CPUS 1024
+#define MAX_NR_CPUS 2048
#endif
extern const char *input_name;
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 41611d7f9873..0619054bd7a0 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -311,11 +311,12 @@ static struct fixed {
const char *name;
const char *event;
} fixed[] = {
- { "inst_retired.any", "event=0xc0" },
- { "inst_retired.any_p", "event=0xc0" },
- { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
- { "cpu_clk_unhalted.thread", "event=0x3c" },
- { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+ { "inst_retired.any", "event=0xc0,period=2000003" },
+ { "inst_retired.any_p", "event=0xc0,period=2000003" },
+ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
+ { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
+ { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
+ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
{ NULL, NULL},
};
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 66b53f10eb18..b5d0be524655 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -42,7 +42,7 @@ int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
return -1;
}
- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
@@ -54,7 +54,7 @@ int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
+ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "next_pid", 4, true))
@@ -72,7 +72,7 @@ int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
return -1;
}
- if (perf_evsel__test_field(evsel, "comm", 16, true))
+ if (perf_evsel__test_field(evsel, "comm", 16, false))
ret = -1;
if (perf_evsel__test_field(evsel, "pid", 4, true))
@@ -84,5 +84,6 @@ int test__perf_evsel__tp_sched_test(int subtest __maybe_unused)
if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
ret = -1;
+ perf_evsel__delete(evsel);
return ret;
}
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 0c5ce44f723f..e5d6e6584001 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -49,7 +49,7 @@ static void *thread_fn(void *arg)
{
struct thread_data *td = arg;
ssize_t ret;
- int go;
+ int go = 0;
if (thread_init(td))
return NULL;
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c8d9592eb142..75d504e9eeaf 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -38,7 +38,7 @@ int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
- goto out_thread_map_delete;
+ goto out_cpu_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -112,6 +112,8 @@ out_close_fd:
perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
perf_evsel__delete(evsel);
+out_cpu_map_delete:
+ cpu_map__put(cpus);
out_thread_map_delete:
thread_map__put(threads);
return err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index b0d005d295a9..de2ddfe0f7c3 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -98,6 +98,7 @@ int test__task_exit(int subtest __maybe_unused)
if (perf_evlist__mmap(evlist, 128, true) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = -1;
goto out_delete_evlist;
}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a53fef0c673b..ade6abda9f46 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2930,6 +2930,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
continue;
}
+ actions->ms.map = map;
top = pstack__peek(browser->pstack);
if (top == &browser->hists->dso_filter) {
/*
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 88f5143a5981..3c97e27383a9 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -23,7 +23,7 @@ static void tui_helpline__push(const char *msg)
SLsmg_set_color(0);
SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
SLsmg_refresh();
- strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+ strlcpy(ui_helpline__current, msg, sz);
}
static int tui_helpline__show(const char *format, va_list ap)
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 993ef2762508..32aab95e1459 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -176,6 +176,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
return bf;
}
+/* The caller is responsible to free the returned buffer. */
char *build_id_cache__origname(const char *sbuild_id)
{
char *linkname;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 18dae745034f..1d66f8eab9f9 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -595,11 +595,10 @@ static int collect_config(const char *var, const char *value,
}
ret = set_value(item, value);
- return ret;
out_free:
free(key);
- return -1;
+ return ret;
}
static int perf_config_set__init(struct perf_config_set *set)
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 7123f4de32cc..226f4312b8f3 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -265,7 +265,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
if (i > 0)
strncpy(buffer, string, i);
}
- strncat(buffer + p, numstr, 4);
+ memcpy(buffer + p, numstr, 4);
p += 3;
}
}
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 41e068e94349..fb4e1d2839c5 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -318,20 +318,50 @@ bool die_is_func_def(Dwarf_Die *dw_die)
}
/**
+ * die_entrypc - Returns entry PC (the lowest address) of a DIE
+ * @dw_die: a DIE
+ * @addr: where to store entry PC
+ *
+ * Since dwarf_entrypc() does not return the entry PC if the DIE has only an
+ * address range, we have to use this helper to retrieve the lowest address
+ * from the address range attribute.
+ */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+{
+ Dwarf_Addr base, end;
+
+ if (!addr)
+ return -EINVAL;
+
+ if (dwarf_entrypc(dw_die, addr) == 0)
+ return 0;
+
+ return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
+}
+
+/**
* die_is_func_instance - Ensure that this DIE is an instance of a subprogram
* @dw_die: a DIE
*
* Ensure that this DIE is an instance (which has an entry address).
- * This returns true if @dw_die is a function instance. If not, you need to
- * call die_walk_instances() to find actual instances.
+ * This returns true if @dw_die is a function instance. If not, the @dw_die
+ * must be a prototype. You can use die_walk_instances() to find actual
+ * instances.
**/
bool die_is_func_instance(Dwarf_Die *dw_die)
{
Dwarf_Addr tmp;
+ Dwarf_Attribute attr_mem;
+ int tag = dwarf_tag(dw_die);
- /* Actually gcc optimizes non-inline as like as inlined */
- return !dwarf_func_inline(dw_die) && dwarf_entrypc(dw_die, &tmp) == 0;
+ if (tag != DW_TAG_subprogram &&
+ tag != DW_TAG_inlined_subroutine)
+ return false;
+
+ return dwarf_entrypc(dw_die, &tmp) == 0 ||
+ dwarf_attr(dw_die, DW_AT_ranges, &attr_mem) != NULL;
}
+
/**
* die_get_data_member_location - Get the data-member offset
* @mb_die: a DIE of a member of a data structure
@@ -608,6 +638,9 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
Dwarf_Die *origin;
int tmp;
+ if (!die_is_func_instance(inst))
+ return DIE_FIND_CB_CONTINUE;
+
attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem);
if (attr == NULL)
return DIE_FIND_CB_CONTINUE;
@@ -679,15 +712,14 @@ static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
fname = die_get_call_file(in_die);
lineno = die_get_call_lineno(in_die);
- if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
+ if (fname && lineno > 0 && die_entrypc(in_die, &addr) == 0) {
lw->retval = lw->callback(fname, lineno, addr, lw->data);
if (lw->retval != 0)
return DIE_FIND_CB_END;
}
+ if (!lw->recursive)
+ return DIE_FIND_CB_SIBLING;
}
- if (!lw->recursive)
- /* Don't need to search recursively */
- return DIE_FIND_CB_SIBLING;
if (addr) {
fname = dwarf_decl_file(in_die);
@@ -720,7 +752,7 @@ static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive,
/* Handle function declaration line */
fname = dwarf_decl_file(sp_die);
if (fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
- dwarf_entrypc(sp_die, &addr) == 0) {
+ die_entrypc(sp_die, &addr) == 0) {
lw.retval = callback(fname, lineno, addr, data);
if (lw.retval != 0)
goto done;
@@ -734,6 +766,10 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
{
struct __line_walk_param *lw = data;
+ /*
+ * Since an inlined function can include another inlined function in
+ * the same file, we need to walk into it recursively.
+ */
lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data);
if (lw->retval != 0)
return DWARF_CB_ABORT;
@@ -758,11 +794,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
Dwarf_Lines *lines;
Dwarf_Line *line;
Dwarf_Addr addr;
- const char *fname, *decf = NULL;
+ const char *fname, *decf = NULL, *inf = NULL;
int lineno, ret = 0;
int decl = 0, inl;
Dwarf_Die die_mem, *cu_die;
size_t nlines, i;
+ bool flag;
/* Get the CU die */
if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
@@ -793,6 +830,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
"Possible error in debuginfo.\n");
continue;
}
+ /* Skip end-of-sequence */
+ if (dwarf_lineendsequence(line, &flag) != 0 || flag)
+ continue;
+ /* Skip Non statement line-info */
+ if (dwarf_linebeginstatement(line, &flag) != 0 || !flag)
+ continue;
/* Filter lines based on address */
if (rt_die != cu_die) {
/*
@@ -802,13 +845,21 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (!dwarf_haspc(rt_die, addr))
continue;
+
if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
+ /* Call-site check */
+ inf = die_get_call_file(&die_mem);
+ if ((inf && !strcmp(inf, decf)) &&
+ die_get_call_lineno(&die_mem) == lineno)
+ goto found;
+
dwarf_decl_line(&die_mem, &inl);
if (inl != decl ||
decf != dwarf_decl_file(&die_mem))
continue;
}
}
+found:
/* Get source line */
fname = dwarf_linesrc(line, NULL, NULL);
@@ -823,8 +874,9 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data)
*/
if (rt_die != cu_die)
/*
- * Don't need walk functions recursively, because nested
- * inlined functions don't have lines of the specified DIE.
+ * Don't need to walk inlined functions recursively, because
+ * inner inlined functions don't have the lines of the
+ * specified function.
*/
ret = __die_walk_funclines(rt_die, false, callback, data);
else {
@@ -999,7 +1051,7 @@ static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
@@ -1062,7 +1114,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
bool first = true;
const char *name;
- ret = dwarf_entrypc(sp_die, &entry);
+ ret = die_entrypc(sp_die, &entry);
if (ret)
return ret;
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 8ac53bf1ec4e..ee15fac4e1d0 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -41,6 +41,9 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
/* Get DW_AT_linkage_name (should be NULL for C binary) */
const char *die_get_linkage_name(Dwarf_Die *dw_die);
+/* Get the lowest PC in DIE (including range list) */
+int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr);
+
/* Ensure that this DIE is a subprogram and definition (not declaration) */
bool die_is_func_def(Dwarf_Die *dw_die);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f7128c2a6386..758d0108c5a5 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -558,6 +558,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
{
char bf[128];
+ if (!evsel)
+ goto out_unknown;
+
if (evsel->name)
return evsel->name;
@@ -594,7 +597,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
evsel->name = strdup(bf);
- return evsel->name ?: "unknown";
+ if (evsel->name)
+ return evsel->name;
+out_unknown:
+ return "unknown";
}
const char *perf_evsel__group_name(struct perf_evsel *evsel)
@@ -1167,6 +1173,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
{
assert(list_empty(&evsel->node));
assert(evsel->evlist == NULL);
+ perf_evsel__free_counts(evsel);
perf_evsel__free_fd(evsel);
perf_evsel__free_id(evsel);
perf_evsel__free_config_terms(evsel);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a11f6760cce8..b3d947b98a7c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -949,7 +949,7 @@ static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 lev
scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
if (sysfs__read_str(file, &cache->map, &len)) {
- free(cache->map);
+ free(cache->size);
free(cache->type);
return -1;
}
@@ -1008,7 +1008,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
return 0;
}
-#define MAX_CACHES 2000
+#define MAX_CACHES (MAX_NR_CPUS * 4)
static int write_cache(int fd, struct perf_header *h __maybe_unused,
struct perf_evlist *evlist __maybe_unused)
@@ -2854,6 +2854,13 @@ int perf_session__read_header(struct perf_session *session)
file->path);
}
+ if (f_header.attr_size == 0) {
+ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ file->path);
+ return -EINVAL;
+ }
+
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -2936,7 +2943,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
- ev = malloc(size);
+ ev = zalloc(size);
if (ev == NULL)
return -ENOMEM;
@@ -3027,7 +3034,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
if (ev == NULL)
return -ENOMEM;
- strncpy(ev->data, evsel->name, len);
+ strlcpy(ev->data, evsel->name, len + 1);
err = process(tool, (union perf_event*) ev, NULL, NULL);
free(ev);
return err;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index ad613ea51434..32f991d28497 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1027,8 +1027,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
- if (err)
+ if (err) {
+ map__put(alm);
return err;
+ }
err = iter->ops->prepare_entry(iter, al);
if (err)
@@ -1483,7 +1485,7 @@ int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
return 0;
}
-static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
struct hists *hists = a->hists;
struct perf_hpp_fmt *fmt;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 159d616e170b..fdd7a1cb6be9 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -312,10 +312,10 @@ static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
#define hists__for_each_format(hists, format) \
- perf_hpp_list__for_each_format((hists)->hpp_list, fmt)
+ perf_hpp_list__for_each_format((hists)->hpp_list, format)
#define hists__for_each_sort_list(hists, format) \
- perf_hpp_list__for_each_sort_list((hists)->hpp_list, fmt)
+ perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
extern struct perf_hpp_fmt perf_hpp__format[];
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 94764efb0a6a..63fa3a95a1d6 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -58,6 +58,7 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_NO_IP,
INTEL_PT_STATE_ERR_RESYNC,
INTEL_PT_STATE_IN_SYNC,
+ INTEL_PT_STATE_TNT_CONT,
INTEL_PT_STATE_TNT,
INTEL_PT_STATE_TIP,
INTEL_PT_STATE_TIP_PGD,
@@ -72,8 +73,9 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
case INTEL_PT_STATE_NO_IP:
case INTEL_PT_STATE_ERR_RESYNC:
case INTEL_PT_STATE_IN_SYNC:
- case INTEL_PT_STATE_TNT:
+ case INTEL_PT_STATE_TNT_CONT:
return true;
+ case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TIP:
case INTEL_PT_STATE_TIP_PGD:
case INTEL_PT_STATE_FUP:
@@ -240,19 +242,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
decoder->tsc_ctc_ratio_d;
-
- /*
- * Allow for timestamps appearing to backwards because a TSC
- * packet has slipped past a MTC packet, so allow 2 MTC ticks
- * or ...
- */
- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
- decoder->tsc_ctc_ratio_n,
- decoder->tsc_ctc_ratio_d);
}
- /* ... or 0x100 paranoia */
- if (decoder->tsc_slip < 0x100)
- decoder->tsc_slip = 0x100;
+
+ /*
+ * A TSC packet can slip past MTC packets so that the timestamp appears
+ * to go backwards. One estimate is that it can be up to about 40 CPU
+ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+ * slippage an order of magnitude more to be on the safe side.
+ */
+ decoder->tsc_slip = 0x10000;
intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
@@ -860,16 +858,20 @@ static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
if (decoder->continuous_period) {
- if (masked_timestamp != decoder->last_masked_timestamp)
+ if (masked_timestamp > decoder->last_masked_timestamp)
return 1;
} else {
timestamp += 1;
masked_timestamp = timestamp & decoder->period_mask;
- if (masked_timestamp != decoder->last_masked_timestamp) {
+ if (masked_timestamp > decoder->last_masked_timestamp) {
decoder->last_masked_timestamp = masked_timestamp;
decoder->continuous_period = true;
}
}
+
+ if (masked_timestamp < decoder->last_masked_timestamp)
+ return decoder->period_ticks;
+
return decoder->period_ticks - (timestamp - masked_timestamp);
}
@@ -898,7 +900,10 @@ static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
case INTEL_PT_PERIOD_TICKS:
timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
masked_timestamp = timestamp & decoder->period_mask;
- decoder->last_masked_timestamp = masked_timestamp;
+ if (masked_timestamp > decoder->last_masked_timestamp)
+ decoder->last_masked_timestamp = masked_timestamp;
+ else
+ decoder->last_masked_timestamp += decoder->period_ticks;
break;
case INTEL_PT_PERIOD_NONE:
case INTEL_PT_PERIOD_MTC:
@@ -1175,7 +1180,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
return -ENOENT;
}
decoder->tnt.count -= 1;
- if (!decoder->tnt.count)
+ if (decoder->tnt.count)
+ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+ else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->tnt.payload <<= 1;
decoder->state.from_ip = decoder->ip;
@@ -1206,7 +1213,9 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
decoder->tnt.count -= 1;
- if (!decoder->tnt.count)
+ if (decoder->tnt.count)
+ decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
+ else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
if (decoder->tnt.payload & BIT63) {
decoder->tnt.payload <<= 1;
@@ -1226,8 +1235,11 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
return 0;
}
decoder->ip += intel_pt_insn.length;
- if (!decoder->tnt.count)
+ if (!decoder->tnt.count) {
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
return -EAGAIN;
+ }
decoder->tnt.payload <<= 1;
continue;
}
@@ -2150,6 +2162,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
err = intel_pt_walk_trace(decoder);
break;
case INTEL_PT_STATE_TNT:
+ case INTEL_PT_STATE_TNT_CONT:
err = intel_pt_walk_tnt(decoder);
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 95f0884aae02..7e2e8aa95467 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -369,7 +369,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
size_t size;
u16 idr_size;
const char *sym;
- uint32_t count;
+ uint64_t count;
int ret, csize;
pid_t pid, tid;
struct {
@@ -391,7 +391,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
count);
@@ -493,7 +493,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
return -1;
filename = event->mmap2.filename;
- size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+ size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
pid,
jr->move.code_index);
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 621f6527b790..4887dd5eb80f 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -220,14 +220,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
const char *prefix_dir = "";
const char *suffix_dir = "";
+ /* _UTSNAME_LENGTH is 65 */
+ char release[128];
+
char *autoconf_path;
int err;
if (!test_dir) {
- /* _UTSNAME_LENGTH is 65 */
- char release[128];
-
err = fetch_kernel_version(NULL, release,
sizeof(release));
if (err)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index df85b9efd80f..45ea7a8cd818 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1074,22 +1074,25 @@ static int machine__set_modules_path(struct machine *machine)
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
+ u64 *size __maybe_unused,
const char *name __maybe_unused)
{
return 0;
}
-static int machine__create_module(void *arg, const char *name, u64 start)
+static int machine__create_module(void *arg, const char *name, u64 start,
+ u64 size)
{
struct machine *machine = arg;
struct map *map;
- if (arch__fix_module_text_start(&start, name) < 0)
+ if (arch__fix_module_text_start(&start, &size, name) < 0)
return -1;
map = machine__findnew_module_map(machine, start, name);
if (map == NULL)
return -1;
+ map->end = start + size;
dso__kernel_module_get_build_id(map->dso, machine->root_dir);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 354de6e56109..9b87b8b870ae 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -205,7 +205,7 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
const char *filename);
-int arch__fix_module_text_start(u64 *start, const char *name);
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
int __machine__load_kallsyms(struct machine *machine, const char *filename,
enum map_type type, bool no_kcore);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index c662fef95d14..ab8ebfa2159d 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,4 +1,5 @@
#include "symbol.h"
+#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
@@ -87,7 +88,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
return true;
}
- if (!strncmp(filename, "/system/lib/", 11)) {
+ if (!strncmp(filename, "/system/lib/", 12)) {
char *ndk, *app;
const char *arch;
size_t ndk_length;
@@ -716,6 +717,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
after->start = map->end;
+ after->pgoff += map->end - pos->start;
+ assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
__map_groups__insert(pos->groups, after);
if (verbose >= 2)
map__fprintf(after, fp);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 14f111a10650..6193be6d7639 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2104,6 +2104,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
printf(" %-50s [%s]\n", buf, "SDT event");
free(buf);
}
+ free(path);
} else
printf(" %-50s [%s]\n", nd->s, "SDT event");
if (nd2) {
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 679d6e493962..e6324397b295 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -26,7 +26,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
static inline const char *perf_reg_name(int id __maybe_unused)
{
- return NULL;
+ return "unknown";
}
static inline int perf_reg_value(u64 *valp __maybe_unused,
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 0d9d6e0803b8..82e4f158c88e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -612,38 +612,31 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
const char *function,
struct probe_trace_point *tp)
{
- Dwarf_Addr eaddr, highaddr;
+ Dwarf_Addr eaddr;
GElf_Sym sym;
const char *symbol;
/* Verify the address is correct */
- if (dwarf_entrypc(sp_die, &eaddr) != 0) {
- pr_warning("Failed to get entry address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (dwarf_highpc(sp_die, &highaddr) != 0) {
- pr_warning("Failed to get end address of %s\n",
- dwarf_diename(sp_die));
- return -ENOENT;
- }
- if (paddr > highaddr) {
- pr_warning("Offset specified is greater than size of %s\n",
+ if (!dwarf_haspc(sp_die, paddr)) {
+ pr_warning("Specified offset is out of %s\n",
dwarf_diename(sp_die));
return -EINVAL;
}
- symbol = dwarf_diename(sp_die);
- if (!symbol) {
- /* Try to get the symbol name from symtab */
+ if (dwarf_entrypc(sp_die, &eaddr) == 0) {
+ /* If the DIE has entrypc, use it. */
+ symbol = dwarf_diename(sp_die);
+ } else {
+ /* Try to get actual symbol name and address from symtab */
symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
- if (!symbol) {
- pr_warning("Failed to find symbol at 0x%lx\n",
- (unsigned long)paddr);
- return -ENOENT;
- }
eaddr = sym.st_value;
}
+ if (!symbol) {
+ pr_warning("Failed to find symbol at 0x%lx\n",
+ (unsigned long)paddr);
+ return -ENOENT;
+ }
+
tp->offset = (unsigned long)(paddr - eaddr);
tp->address = (unsigned long)paddr;
tp->symbol = strdup(symbol);
@@ -764,6 +757,16 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
return 0;
}
+/* Return innermost DIE */
+static int find_inner_scope_cb(Dwarf_Die *fn_die, void *data)
+{
+ struct find_scope_param *fsp = data;
+
+ memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
+ fsp->found = true;
+ return 1;
+}
+
/* Find an appropriate scope fits to given conditions */
static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
{
@@ -775,8 +778,13 @@ static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem)
.die_mem = die_mem,
.found = false,
};
+ int ret;
- cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp);
+ ret = cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb,
+ &fsp);
+ if (!ret && !fsp.found)
+ cu_walk_functions_at(&pf->cu_die, pf->addr,
+ find_inner_scope_cb, &fsp);
return fsp.found ? die_mem : NULL;
}
@@ -950,7 +958,7 @@ static int probe_point_inline_cb(Dwarf_Die *in_die, void *data)
ret = find_probe_point_lazy(in_die, pf);
else {
/* Get probe address */
- if (dwarf_entrypc(in_die, &addr) != 0) {
+ if (die_entrypc(in_die, &addr) != 0) {
pr_warning("Failed to get entry address of %s.\n",
dwarf_diename(in_die));
return -ENOENT;
@@ -1002,7 +1010,7 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
param->retval = find_probe_point_by_line(pf);
} else if (die_is_func_instance(sp_die)) {
/* Instances always have the entry address */
- dwarf_entrypc(sp_die, &pf->addr);
+ die_entrypc(sp_die, &pf->addr);
/* But in some case the entry address is 0 */
if (pf->addr == 0) {
pr_debug("%s has no entry PC. Skipped\n",
@@ -1414,6 +1422,18 @@ error:
return DIE_FIND_CB_END;
}
+static bool available_var_finder_overlap(struct available_var_finder *af)
+{
+ int i;
+
+ for (i = 0; i < af->nvls; i++) {
+ if (af->pf.addr == af->vls[i].point.address)
+ return true;
+ }
+ return false;
+
+}
+
/* Add a found vars into available variables list */
static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
{
@@ -1424,6 +1444,14 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
Dwarf_Die die_mem;
int ret;
+ /*
+ * For some reason (e.g. different column assigned to same address),
+ * this callback can be called with an address that has already been
+ * processed. Ignore such duplicate calls.
+ */
+ if (available_var_finder_overlap(af))
+ return 0;
+
/* Check number of tevs */
if (af->nvls == af->max_vls) {
pr_warning("Too many( > %d) probe point found.\n", af->max_vls);
@@ -1567,7 +1595,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
/* Get function entry information */
func = basefunc = dwarf_diename(&spdie);
if (!func ||
- dwarf_entrypc(&spdie, &baseaddr) != 0 ||
+ die_entrypc(&spdie, &baseaddr) != 0 ||
dwarf_decl_line(&spdie, &baseline) != 0) {
lineno = 0;
goto post;
@@ -1584,7 +1612,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
&indie)) {
/* There is an inline function */
- if (dwarf_entrypc(&indie, &_addr) == 0 &&
+ if (die_entrypc(&indie, &_addr) == 0 &&
_addr == addr) {
/*
* addr is at an inline function entry.
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 39345c2ddfc2..d4f872f1750e 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -145,6 +145,15 @@ static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
evsel->prev_raw_counts = NULL;
}
+static void perf_evsel__reset_prev_raw_counts(struct perf_evsel *evsel)
+{
+ if (evsel->prev_raw_counts) {
+ evsel->prev_raw_counts->aggr.val = 0;
+ evsel->prev_raw_counts->aggr.ena = 0;
+ evsel->prev_raw_counts->aggr.run = 0;
+ }
+}
+
static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
int ncpus = perf_evsel__nr_cpus(evsel);
@@ -195,6 +204,14 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
}
}
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ perf_evsel__reset_prev_raw_counts(evsel);
+}
+
static void zero_per_pkg(struct perf_evsel *counter)
{
if (counter->per_pkg_mask)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index c29bb94c48a4..b8845aceac31 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -94,6 +94,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct perf_evlist *evlist);
void perf_evlist__reset_stats(struct perf_evlist *evlist);
+void perf_evlist__reset_prev_raw_counts(struct perf_evlist *evlist);
int perf_stat_process_counter(struct perf_stat_config *config,
struct perf_evsel *counter);
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 842cf3fd9235..d7e5c247c103 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -116,7 +116,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
return ret;
}
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
- va_end(ap_saved);
if (len > strbuf_avail(sb)) {
pr_debug("this should not happen, your vsnprintf is broken");
va_end(ap_saved);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 20ba5a9aeae4..5a50326c8158 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1478,7 +1478,7 @@ static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
static int kcore_copy__process_modules(void *arg,
const char *name __maybe_unused,
- u64 start)
+ u64 start, u64 size __maybe_unused)
{
struct kcore_copy_info *kci = arg;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index f199d5b11d76..acde8e489352 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -217,7 +217,8 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
goto out_unlock;
for (next = map__next(curr); next; next = map__next(curr)) {
- curr->end = next->start;
+ if (!curr->end)
+ curr->end = next->start;
curr = next;
}
@@ -225,7 +226,8 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
* We still haven't the actual symbols, so guess the
* last map final address.
*/
- curr->end = ~0ULL;
+ if (!curr->end)
+ curr->end = ~0ULL;
out_unlock:
pthread_rwlock_unlock(&maps->lock);
@@ -512,7 +514,7 @@ void dso__sort_by_name(struct dso *dso, enum map_type type)
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
- u64 start))
+ u64 start, u64 size))
{
char *line = NULL;
size_t n;
@@ -525,8 +527,8 @@ int modules__parse(const char *filename, void *arg,
while (1) {
char name[PATH_MAX];
- u64 start;
- char *sep;
+ u64 start, size;
+ char *sep, *endptr;
ssize_t line_len;
line_len = getline(&line, &n, file);
@@ -558,7 +560,11 @@ int modules__parse(const char *filename, void *arg,
scnprintf(name, sizeof(name), "[%s]", line);
- err = process_module(arg, name, start);
+ size = strtoul(sep + 1, &endptr, 0);
+ if (*endptr != ' ' && *endptr != '\t')
+ continue;
+
+ err = process_module(arg, name, start, size);
if (err)
break;
}
@@ -905,7 +911,8 @@ static struct module_info *find_module(const char *name,
return NULL;
}
-static int __read_proc_modules(void *arg, const char *name, u64 start)
+static int __read_proc_modules(void *arg, const char *name, u64 start,
+ u64 size __maybe_unused)
{
struct rb_root *modules = arg;
struct module_info *mi;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index d964844eb314..833b1be2fb29 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -268,7 +268,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
- u64 start));
+ u64 start, u64 size));
int filename__read_debuglink(const char *filename, char *debuglink,
size_t size);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index f5af87f66663..81aee5adc8a2 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -114,14 +114,24 @@ struct comm *thread__comm(const struct thread *thread)
struct comm *thread__exec_comm(const struct thread *thread)
{
- struct comm *comm, *last = NULL;
+ struct comm *comm, *last = NULL, *second_last = NULL;
list_for_each_entry(comm, &thread->comm_list, list) {
if (comm->exec)
return comm;
+ second_last = last;
last = comm;
}
+ /*
+ * 'last' with no start time might be the parent's comm of a synthesized
+ * thread (created by processing a synthesized fork event). For a main
+ * thread, that is very probably wrong. Prefer a later comm to avoid
+ * that case.
+ */
+ if (second_last && !last->start && thread->pid_ == thread->tid)
+ return second_last;
+
return last;
}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index e72d370889f8..8b39e8086c2d 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -74,7 +74,6 @@
#include <sys/ttydefaults.h>
#include <api/fs/tracing_path.h>
#include <termios.h>
-#include <linux/bitops.h>
#include <termios.h>
#include "strlist.h"
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index a1883bbb0144..fb5559f9819a 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -18,7 +18,7 @@ include $(srctree)/../../scripts/Makefile.include
OUTPUT=$(srctree)/
ifeq ("$(origin O)", "command line")
- OUTPUT := $(O)/power/acpi/
+ OUTPUT := $(O)/tools/power/acpi/
endif
#$(info Determined 'OUTPUT' to be $(OUTPUT))
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 7ff46be908f0..d426fec3b1d3 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -139,7 +139,7 @@ static int ap_insert_action(char *argument, u32 to_be_done)
current_action++;
if (current_action > AP_MAX_ACTIONS) {
- fprintf(stderr, "Too many table options (max %u)\n",
+ fprintf(stderr, "Too many table options (max %d)\n",
AP_MAX_ACTIONS);
return (-1);
}
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index 1eef0aed6423..08a405593a79 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -306,6 +306,8 @@ int cmd_freq_set(int argc, char **argv)
bitmask_setbit(cpus_chosen, cpus->cpu);
cpus = cpus->next;
}
+ /* Set the last cpu in related cpus list */
+ bitmask_setbit(cpus_chosen, cpus->cpu);
cpufreq_put_related_cpus(cpus);
}
}
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 2116df9ad832..c097a3748674 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -83,7 +83,7 @@ static struct pci_access *pci_acc;
static struct pci_dev *amd_fam14h_pci_dev;
static int nbp1_entered;
-struct timespec start_time;
+static struct timespec start_time;
static unsigned long long timediff;
#ifdef DEBUG
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index 5b3205f16217..5277df27191f 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -21,7 +21,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
static unsigned long long **previous_count;
static unsigned long long **current_count;
-struct timespec start_time;
+static struct timespec start_time;
static unsigned long long timediff;
static int cpuidle_get_count_percent(unsigned int id, double *percent,
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index 05f953f0f0a0..80a21cb67d94 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -29,6 +29,8 @@ struct cpuidle_monitor *all_monitors[] = {
0
};
+int cpu_count;
+
static struct cpuidle_monitor *monitors[MONITORS_MAX];
static unsigned int avail_monitors;
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
index 9e43f3371fbc..3558bbae2b5d 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -18,7 +18,7 @@
#define CSTATE_NAME_LEN 5
#define CSTATE_DESC_LEN 60
-int cpu_count;
+extern int cpu_count;
/* Hard to define the right names ...: */
enum power_range_e {
diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
index ebeaba6571a3..475e18e04318 100644
--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
@@ -40,7 +40,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
{
.name = "PC9",
.desc = N_("Processor Package C9"),
- .desc = N_("Processor Package C2"),
.id = PC9,
.range = RANGE_PACKAGE,
.get_count_percent = hsw_ext_get_count_percent,
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 8561e7ddca59..92be948c922d 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -8,7 +8,7 @@ ifeq ("$(origin O)", "command line")
endif
turbostat : turbostat.c
-CFLAGS += -Wall
+CFLAGS += -Wall -I../../../include
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
%: %.c
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 5ec2de8f49b4..7c2c8e74aa9a 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3593,7 +3593,7 @@ int initialize_counters(int cpu_id)
void allocate_output_buffer()
{
- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
+ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
outp = output_buffer;
if (outp == NULL)
err(-1, "calloc output buffer");
@@ -3691,6 +3691,9 @@ int fork_it(char **argv)
signal(SIGQUIT, SIG_IGN);
if (waitpid(child_pid, &status, 0) == -1)
err(status, "waitpid");
+
+ if (WIFEXITED(status))
+ status = WEXITSTATUS(status);
}
/*
* n.b. fork_it() does not check for errors from for_all_cpus()
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 7ea4438b801d..882c18201c7c 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -1,7 +1,7 @@
ifneq ($(O),)
ifeq ($(origin O), command line)
- dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
- ABSOLUTE_O := $(shell cd $(O) ; pwd)
+ dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+ ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd)
OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
COMMAND_O := O=$(ABSOLUTE_O)
ifeq ($(objtree),)
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
index 231bcd2c4eb5..1e7ac6f3362f 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -71,8 +71,11 @@ test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
echo "r ${PROBEFUNC} \$retval" > kprobe_events
! echo "p ${PROBEFUNC} \$retval" > kprobe_events
+# $comm was introduced in 4.8, older kernels reject it.
+if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
: "Comm access"
test_goodarg "\$comm"
+fi
: "Indirect memory access"
test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644
index 000000000000..63ed533f73d6
--- /dev/null
+++ b/tools/testing/selftests/kvm/config
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
index 90958aaaafb9..2737d6a595f4 100644
--- a/tools/testing/selftests/net/reuseport_dualstack.c
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -128,7 +128,7 @@ static void test(int *rcv_fds, int count, int proto)
{
struct epoll_event ev;
int epfd, i, test_fd;
- uint16_t test_family;
+ int test_family;
socklen_t len;
epfd = epoll_create(1);
@@ -145,6 +145,7 @@ static void test(int *rcv_fds, int count, int proto)
send_from_v4(proto);
test_fd = receive_once(epfd, proto);
+ len = sizeof(test_family);
if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
error(1, errno, "failed to read socket domain");
if (test_family != AF_INET)
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index 16058bbea7a8..c195b4478662 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -6,7 +6,7 @@ echo "--------------------"
./socket
if [ $? -ne 0 ]; then
echo "[FAIL]"
+ exit 1
else
echo "[PASS]"
fi
-
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index c9ff2b47bd1c..a37cb1192c6a 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh nft_nat.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh conntrack_icmp_related.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_icmp_related.sh b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
new file mode 100755
index 000000000000..b48e1833bc89
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_icmp_related.sh
@@ -0,0 +1,283 @@
+#!/bin/bash
+#
+# check that ICMP df-needed/pkttoobig icmp are set as related
+# state
+#
+# Setup is:
+#
+# nsclient1 -> nsrouter1 -> nsrouter2 -> nsclient2
+# MTU 1500, except for nsrouter2 <-> nsclient2 link (1280).
+# ping nsclient2 from nsclient1, checking that conntrack did set RELATED
+# 'fragmentation needed' icmp packet.
+#
+# In addition, nsrouter1 will perform IP masquerading, i.e. also
+# check the icmp errors are propagated to the correct host as per
+# nat of "established" icmp-echo "connection".
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+cleanup() {
+ for i in 1 2;do ip netns del nsclient$i;done
+ for i in 1 2;do ip netns del nsrouter$i;done
+}
+
+ipv4() {
+ echo -n 192.168.$1.2
+}
+
+ipv6 () {
+ echo -n dead:$1::2
+}
+
+check_counter()
+{
+ ns=$1
+ name=$2
+ expect=$3
+ local lret=0
+
+ cnt=$(ip netns exec $ns nft list counter inet filter "$name" | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ echo "ERROR: counter $name in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter "$name" 1>&2
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_unknown()
+{
+ expect="packets 0 bytes 0"
+ for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ check_counter $n "unknown" "$expect"
+ if [ $? -ne 0 ] ;then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+for n in nsclient1 nsclient2 nsrouter1 nsrouter2; do
+ ip netns add $n
+ ip -net $n link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns nsclient1 type veth peer name eth1 netns nsrouter1
+DEV=veth0
+ip link add $DEV netns nsclient2 type veth peer name eth1 netns nsrouter2
+
+DEV=veth0
+ip link add $DEV netns nsrouter1 type veth peer name eth2 netns nsrouter2
+
+DEV=veth0
+for i in 1 2; do
+ ip -net nsclient$i link set $DEV up
+ ip -net nsclient$i addr add $(ipv4 $i)/24 dev $DEV
+ ip -net nsclient$i addr add $(ipv6 $i)/64 dev $DEV
+done
+
+ip -net nsrouter1 link set eth1 up
+ip -net nsrouter1 link set veth0 up
+
+ip -net nsrouter2 link set eth1 up
+ip -net nsrouter2 link set eth2 up
+
+ip -net nsclient1 route add default via 192.168.1.1
+ip -net nsclient1 -6 route add default via dead:1::1
+
+ip -net nsclient2 route add default via 192.168.2.1
+ip -net nsclient2 route add default via dead:2::1
+
+i=3
+ip -net nsrouter1 addr add 192.168.1.1/24 dev eth1
+ip -net nsrouter1 addr add 192.168.3.1/24 dev veth0
+ip -net nsrouter1 addr add dead:1::1/64 dev eth1
+ip -net nsrouter1 addr add dead:3::1/64 dev veth0
+ip -net nsrouter1 route add default via 192.168.3.10
+ip -net nsrouter1 -6 route add default via dead:3::10
+
+ip -net nsrouter2 addr add 192.168.2.1/24 dev eth1
+ip -net nsrouter2 addr add 192.168.3.10/24 dev eth2
+ip -net nsrouter2 addr add dead:2::1/64 dev eth1
+ip -net nsrouter2 addr add dead:3::10/64 dev eth2
+ip -net nsrouter2 route add default via 192.168.3.1
+ip -net nsrouter2 route add default via dead:3::1
+
+sleep 2
+for i in 4 6; do
+ ip netns exec nsrouter1 sysctl -q net.ipv$i.conf.all.forwarding=1
+ ip netns exec nsrouter2 sysctl -q net.ipv$i.conf.all.forwarding=1
+done
+
+for netns in nsrouter1 nsrouter2; do
+ip netns exec $netns nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+ meta l4proto icmpv6 icmpv6 type "packet-too-big" ct state "related" counter name "related" accept
+ meta l4proto icmp icmp type "destination-unreachable" ct state "related" counter name "related" accept
+ meta l4proto { icmp, icmpv6 } ct state new,established accept
+ counter name "unknown" drop
+ }
+}
+EOF
+done
+
+ip netns exec nsclient1 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter related { }
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "related" counter name "related" accept
+ counter name "unknown" drop
+ }
+}
+EOF
+
+ip netns exec nsclient2 nft -f - <<EOF
+table inet filter {
+ counter unknown { }
+ counter new { }
+ counter established { }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new" accept
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established" accept
+ counter name "unknown" drop
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ meta l4proto { icmp, icmpv6 } ct state established,untracked accept
+
+ meta l4proto { icmp, icmpv6 } ct state "new" counter name "new"
+ meta l4proto { icmp, icmpv6 } ct state "established" counter name "established"
+ counter name "unknown" drop
+ }
+}
+EOF
+
+
+# make sure NAT core rewrites address of icmp error if nat is used according to
+# conntrack nat information (icmp error will be directed at nsrouter1 address,
+# but it needs to be routed to nsclient1 address).
+ip netns exec nsrouter1 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip protocol icmp oifname "veth0" counter masquerade
+ }
+}
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ip6 nexthdr icmpv6 oifname "veth0" counter masquerade
+ }
+}
+EOF
+
+ip netns exec nsrouter2 ip link set eth1 mtu 1280
+ip netns exec nsclient2 ip link set veth0 mtu 1280
+sleep 1
+
+ip netns exec nsclient1 ping -c 1 -s 1000 -q -M do 192.168.2.2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ip routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+ip netns exec nsclient1 ping6 -q -c 1 -s 1000 dead:2::2 >/dev/null
+if [ $? -ne 0 ]; then
+ echo "ERROR: netns ipv6 routing/connectivity broken" 1>&2
+ cleanup
+ exit 1
+fi
+
+check_unknown
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+expect="packets 0 bytes 0"
+for netns in nsrouter1 nsrouter2 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+expect="packets 2 bytes 2076"
+check_counter nsclient2 "new" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+ip netns exec nsclient1 ping -q -c 1 -s 1300 -M do 192.168.2.2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+# nsrouter2 should have generated the icmp error, so
+# related counter should be 0 (it's in forward).
+expect="packets 0 bytes 0"
+check_counter "nsrouter2" "related" "$expect"
+if [ $? -ne 0 ]; then
+ ret=1
+fi
+
+# but nsrouter1 should have seen it, same for nsclient1.
+expect="packets 1 bytes 576"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+ip netns exec nsclient1 ping6 -c 1 -s 1300 dead:2::2 > /dev/null
+if [ $? -eq 0 ]; then
+ echo "ERROR: ping6 should have failed with PMTU too big error" 1>&2
+ ret=1
+fi
+
+expect="packets 2 bytes 1856"
+for netns in nsrouter1 nsclient1;do
+ check_counter "$netns" "related" "$expect"
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: icmp mtu error had RELATED state"
+else
+ echo "ERROR: icmp error RELATED state test has failed"
+fi
+
+cleanup
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index 8ec76681605c..f25f72a75cf3 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -23,7 +23,11 @@ ip netns add ns0
ip netns add ns1
ip netns add ns2
-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+fi
ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
ip -net ns0 link set lo up
diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/rseq/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
index 9887fd538fec..91316ab4b041 100644
--- a/tools/testing/selftests/timers/adjtick.c
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -147,6 +147,7 @@ int check_tick_adj(long tickval)
eppm = get_ppm_drift();
printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm);
+ fflush(stdout);
tx1.modes = 0;
adjtimex(&tx1);
diff --git a/tools/testing/selftests/timers/leapcrash.c b/tools/testing/selftests/timers/leapcrash.c
index a1071bdbdeb7..a77c70b47495 100644
--- a/tools/testing/selftests/timers/leapcrash.c
+++ b/tools/testing/selftests/timers/leapcrash.c
@@ -114,6 +114,7 @@ int main(void)
}
clear_time_state();
printf(".");
+ fflush(stdout);
}
printf("[OK]\n");
return ksft_exit_pass();
diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
index a2a3924d0b41..efdb62470052 100644
--- a/tools/testing/selftests/timers/mqueue-lat.c
+++ b/tools/testing/selftests/timers/mqueue-lat.c
@@ -113,6 +113,7 @@ int main(int argc, char **argv)
int ret;
printf("Mqueue latency : ");
+ fflush(stdout);
ret = mqueue_lat_test();
if (ret < 0) {
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index ff942ff7c9b3..2e6e94c02a33 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -153,6 +153,7 @@ int main(int argc, char **argv)
continue;
printf("Nanosleep %-31s ", clockstring(clockid));
+ fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
index 2d7898fda0f1..ac06cf10a5c2 100644
--- a/tools/testing/selftests/timers/nsleep-lat.c
+++ b/tools/testing/selftests/timers/nsleep-lat.c
@@ -166,6 +166,7 @@ int main(int argc, char **argv)
continue;
printf("nsleep latency %-26s ", clockstring(clockid));
+ fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index 0ab937a17ebb..4e631da7f956 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -124,6 +124,7 @@ int main(int argv, char **argc)
printf("WARNING: ADJ_OFFSET in progress, this will cause inaccurate results\n");
printf("Estimating clock drift: ");
+ fflush(stdout);
sleep(120);
get_monotonic_and_raw(&mon, &raw);
diff --git a/tools/testing/selftests/timers/set-tai.c b/tools/testing/selftests/timers/set-tai.c
index dc88dbc8831f..3ae76ab483de 100644
--- a/tools/testing/selftests/timers/set-tai.c
+++ b/tools/testing/selftests/timers/set-tai.c
@@ -66,6 +66,7 @@ int main(int argc, char **argv)
printf("tai offset started at %i\n", ret);
printf("Checking tai offsets can be properly set: ");
+ fflush(stdout);
for (i = 1; i <= 60; i++) {
ret = set_tai(i);
ret = get_tai();
diff --git a/tools/testing/selftests/timers/set-tz.c b/tools/testing/selftests/timers/set-tz.c
index f4184928b16b..b038131c9682 100644
--- a/tools/testing/selftests/timers/set-tz.c
+++ b/tools/testing/selftests/timers/set-tz.c
@@ -76,6 +76,7 @@ int main(int argc, char **argv)
printf("tz_minuteswest started at %i, dst at %i\n", min, dst);
printf("Checking tz_minuteswest can be properly set: ");
+ fflush(stdout);
for (i = -15*60; i < 15*60; i += 30) {
ret = set_tz(i, dst);
ret = get_tz_min();
@@ -87,6 +88,7 @@ int main(int argc, char **argv)
printf("[OK]\n");
printf("Checking invalid tz_minuteswest values are caught: ");
+ fflush(stdout);
if (!set_tz(-15*60-1, dst)) {
printf("[FAILED] %i didn't return failure!\n", -15*60-1);
diff --git a/tools/testing/selftests/timers/threadtest.c b/tools/testing/selftests/timers/threadtest.c
index e632e116f05e..a4bf736dd842 100644
--- a/tools/testing/selftests/timers/threadtest.c
+++ b/tools/testing/selftests/timers/threadtest.c
@@ -175,6 +175,7 @@ int main(int argc, char **argv)
strftime(buf, 255, "%a, %d %b %Y %T %z", localtime(&start));
printf("%s\n", buf);
printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime);
+ fflush(stdout);
/* spawn */
for (i = 0; i < thread_count; i++)
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index 60fe3c569bd9..a747645d79f4 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -134,6 +134,7 @@ int validate_freq(void)
/* Set the leap second insert flag */
printf("Testing ADJ_FREQ... ");
+ fflush(stdout);
for (i = 0; i < NUM_FREQ_VALID; i++) {
tx.modes = ADJ_FREQUENCY;
tx.freq = valid_freq[i];
@@ -261,6 +262,7 @@ int set_bad_offset(long sec, long usec, int use_nano)
int validate_set_offset(void)
{
printf("Testing ADJ_SETOFFSET... ");
+ fflush(stdout);
/* Test valid values */
if (set_offset(NSEC_PER_SEC - 1, 1))
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index 1e3da137a8bb..5390c827a359 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -413,8 +413,12 @@ int main()
#if defined(__i386__) && (!defined(__GLIBC__) || __GLIBC__ > 2 || __GLIBC_MINOR__ >= 16)
vsyscall32 = (void *)getauxval(AT_SYSINFO);
- printf("[RUN]\tCheck AT_SYSINFO return regs\n");
- test_sys32_regs(do_full_vsyscall32);
+ if (vsyscall32) {
+ printf("[RUN]\tCheck AT_SYSINFO return regs\n");
+ test_sys32_regs(do_full_vsyscall32);
+ } else {
+ printf("[SKIP]\tAT_SYSINFO is not available\n");
+ }
#endif
test_ptrace_syscall_restart();
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index 6ff7b601f854..4bb905925b0e 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
int size;
int fd;
int length;
- char status;
+ char status[2] = { 0 };
int value = 0;
size = snprintf(status_attr_path, sizeof(status_attr_path),
@@ -61,15 +61,15 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
return -1;
}
- length = read(fd, &status, 1);
+ length = read(fd, status, 1);
if (length < 0) {
err("error reading attribute %s", status_attr_path);
close(fd);
return -1;
}
- value = atoi(&status);
-
+ value = atoi(status);
+ close(fd);
return value;
}
diff --git a/tools/usb/usbip/src/usbip_network.c b/tools/usb/usbip/src/usbip_network.c
index b4c37e76a6e0..187dfaa67d0a 100644
--- a/tools/usb/usbip/src/usbip_network.c
+++ b/tools/usb/usbip/src/usbip_network.c
@@ -62,39 +62,39 @@ void usbip_setup_port_number(char *arg)
info("using port %d (\"%s\")", usbip_port, usbip_port_string);
}
-void usbip_net_pack_uint32_t(int pack, uint32_t *num)
+uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num)
{
uint32_t i;
if (pack)
- i = htonl(*num);
+ i = htonl(num);
else
- i = ntohl(*num);
+ i = ntohl(num);
- *num = i;
+ return i;
}
-void usbip_net_pack_uint16_t(int pack, uint16_t *num)
+uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num)
{
uint16_t i;
if (pack)
- i = htons(*num);
+ i = htons(num);
else
- i = ntohs(*num);
+ i = ntohs(num);
- *num = i;
+ return i;
}
void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev)
{
- usbip_net_pack_uint32_t(pack, &udev->busnum);
- usbip_net_pack_uint32_t(pack, &udev->devnum);
- usbip_net_pack_uint32_t(pack, &udev->speed);
+ udev->busnum = usbip_net_pack_uint32_t(pack, udev->busnum);
+ udev->devnum = usbip_net_pack_uint32_t(pack, udev->devnum);
+ udev->speed = usbip_net_pack_uint32_t(pack, udev->speed);
- usbip_net_pack_uint16_t(pack, &udev->idVendor);
- usbip_net_pack_uint16_t(pack, &udev->idProduct);
- usbip_net_pack_uint16_t(pack, &udev->bcdDevice);
+ udev->idVendor = usbip_net_pack_uint16_t(pack, udev->idVendor);
+ udev->idProduct = usbip_net_pack_uint16_t(pack, udev->idProduct);
+ udev->bcdDevice = usbip_net_pack_uint16_t(pack, udev->bcdDevice);
}
void usbip_net_pack_usb_interface(int pack __attribute__((unused)),
@@ -141,6 +141,14 @@ ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen)
return usbip_net_xmit(sockfd, buff, bufflen, 1);
}
+static inline void usbip_net_pack_op_common(int pack,
+ struct op_common *op_common)
+{
+ op_common->version = usbip_net_pack_uint16_t(pack, op_common->version);
+ op_common->code = usbip_net_pack_uint16_t(pack, op_common->code);
+ op_common->status = usbip_net_pack_uint32_t(pack, op_common->status);
+}
+
int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
{
struct op_common op_common;
@@ -152,7 +160,7 @@ int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
op_common.code = code;
op_common.status = status;
- PACK_OP_COMMON(1, &op_common);
+ usbip_net_pack_op_common(1, &op_common);
rc = usbip_net_send(sockfd, &op_common, sizeof(op_common));
if (rc < 0) {
@@ -176,7 +184,7 @@ int usbip_net_recv_op_common(int sockfd, uint16_t *code)
goto err;
}
- PACK_OP_COMMON(0, &op_common);
+ usbip_net_pack_op_common(0, &op_common);
if (op_common.version != USBIP_VERSION) {
dbg("version mismatch: %d %d", op_common.version,
diff --git a/tools/usb/usbip/src/usbip_network.h b/tools/usb/usbip/src/usbip_network.h
index c1e875cf1078..573fa839b66b 100644
--- a/tools/usb/usbip/src/usbip_network.h
+++ b/tools/usb/usbip/src/usbip_network.h
@@ -33,12 +33,6 @@ struct op_common {
} __attribute__((packed));
-#define PACK_OP_COMMON(pack, op_common) do {\
- usbip_net_pack_uint16_t(pack, &(op_common)->version);\
- usbip_net_pack_uint16_t(pack, &(op_common)->code);\
- usbip_net_pack_uint32_t(pack, &(op_common)->status);\
-} while (0)
-
/* ---------------------------------------------------------------------- */
/* Dummy Code */
#define OP_UNSPEC 0x00
@@ -164,11 +158,11 @@ struct op_devlist_reply_extra {
} while (0)
#define PACK_OP_DEVLIST_REPLY(pack, reply) do {\
- usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
+ (reply)->ndev = usbip_net_pack_uint32_t(pack, (reply)->ndev);\
} while (0)
-void usbip_net_pack_uint32_t(int pack, uint32_t *num);
-void usbip_net_pack_uint16_t(int pack, uint16_t *num);
+uint32_t usbip_net_pack_uint32_t(int pack, uint32_t num);
+uint16_t usbip_net_pack_uint16_t(int pack, uint16_t num);
void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev);
void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf);
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1ebbf233de9a..6d64b2cb02ab 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -1466,6 +1466,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
mutex_unlock(&its->its_lock);
kfree(its);
+ kfree(kvm_dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
}
static int vgic_its_has_attr(struct kvm_device *dev,
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 85814d1bad11..87742c9803a7 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -120,6 +120,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
return value;
}
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+ return (vgic_irq_is_sgi(irq->intid) &&
+ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
@@ -130,6 +136,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ /* GICD_ISPENDR0 SGI bits are WI */
+ if (is_vgic_v2_sgi(vcpu, irq)) {
+ vgic_put_irq(vcpu->kvm, irq);
+ continue;
+ }
+
spin_lock(&irq->irq_lock);
irq->pending = true;
if (irq->config == VGIC_CONFIG_LEVEL)
@@ -150,6 +162,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ /* GICD_ICPENDR0 SGI bits are WI */
+ if (is_vgic_v2_sgi(vcpu, irq)) {
+ vgic_put_irq(vcpu->kvm, irq);
+ continue;
+ }
+
spin_lock(&irq->irq_lock);
if (irq->config == VGIC_CONFIG_LEVEL) {
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 1ab58f7b5d74..4c2919cc13ca 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -154,7 +154,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (vgic_irq_is_sgi(irq->intid)) {
u32 src = ffs(irq->source);
- BUG_ON(!src);
+ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+ irq->intid))
+ return;
+
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source)
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index c7924718990e..267b1cf88a7f 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -137,7 +137,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
u32 src = ffs(irq->source);
- BUG_ON(!src);
+ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+ irq->intid))
+ return;
+
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 6440b56ec90e..1934dc8a2ce0 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -196,6 +196,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
bool penda, pendb;
int ret;
+ /*
+ * list_sort may call this function with the same element when
+ * the list is fairly long.
+ */
+ if (unlikely(irqa == irqb))
+ return 0;
+
spin_lock(&irqa->irq_lock);
spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 571c1ce37d15..5c1efb869df2 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -39,7 +39,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
return 1;
}
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
struct kvm_coalesced_mmio_ring *ring;
unsigned avail;
@@ -51,7 +51,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
* there is always one unused entry in the buffer
*/
ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
if (avail == 0) {
/* full */
return 0;
@@ -66,24 +66,27 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
{
struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+ __u32 insert;
if (!coalesced_mmio_in_range(dev, addr, len))
return -EOPNOTSUPP;
spin_lock(&dev->kvm->ring_lock);
- if (!coalesced_mmio_has_room(dev)) {
+ insert = READ_ONCE(ring->last);
+ if (!coalesced_mmio_has_room(dev, insert) ||
+ insert >= KVM_COALESCED_MMIO_MAX) {
spin_unlock(&dev->kvm->ring_lock);
return -EOPNOTSUPP;
}
/* copy data in first free entry of the ring */
- ring->coalesced_mmio[ring->last].phys_addr = addr;
- ring->coalesced_mmio[ring->last].len = len;
- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+ ring->coalesced_mmio[insert].phys_addr = addr;
+ ring->coalesced_mmio[insert].len = len;
+ memcpy(ring->coalesced_mmio[insert].data, val, len);
smp_wmb();
- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
spin_unlock(&dev->kvm->ring_lock);
return 0;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 60de4c337f0a..4e4bb5dd2dcd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
+#include <linux/kthread.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -87,7 +88,7 @@ module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);
@@ -130,10 +131,30 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
{
}
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+ /*
+ * The metadata used by is_zone_device_page() to determine whether or
+ * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+ * the device has been pinned, e.g. by get_user_pages(). WARN if the
+ * page_count() is zero to help detect bad usage of this helper.
+ */
+ if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+ return false;
+
+ return is_zone_device_page(pfn_to_page(pfn));
+}
+
bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
+ /*
+ * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+ * perspective they are "normal" pages, albeit with slightly different
+ * usage rules.
+ */
if (pfn_valid(pfn))
- return PageReserved(pfn_to_page(pfn));
+ return PageReserved(pfn_to_page(pfn)) &&
+ !kvm_is_zone_device_pfn(pfn);
return true;
}
@@ -612,6 +633,23 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
return 0;
}
+/*
+ * Called after the VM is otherwise initialized, but just before adding it to
+ * the vm_list.
+ */
+int __weak kvm_arch_post_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+/*
+ * Called just after removing the VM from the vm_list, but before doing any
+ * other destruction.
+ */
+void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
+{
+}
+
static struct kvm *kvm_create_vm(unsigned long type)
{
int r, i;
@@ -659,22 +697,31 @@ static struct kvm *kvm_create_vm(unsigned long type)
kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
GFP_KERNEL);
if (!kvm->buses[i])
- goto out_err;
+ goto out_err_no_mmu_notifier;
}
r = kvm_init_mmu_notifier(kvm);
if (r)
+ goto out_err_no_mmu_notifier;
+
+ r = kvm_arch_post_init_vm(kvm);
+ if (r)
goto out_err;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
preempt_notifier_inc();
return kvm;
out_err:
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ if (kvm->mmu_notifier.ops)
+ mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
+#endif
+out_err_no_mmu_notifier:
cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
cleanup_srcu_struct(&kvm->srcu);
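The reworked error path in kvm_create_vm() above follows the usual layered-unwind convention: each label undoes only what was set up before the failing step, which is why a kvm_arch_post_init_vm() failure jumps to out_err (the notifier is registered and must be unregistered) while an MMU-notifier failure jumps past that teardown. A compilable sketch of the convention with hypothetical helpers, not KVM code:

/* Hypothetical stand-ins for bus allocation, notifier registration and
 * the post-init arch hook; each teardown mirrors one setup step. */
static int  setup_buses(void)        { return 0; }
static int  setup_mmu_notifier(void) { return 0; }
static int  post_init(void)          { return 0; }
static void drop_mmu_notifier(void)  { }
static void free_buses(void)         { }

static int sketch_create_vm(void)
{
        int r = setup_buses();
        if (r)
                return r;

        r = setup_mmu_notifier();
        if (r)
                goto out_err_no_mmu_notifier;   /* buses exist, notifier does not */

        r = post_init();                        /* the new kvm_arch_post_init_vm() step */
        if (r)
                goto out_err;                   /* notifier registered: unwind it too */

        return 0;

out_err:
        drop_mmu_notifier();
out_err_no_mmu_notifier:
        free_buses();
        return r;
}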
@@ -724,9 +771,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_del(&kvm->vm_list);
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
+ kvm_arch_pre_destroy_vm(kvm);
+
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
if (kvm->buses[i])
@@ -1729,7 +1778,7 @@ static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn)) {
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
@@ -1740,7 +1789,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn))
+ if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
@@ -1996,12 +2045,12 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
if (slots->generation != ghc->generation)
kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
- if (unlikely(!ghc->memslot))
- return kvm_write_guest(kvm, ghc->gpa, data, len);
-
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
+ if (unlikely(!ghc->memslot))
+ return kvm_write_guest(kvm, ghc->gpa, data, len);
+
r = __copy_to_user((void __user *)ghc->hva, data, len);
if (r)
return -EFAULT;
@@ -2022,12 +2071,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
if (slots->generation != ghc->generation)
kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
- if (unlikely(!ghc->memslot))
- return kvm_read_guest(kvm, ghc->gpa, data, len);
-
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
+ if (unlikely(!ghc->memslot))
+ return kvm_read_guest(kvm, ghc->gpa, data, len);
+
r = __copy_from_user(data, (void __user *)ghc->hva, len);
if (r)
return -EFAULT;
@@ -2793,6 +2842,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
+ if (dev->kvm->mm != current->mm)
+ return -EIO;
+
switch (ioctl) {
case KVM_SET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
@@ -3749,13 +3801,13 @@ static int vm_stat_get(void *_offset, u64 *val)
u64 tmp_val;
*val = 0;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
stat_tmp.kvm = kvm;
vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
*val += tmp_val;
}
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
return 0;
}
@@ -3769,13 +3821,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
u64 tmp_val;
*val = 0;
- spin_lock(&kvm_lock);
+ mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
stat_tmp.kvm = kvm;
vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
*val += tmp_val;
}
- spin_unlock(&kvm_lock);
+ mutex_unlock(&kvm_lock);
return 0;
}
@@ -3984,3 +4036,86 @@ void kvm_exit(void)
kvm_vfio_ops_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);
+
+struct kvm_vm_worker_thread_context {
+ struct kvm *kvm;
+ struct task_struct *parent;
+ struct completion init_done;
+ kvm_vm_thread_fn_t thread_fn;
+ uintptr_t data;
+ int err;
+};
+
+static int kvm_vm_worker_thread(void *context)
+{
+ /*
+ * The init_context is allocated on the stack of the parent thread, so
+ * we have to locally copy anything that is needed beyond initialization
+ */
+ struct kvm_vm_worker_thread_context *init_context = context;
+ struct kvm *kvm = init_context->kvm;
+ kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
+ uintptr_t data = init_context->data;
+ int err;
+
+ err = kthread_park(current);
+ /* kthread_park(current) is never supposed to return an error */
+ WARN_ON(err != 0);
+ if (err)
+ goto init_complete;
+
+ err = cgroup_attach_task_all(init_context->parent, current);
+ if (err) {
+ kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
+ __func__, err);
+ goto init_complete;
+ }
+
+ set_user_nice(current, task_nice(init_context->parent));
+
+init_complete:
+ init_context->err = err;
+ complete(&init_context->init_done);
+ init_context = NULL;
+
+ if (err)
+ return err;
+
+ /* Wait to be woken up by the spawner before proceeding. */
+ kthread_parkme();
+
+ if (!kthread_should_stop())
+ err = thread_fn(kvm, data);
+
+ return err;
+}
+
+int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
+ uintptr_t data, const char *name,
+ struct task_struct **thread_ptr)
+{
+ struct kvm_vm_worker_thread_context init_context = {};
+ struct task_struct *thread;
+
+ *thread_ptr = NULL;
+ init_context.kvm = kvm;
+ init_context.parent = current;
+ init_context.thread_fn = thread_fn;
+ init_context.data = data;
+ init_completion(&init_context.init_done);
+
+ thread = kthread_run(kvm_vm_worker_thread, &init_context,
+ "%s-%d", name, task_pid_nr(current));
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
+
+ /* kthread_run is never supposed to return NULL */
+ WARN_ON(thread == NULL);
+
+ wait_for_completion(&init_context.init_done);
+
+ if (!init_context.err)
+ *thread_ptr = thread;
+
+ return init_context.err;
+}
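kvm_vm_create_worker_thread() above hands back a parked kthread: the worker attaches itself to the spawner's cgroups, inherits its nice value, reports the result through init_done, then parks until it is unparked. A sketch of how an arch-side caller might use it, with hypothetical names (elsewhere in this series the x86 NX huge-page recovery thread follows the same pattern):

/* Hypothetical worker: periodic work against @kvm until asked to stop. */
static int my_recovery_fn(struct kvm *kvm, uintptr_t data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);     /* placeholder for real work */
        return 0;
}

static int my_vm_post_init(struct kvm *kvm, struct task_struct **thread)
{
        int err = kvm_vm_create_worker_thread(kvm, my_recovery_fn, 0,
                                              "my-recovery-worker", thread);
        if (!err)
                kthread_unpark(*thread);        /* worker stays parked until unparked */
        return err;
}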